Dataset schema (each row describes one source file; ⌀ marks nullable columns):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 112)
license_type: string (2 classes)
repo_name: string (length 5 to 115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable ⌀)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (length 3 to 10.2M)
authors: list (length 1)
author_id: string (length 1 to 132)

Rows follow below, with fields separated by " | " in the column order above.
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24da100dd2dcfb1fbf2dc0f990d2db5cecb40f9e | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/operations/_blob_services_operations.py | 0d43959e0413d45f681583c3efd5aacfe3752027 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 13,580 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BlobServicesOperations(object):
"""BlobServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.BlobServiceItems"]
"""List blob services of storage account. It returns a collection of one object named default.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BlobServiceItems or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.BlobServiceItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobServiceItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('BlobServiceItems', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices'} # type: ignore
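    # Illustrative call pattern (a sketch, not part of the generated code; the client
    # attribute name is assumed): with a configured StorageManagementClient `client`,
    # the pager built above is consumed like any other ItemPaged iterator:
    #
    #     for item in client.blob_services.list("my-rg", "mystorageacct"):
    #         print(item)   # the service returns a single item named "default"
    #
    # Each iteration drives get_next()/extract_data() defined above as needed.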
def set_service_properties(
self,
resource_group_name, # type: str
account_name, # type: str
parameters, # type: "_models.BlobServiceProperties"
**kwargs # type: Any
):
# type: (...) -> "_models.BlobServiceProperties"
"""Sets the properties of a storage account’s Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The properties of a storage account’s Blob service, including properties for
Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.
:type parameters: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
blob_services_name = "default"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_service_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'BlobServicesName': self._serialize.url("blob_services_name", blob_services_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BlobServiceProperties')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'} # type: ignore
def get_service_properties(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BlobServiceProperties"
"""Gets the properties of a storage account’s Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.BlobServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
blob_services_name = "default"
accept = "application/json"
# Construct URL
url = self.get_service_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'BlobServicesName': self._serialize.url("blob_services_name", blob_services_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'} # type: ignore
| [
"[email protected]"
]
| |
0e10c4dc821d92d19a19781c29184f2c21a3a2f8 | a2e0e03e31f892454e537df32e3e1e1d94764fa0 | /virtual/bin/gunicorn_paster | a5acb6c5f33e8c0f85c1fc0f5f42198bd48c6b30 | [
"MIT"
]
| permissive | MichelAtieno/Instagram-Clone | 557272585a3fff6f7a7c552b08cc5ef5e2c129da | 7250579e4f91084ad9bf8bd688df3f556dfef64a | refs/heads/master | 2020-03-30T16:23:19.351522 | 2018-10-09T09:42:05 | 2018-10-09T09:42:05 | 151,406,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/home/michel/Desktop/Python-Django/Instagram/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
]
| ||
3a4c7f9d712049cf02648c56d53ff66b940cd9fb | 05d692469305dd1adb9ebc46080525bb4515b424 | /Exception handling/tryfinally5.py | 7a605945006ab61d29eedb42aaf62afea001654d | []
| no_license | rajdharmkar/pythoncode | 979805bc0e672f123ca1460644a4bd71d7854fd5 | 15b758d373f27da5680a711bf12c07e86758c447 | refs/heads/master | 2020-08-07T18:30:55.575632 | 2019-10-14T12:46:09 | 2019-10-14T12:46:09 | 213,551,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | try:
    fob = open('test.txt', 'w')
    fob.write("It's my test file to verify try-finally in exception handling!!")
    print 'try block executed'
finally:
    fob.close()
    print 'finally block executed'
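# Note (added for clarity): the finally clause runs whether the try body completes or
# raises, so the file handle opened above is always closed before the script exits.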
| [
"[email protected]"
]
| |
6893b1b04629476fddf2845af7cfe5908b9cb720 | 72e11a80587342b3f278d4df18406cd4ce7531e8 | /hgdemandimport/demandimportpy3.py | e2ea27fa0f1166fc55324efb1bbdaf6c4a5029c6 | []
| no_license | EnjoyLifeFund/Debian_py36_packages | 740666f290cef73a4f634558ccf3fd4926addeda | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | refs/heads/master | 2021-08-24T02:17:24.349195 | 2017-12-06T06:18:35 | 2017-12-06T06:18:35 | 113,167,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | # demandimportpy3 - global demand-loading of modules for Mercurial
#
# Copyright 2017 Facebook Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Lazy loading for Python 3.6 and above.
This uses the new importlib finder/loader functionality available in Python 3.5
and up. The code reuses most of the mechanics implemented inside importlib.util,
but with a few additions:
* Allow excluding certain modules from lazy imports.
* Expose an interface that's substantially the same as demandimport for
Python 2.
This also has some limitations compared to the Python 2 implementation:
* Much of the logic is per-package, not per-module, so any packages loaded
before demandimport is enabled will not be lazily imported in the future. In
practice, we only expect builtins to be loaded before demandimport is
enabled.
"""
# This line is unnecessary, but it satisfies test-check-py3-compat.t.
from __future__ import absolute_import
import contextlib
import importlib.abc
import importlib.machinery
import importlib.util
import sys
_deactivated = False
class _lazyloaderex(importlib.util.LazyLoader):
"""This is a LazyLoader except it also follows the _deactivated global and
the ignore list.
"""
def exec_module(self, module):
"""Make the module load lazily."""
if _deactivated or module.__name__ in ignore:
self.loader.exec_module(module)
else:
super().exec_module(module)
# This is 3.6+ because with Python 3.5 it isn't possible to lazily load
# extensions. See the discussion in https://python.org/sf/26186 for more.
_extensions_loader = _lazyloaderex.factory(
importlib.machinery.ExtensionFileLoader)
_bytecode_loader = _lazyloaderex.factory(
importlib.machinery.SourcelessFileLoader)
_source_loader = _lazyloaderex.factory(importlib.machinery.SourceFileLoader)
def _makefinder(path):
return importlib.machinery.FileFinder(
path,
# This is the order in which loaders are passed in in core Python.
(_extensions_loader, importlib.machinery.EXTENSION_SUFFIXES),
(_source_loader, importlib.machinery.SOURCE_SUFFIXES),
(_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES),
)
ignore = []
def init(ignorelist):
global ignore
ignore = ignorelist
def isenabled():
return _makefinder in sys.path_hooks and not _deactivated
def disable():
try:
while True:
sys.path_hooks.remove(_makefinder)
except ValueError:
pass
def enable():
sys.path_hooks.insert(0, _makefinder)
@contextlib.contextmanager
def deactivated():
# This implementation is a bit different from Python 2's. Python 3
# maintains a per-package finder cache in sys.path_importer_cache (see
# PEP 302). This means that we can't just call disable + enable.
# If we do that, in situations like:
#
# demandimport.enable()
# ...
# from foo.bar import mod1
# with demandimport.deactivated():
# from foo.bar import mod2
#
# mod2 will be imported lazily. (The converse also holds -- whatever finder
# first gets cached will be used.)
#
# Instead, have a global flag the LazyLoader can use.
global _deactivated
demandenabled = isenabled()
if demandenabled:
_deactivated = True
try:
yield
finally:
if demandenabled:
_deactivated = False
| [
"[email protected]"
]
| |
68b377124eb26ae187dc04f00fc3c6cc81fed129 | a21d2fb3f111f30b842a4c3a5c6940d1a003b94d | /Python3/Foundation/Day 8/进程 join.py | 883a86cf1f1208853b0f0f2f71b0dd2e70e1d4ae | []
| no_license | hygnic/MyPython | 438f16206770a006a3b7bcf2ada9150c71ce8af9 | 26aaa57728ad545af5920ff2015eae258712d077 | refs/heads/master | 2021-07-01T02:10:15.810495 | 2021-02-26T08:07:54 | 2021-02-26T08:07:54 | 220,612,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # User: hygnic
# Date: 2018/9/8
import os
import time
from multiprocessing import Process
# help(os)
def func1(args):
    print(args)
    time.sleep(2)
    print('son process: ', os.getpid())
def func2(filename, content):
    with open(filename, 'w') as content_wp:
        content_wp.write(content)
if __name__ == '__main__':
    # Register the processes
    j_list = []
    for i in range(10):  # start several child processes
        f1 = Process(target=func1, args=('*' * i,))  # a single argument still needs a trailing comma (args is a tuple)
        # p2 = Process(target=func, args=('arg1', 'arg2'))  # more child processes can be started the same way
        f1.start()  # start a child process; internally this calls the run() method
        j_list.append(f1)  # the list holds the process objects
    f2 = Process(target=func2, args=('info', 'func2 content'))
    f2.start()
    # print(j_list)
    # join() blocks the current process until the joined child finishes, then execution
    # continues: it turns the asynchronous starts above into a synchronous wait.
    [f1.join() for f1 in j_list]  # list comprehension
    print('Done! father process: ', os.getpid())
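    # Expected behaviour (for reference): because every child is join()ed above, the
    # 'Done! father process' line prints only after all ten func1 children finish their
    # 2-second sleep; without the joins it would print almost immediately.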
| [
"[email protected]"
]
| |
52339edf02f3ab2499baae92bfcd98d8aca6a7e2 | e86de5af089798890fae230fad381ca5a84fa562 | /rssant_feedlib/reader.py | 4b3ef65d52ba94cb18dbc3e72c7a306030e97ffe | [
"BSD-3-Clause"
]
| permissive | RustamYasaviev/rssant | 853508adfbb269d3ce91d4b4a122b8c65537ee51 | 25a66e136a6154b4ce3ef4004e562c7d0be67ec0 | refs/heads/master | 2022-12-30T23:57:42.546833 | 2020-10-23T11:08:26 | 2020-10-23T11:08:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,566 | py | import re
import socket
import ssl
import ipaddress
import logging
from urllib.parse import urlparse
from http import HTTPStatus
import requests
from rssant_common.dns_service import DNSService, DNS_SERVICE
from .response import FeedResponse, FeedResponseStatus
from .response_builder import FeedResponseBuilder
from .useragent import DEFAULT_USER_AGENT
from . import cacert
LOG = logging.getLogger(__name__)
class FeedReaderError(Exception):
"""FeedReaderError"""
status = None
class PrivateAddressError(FeedReaderError):
"""Private IP address"""
status = FeedResponseStatus.PRIVATE_ADDRESS_ERROR.value
class ContentTooLargeError(FeedReaderError):
"""Content too large"""
status = FeedResponseStatus.CONTENT_TOO_LARGE_ERROR.value
class ContentTypeNotSupportError(FeedReaderError):
"""ContentTypeNotSupportError"""
status = FeedResponseStatus.CONTENT_TYPE_NOT_SUPPORT_ERROR.value
class RSSProxyError(FeedReaderError):
"""RSSProxyError"""
status = FeedResponseStatus.RSS_PROXY_ERROR.value
RE_WEBPAGE_CONTENT_TYPE = re.compile(
r'(text/html|application/xml|text/xml|text/plain|application/json|'
r'application/.*xml|application/.*json|text/.*xml)', re.I)
RE_WEBPAGE_EXT = re.compile(
r'(html|xml|json|txt|opml|rss|feed|atom)', re.I)
RE_URL_EXT_SEP = re.compile(r'[./]')
def _get_url_ext(url: str):
"""
>>> _get_url_ext('http://example.com/blog/feed')
'feed'
>>> _get_url_ext('http://example.com/blog/feed.xml')
'xml'
>>> no_error = _get_url_ext('http://example.com')
"""
try:
url_path = urlparse(url).path.strip('/')
except ValueError:
return ''
parts = RE_URL_EXT_SEP.split(url_path[::-1], 1)
if len(parts) > 0:
return parts[0][::-1]
return ''
def is_webpage(content_type, url=None):
"""
>>> is_webpage(' text/HTML ')
True
>>> is_webpage('application/rss+xml; charset=utf-8')
True
>>> is_webpage('application/atom+json')
True
>>> is_webpage('image/jpeg')
False
>>> is_webpage('')
True
>>> is_webpage('application/octet-stream', 'https://www.example.com/feed.XML?q=1')
True
>>> is_webpage('application/octet-stream', 'https://www.example.com/feed')
True
"""
if content_type:
content_type = content_type.split(';', maxsplit=1)[0].strip()
if bool(RE_WEBPAGE_CONTENT_TYPE.fullmatch(content_type)):
return True
# for most of compatibility
if not content_type:
return True
# feed use may 'application/octet-stream', check url ext for the case
# eg: https://blog.racket-lang.org/
if url:
url_ext = _get_url_ext(url)
if url_ext:
if bool(RE_WEBPAGE_EXT.fullmatch(url_ext.lstrip('.'))):
return True
return False
def is_ok_status(status):
return status and 200 <= status <= 299
class FeedReader:
def __init__(
self,
session=None,
user_agent=DEFAULT_USER_AGENT,
request_timeout=30,
max_content_length=10 * 1024 * 1024,
allow_private_address=False,
allow_non_webpage=False,
rss_proxy_url=None,
rss_proxy_token=None,
dns_service: DNSService = DNS_SERVICE,
):
if session is None:
session = requests.session()
self._close_session = True
else:
self._close_session = False
self.session = session
self.user_agent = user_agent
self.request_timeout = request_timeout
self.max_content_length = max_content_length
self.allow_private_address = allow_private_address
self.allow_non_webpage = allow_non_webpage
self.rss_proxy_url = rss_proxy_url
self.rss_proxy_token = rss_proxy_token
self.dns_service = dns_service
self._cacert = cacert.where()
@property
def has_rss_proxy(self):
return bool(self.rss_proxy_url)
def _resolve_hostname(self, hostname):
if self.dns_service:
hosts = self.dns_service.resolve(hostname)
if hosts:
yield from hosts
return
addrinfo = socket.getaddrinfo(hostname, None)
for family, __, __, __, sockaddr in addrinfo:
if family == socket.AF_INET:
ip, __ = sockaddr
yield ip
elif family == socket.AF_INET6:
ip, __, __, __ = sockaddr
yield ip
def check_private_address(self, url):
"""Prevent request private address, which will attack local network"""
if self.allow_private_address:
return
hostname = urlparse(url).hostname
for ip in self._resolve_hostname(hostname):
ip = ipaddress.ip_address(ip)
if ip.is_private:
raise PrivateAddressError(ip)
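    # Illustrative examples of what this guard rejects (assuming DNS resolves as
    # written): feed URLs whose host is 127.0.0.1, an RFC 1918 address such as 10.0.0.1
    # or 192.168.1.1, or a hostname resolving to one of those, all raise
    # PrivateAddressError before any HTTP request is sent.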
def check_content_type(self, response):
if self.allow_non_webpage:
return
if not is_ok_status(response.status_code):
return
content_type = response.headers.get('content-type')
if not is_webpage(content_type, str(response.url)):
raise ContentTypeNotSupportError(
f'content-type {content_type!r} not support')
def _read_content(self, response: requests.Response):
content_length = response.headers.get('Content-Length')
if content_length:
content_length = int(content_length)
if content_length > self.max_content_length:
msg = 'content length {} larger than limit {}'.format(
content_length, self.max_content_length)
raise ContentTooLargeError(msg)
content_length = 0
content = bytearray()
for data in response.iter_content(chunk_size=64 * 1024):
content_length += len(data)
if content_length > self.max_content_length:
msg = 'content length larger than limit {}'.format(
self.max_content_length)
raise ContentTooLargeError(msg)
content.extend(data)
return content
def _decode_content(self, content: bytes):
if not content:
return ''
return content.decode('utf-8', errors='ignore')
def _prepare_headers(self, url, etag=None, last_modified=None):
headers = {}
if callable(self.user_agent):
headers['User-Agent'] = self.user_agent(url)
else:
headers['User-Agent'] = self.user_agent
if etag:
headers["ETag"] = etag
if last_modified:
headers["If-Modified-Since"] = last_modified
return headers
def _send_request(self, request, ignore_content):
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
response = self.session.send(
request, verify=self._cacert, timeout=(6.5, self.request_timeout), stream=True)
try:
if not is_ok_status(response.status_code):
content = self._read_content(response)
return response, content
self.check_content_type(response)
content = None
if not ignore_content:
content = self._read_content(response)
finally:
# Fix: Requests memory leak
# https://github.com/psf/requests/issues/4601
response.close()
return response, content
def _read(self, url, etag=None, last_modified=None, ignore_content=False):
headers = self._prepare_headers(url, etag=etag, last_modified=last_modified)
req = requests.Request('GET', url, headers=headers)
prepared = self.session.prepare_request(req)
if not self.allow_private_address:
self.check_private_address(prepared.url)
response, content = self._send_request(prepared, ignore_content=ignore_content)
return response.headers, content, response.url, response.status_code
def _read_by_proxy(self, url, etag=None, last_modified=None, ignore_content=False):
if not self.has_rss_proxy:
raise ValueError("rss_proxy_url not provided")
headers = self._prepare_headers(url, etag=etag, last_modified=last_modified)
data = dict(
url=url,
token=self.rss_proxy_token,
headers=headers,
)
req = requests.Request('POST', self.rss_proxy_url, json=data)
prepared = self.session.prepare_request(req)
response, content = self._send_request(prepared, ignore_content=ignore_content)
if not is_ok_status(response.status_code):
message = 'status={} body={!r}'.format(
response.status_code, self._decode_content(content))
raise RSSProxyError(message)
proxy_status = response.headers.get('x-rss-proxy-status', None)
if proxy_status and proxy_status.upper() == 'ERROR':
message = 'status={} body={!r}'.format(
response.status_code, self._decode_content(content))
raise RSSProxyError(message)
proxy_status = int(proxy_status) if proxy_status else HTTPStatus.OK.value
return response.headers, content, url, proxy_status
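    # Sketch of the proxy exchange assumed by _read_by_proxy above (the proxy endpoint
    # itself is deployment-specific):
    #   request:  POST {rss_proxy_url} with JSON body {"url": ..., "token": ..., "headers": {...}}
    #   response: body is the upstream feed content; header "x-rss-proxy-status" carries
    #             the upstream HTTP status, or the literal string "ERROR" on proxy failure.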
def read(self, url, *args, use_proxy=False, **kwargs) -> FeedResponse:
headers = content = None
try:
if use_proxy:
headers, content, url, status = self._read_by_proxy(url, *args, **kwargs)
else:
headers, content, url, status = self._read(url, *args, **kwargs)
except socket.gaierror:
status = FeedResponseStatus.DNS_ERROR.value
except requests.exceptions.ReadTimeout:
status = FeedResponseStatus.READ_TIMEOUT.value
except (socket.timeout, TimeoutError, requests.exceptions.ConnectTimeout):
status = FeedResponseStatus.CONNECTION_TIMEOUT.value
except (ssl.SSLError, ssl.CertificateError, requests.exceptions.SSLError):
status = FeedResponseStatus.SSL_ERROR.value
except requests.exceptions.ProxyError:
status = FeedResponseStatus.PROXY_ERROR.value
except (ConnectionError, requests.exceptions.ConnectionError):
status = FeedResponseStatus.CONNECTION_RESET.value
except requests.exceptions.TooManyRedirects:
status = FeedResponseStatus.TOO_MANY_REDIRECT_ERROR.value
except requests.exceptions.ChunkedEncodingError:
status = FeedResponseStatus.CHUNKED_ENCODING_ERROR.value
except requests.exceptions.ContentDecodingError:
status = FeedResponseStatus.CONTENT_DECODING_ERROR.value
except UnicodeDecodeError:
status = FeedResponseStatus.CONTENT_DECODING_ERROR.value
except FeedReaderError as ex:
status = ex.status
LOG.warning(type(ex).__name__ + " url=%s %s", url, ex)
except (requests.HTTPError, requests.RequestException) as ex:
if ex.response is not None:
status = ex.response.status_code
else:
status = FeedResponseStatus.UNKNOWN_ERROR.value
builder = FeedResponseBuilder(use_proxy=use_proxy)
builder.url(url)
builder.status(status)
builder.content(content)
builder.headers(headers)
return builder.build()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if self._close_session:
self.session.close()
| [
"[email protected]"
]
| |
d070ea5b57e7c9f251743e491b019532adcef562 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_indispositions.py | fabc644b17f9f752e045cebb4233bf3276caa5da | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py |
#calss header
class _INDISPOSITIONS():
def __init__(self,):
self.name = "INDISPOSITIONS"
self.definitions = indisposition
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['indisposition']
| [
"[email protected]"
]
| |
11b3528b2e69e8e20b3ffec5e3cabb26665f60f8 | 7653ddbbc2256fae9cc62251f0241d0e9696df7d | /pyshtools/spectralanalysis/cross_spectrum.py | 5b48e4b63cf25c38d0ad3ff3a882735c27d890b2 | [
"BSD-3-Clause"
]
| permissive | SHTOOLS/SHTOOLS | c3415b38da290805ecdfd59699587e5ac5233cc8 | 93e77dcc6b36b2363f07d79d07ec47d86e6cba65 | refs/heads/master | 2023-08-31T01:35:49.211882 | 2023-08-28T10:50:08 | 2023-08-28T10:50:08 | 24,725,612 | 315 | 117 | BSD-3-Clause | 2023-08-28T10:50:10 | 2014-10-02T15:53:36 | Python | UTF-8 | Python | false | false | 6,773 | py | import numpy as _np
from scipy.special import factorial as _factorial
def cross_spectrum(clm1, clm2, normalization='4pi', degrees=None, lmax=None,
convention='power', unit='per_l', base=10.):
"""
Return the cross-spectrum of the spherical harmonic coefficients as a
function of spherical harmonic degree.
Usage
-----
array = cross_spectrum(clm1, clm2, [normalization, degrees, lmax,
convention, unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm1 : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the first set of spherical harmonic coefficients.
clm2 : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the second set of spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2-norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Notes
-----
This function returns either the cross-power spectrum, cross-energy
spectrum, or l2-cross-norm spectrum. Total cross-power is defined as the
integral of the clm1 times the conjugate of clm2 over all space, divided
by the area the functions span. If the mean of the functions is zero,
this is equivalent to the covariance of the two functions. The total
cross-energy is the integral of clm1 times the conjugate of clm2 over all
space and is 4pi times the total power. The l2-cross-norm is the
sum of clm1 times the conjugate of clm2 over all angular orders as a
function of spherical harmonic degree.
    The output spectrum can be expressed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l, and is equal to the
    'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
    the total spectrum from all angular orders over an infinitesimal
    logarithmic degree band. The contribution in the band dlog_a(l) is
    spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
    spectrum(l, 'per_dlogl') is equal to spectrum(l, 'per_l')*l*log(a).
    """
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The normalization must be '4pi', 'ortho', " +
"'schmidt', or 'unnorm'. Input value was {:s}."
.format(repr(normalization)))
if convention.lower() not in ('power', 'energy', 'l2norm'):
raise ValueError("convention must be 'power', 'energy', or " +
"'l2norm'. Input value was {:s}"
.format(repr(convention)))
if unit.lower() not in ('per_l', 'per_lm', 'per_dlogl'):
raise ValueError("unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
"Input value was {:s}".format(repr(unit)))
if _np.iscomplexobj(clm1) is not _np.iscomplexobj(clm2):
raise ValueError('clm1 and clm2 must both be either real or ' +
'complex. \nclm1 is complex : {:s}\n'
.format(repr(_np.iscomplexobj(clm1))) +
'clm2 is complex : {:s}'
.format(repr(_np.iscomplexobj(clm2))))
if lmax is None:
lmax = len(clm1[0, :, 0]) - 1
if degrees is None:
degrees = _np.arange(lmax+1)
if _np.iscomplexobj(clm1):
array = _np.empty(len(degrees), dtype=_np.complex128)
else:
array = _np.empty(len(degrees))
if normalization.lower() == 'unnorm':
if convention.lower() == 'l2norm':
raise ValueError("convention can not be set to 'l2norm' when " +
"using unnormalized harmonics.")
for i, l in enumerate(degrees):
ms = _np.arange(l+1)
conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
if _np.iscomplexobj(clm1):
array[i] = (conv[0:l + 1] * clm1[0, l, 0:l + 1] *
clm2[0, l, 0:l + 1].conjugate()).real.sum() + \
(conv[1:l + 1] * clm1[1, l, 1:l + 1] *
clm2[1, l, 1:l + 1].conjugate()).real.sum()
else:
conv[1:l + 1] = conv[1:l + 1] / 2.
array[i] = (conv[0:l + 1] * clm1[0, l, 0:l+1]**2).sum() + \
(conv[1:l + 1] * clm2[1, l, 1:l+1]**2).sum()
else:
for i, l in enumerate(degrees):
if _np.iscomplexobj(clm1):
array[i] = (clm1[0, l, 0:l + 1] *
clm2[0, l, 0:l + 1].conjugate()).sum() + \
(clm1[1, l, 1:l + 1] *
clm2[1, l, 1:l + 1].conjugate()).sum()
else:
array[i] = (clm1[0, l, 0:l + 1] * clm2[0, l, 0:l + 1]).sum() \
+ (clm1[1, l, 1:l + 1] * clm2[1, l, 1:l + 1]).sum()
if convention.lower() == 'l2norm':
return array
else:
if normalization.lower() == '4pi':
pass
elif normalization.lower() == 'schmidt':
array /= (2. * degrees + 1.)
elif normalization.lower() == 'ortho':
array /= (4. * _np.pi)
if convention.lower() == 'energy':
array *= 4. * _np.pi
if unit.lower() == 'per_l':
pass
elif unit.lower() == 'per_lm':
array /= (2. * degrees + 1.)
elif unit.lower() == 'per_dlogl':
array *= degrees * _np.log(base)
return array
| [
"[email protected]"
]
| |
b9c464b3bef52750e1eb2aeee448f5b6b3831cf5 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/acl/acl.py | 3d4dcafee709a227b8b3d2212c124493b9b78f7a | []
| no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 3,907 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ACL(Mo):
meta = ClassMeta("cobra.model.acl.ACL")
meta.isAbstract = True
meta.moClassName = "aclACL"
meta.moClassName = "aclACL"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Access control list"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.pol.Instr")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.concreteSubClasses.add("cobra.model.ipv6acl.ACL")
meta.concreteSubClasses.add("cobra.model.ipv4acl.ACL")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 28441, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
97538fec829806a6dc0663c869c8d080db247647 | f62aa26461e381435c86019ca2f502d10ff75b88 | /catalog/migrations/0006_auto_20170121_1008.py | aa8cfe9522994670ffaf090ae699983f1dd31edd | []
| no_license | viktortat/CAP | edb2aef09169d9bcf04b541682d8dcb067edf1c6 | 60221d8fa1d1ccb209e40001554cb004480dd2d5 | refs/heads/master | 2021-06-12T13:03:33.917809 | 2017-04-30T19:58:38 | 2017-04-30T19:58:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 07:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('catalog', '0005_auto_20170113_0259'),
]
operations = [
migrations.AddField(
model_name='catalogsite',
name='pub_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата'),
),
migrations.AlterField(
model_name='catalogsite',
name='price',
field=models.FloatField(default=0),
),
]
| [
"[email protected]"
]
| |
a7cab9582745010178a2ff7a604325a6acc85ace | f2d961fe88b67f9d1eb52170d4df7c0363fae073 | /paper/plottraces_wt.py | 0f6fb632d5797568f4da3376d5ce5e68dd5a2795 | []
| no_license | acvmanzo/mn | 12ffbf1aae9c8088a8461bb2e7f4823e31329338 | c1d52e65e0bdec504d4e3954ad02ffe91f7f31ad | refs/heads/master | 2021-01-23T04:23:47.366109 | 2014-06-08T11:05:11 | 2014-06-08T11:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,820 | py | import mn.dftf.dftf as dftf
import os
import matplotlib.pyplot as plt
import numpy as np
import operator
from mn.cmn.cmn import *
import matplotlib
import pickle
import matplotlib as mpl
# Plots raw traces on one graph from the movies specified below.
# Run from the 'data' folder; in the 'data' folder are individual movie folders
# (similar to the experiment/data folders).
# Same values as in dftf.py, but only plotting roi1.
DFTSIZE=10000
RESULTS_FILE = 'results1.txt'
PARAMS_FILE = 'params'
CORRPARAMS_FILE = 'corrparams'
HZ_BOUND1 = 0.5
HZ_BOUND2 = 'end'
KEYLIST = 'keylist'
COLS= ['Mean1']
ROIS = ['roi1']
TYPE = 'raw' # Choose 'dft' or 'raw'
if TYPE == 'dft':
PLOTNAME = 'dfttraces.png'
YLABEL = 'Amplitude'
XLABEL = 'Hz'
YMIN = 0
YLIM = 4
if TYPE == 'raw':
PLOTNAME = 'rawtraces'
YLABEL = 'Arbitrary Intensity'
XLABEL = 'Time (s)'
YMIN = -5
YLIM = 90
FONTSIZE = 6.7 # Font size for tick labels, axis labels.
FIGW = 1.75 # Figure width in inches
FIGH = 2.5 # Figure height in inches
FIGDPI = 600 # Figure dpi
BORDER = 'no'
YAXISTICKS = 2
TIME = 1 # Length of time the traces show.
XLIMHZ = 10
LINEWIDTH = 0.75
# Dictionary where the keys are the movie names and the values are the condition, the y offset of
# the trace (so that they aren't on top of each other), and the color the of the trace.
#MOVIES = {'mov_20101130_200135': ['112648-GAL4', 32.5+1, 'k'], 'mov_20110803_190537': ['UAS-TNT', 14+1, 'b'], 'mov_20101213_193258': ['112648 x TNT', 0, 'r']}
#DFT_MOVIES = {'mov_20101130_200135': ['112648-GAL4', 3.1-0.25, 'k'], 'mov_20110803_190537': ['UAS-TNT', 1.8-0.25, 'b'], 'mov_20101213_193258': ['112648 x TNT', 0.25, 'r']}
#MOVIES = {'mov_20110518_184507': ['24', 70, 'k'], 'mov_20110518_185105': ['30', 20, 'b'], 'mov_20110518_184217': ['24', 50, 'k'], 'mov_20110518_184849': ['30', 0, 'b']}
#MOVIES = {'mov_20101130_200533': ['control', 45, 'k'], 'mov_20110518_191243': ['112648 x dtrpa1 - 24', 30, 'b'], 'mov_20110527_163607_part2' :['112648 x dtrpa1 - 32', 15, 'r'], 'mov_20110518_192012': ['112648 x dtrpa1 - 32', -5, 'r']}
#MOVIES = {'mov_20110830_152007': ['24 h/100 mM suc', 70, 'k', '(i) '], 'mov_20110830_192926': ['10 h/100 mM suc', 45, 'k', '(ii) '], 'mov_20110901_182709' :['24 h/500 mM suc', 20, 'k', '(iii) '], 'mov_20110113_180524': ['500 mM suc + 2.5% MC', -1, 'k', '(iv) ']}
MOVIES = {'mov_20110830_192926': ['10 h/100 mM suc', 70, 'k', '(i) '],
'mov_20110830_152007': ['24 h/100 mM suc', 45, 'k', '(ii) '],
'mov_20110901_182709' :['24 h/500 mM suc', 20, 'k', '(iii) '],
'mov_20110113_180524': ['24 h/500 mM suc + 2.5% MC', -1, 'k', '(iv) ']}
matplotlib.rc('axes', linewidth=LINEWIDTH)
def oneplot(moviedict, toplotdict, figw, figh, figdpi, fontsz, border, ylabel, ylim, time, ymin,
lw):
"""Moviedict is the above dictionary of movies, toplotdict is a dictionary produced by
toplot(), and other values are what's specified as global variables."""
print(toplotdict.keys())
fontv = mpl.font_manager.FontProperties()
# Uncomment line below to set the font to verdana; the default matplotlib font is very
# similar (just slightly narrower).
fontv = mpl.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')
fontv.set_size(fontsz)
fig1 = plt.figure(figsize=(figw, figh), dpi=figdpi, facecolor='w', edgecolor='k')
#Plots data on one graph with parameters specified in the moviedict directory.
for k, v in moviedict.iteritems():
print(k)
cond1, offset, color, inum = v
xvals = toplotdict[k][0]
data = toplotdict[k][1] + offset
condition = cond1
plt.plot(xvals, data, color, linewidth=lw, label=condition)
print(condition)
#if k == 'mov_20110113_180524':
#plt.text(0.5, offset+7, inum+condition, horizontalalignment='left',
#fontproperties=fontv)
#else:
#plt.text(0.5, offset+9, inum+condition, horizontalalignment='left',
#fontproperties=fontv)
if k == 'mov_20110113_180524':
plt.text(0.05, offset+7, inum+condition, horizontalalignment='left',
fontproperties=fontv)
else:
plt.text(0.05, offset+9, inum+condition, horizontalalignment='left',
fontproperties=fontv)
ax = plt.gca()
## Plots legend.
#legend = plt.legend()
### Manipulates order of the legend entries.
##handles, labels = ax.get_legend_handles_labels()
##handles2 = handles[0], handles[2], handles[1], handles[3]
##labels2 = labels[0], labels[2], labels[1], labels[3]
##legend = ax.legend(handles2, labels2, bbox_to_anchor=(0, 0, 1, 1), transform=plt.gcf().transFigure)
### Changes legend font to fontsz.
#ltext = legend.get_texts()
#plt.setp(ltext, fontsize=fontsz)
### Removes border around the legend.
#legend.draw_frame(False)
#Uncomment lines below to display without top and right borders.
if border == 'no':
for loc, spine in ax.spines.iteritems():
if loc in ['left','bottom']:
pass
elif loc in ['right','top']:
spine.set_color('none') # don't draw spine
else:
raise ValueError('unknown spine location: %s'%loc)
#Uncomment lines below to display ticks only where there are borders.
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
## Removes tick labels and ticks from yaxis.
ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
# Specifies axis labels and axis tick label sizes.
plt.xlabel(XLABEL, fontproperties=fontv, labelpad=4)
plt.ylabel(ylabel, fontproperties=fontv, labelpad=4)
plt.xticks(fontproperties=fontv)
plt.yticks(fontproperties=fontv)
# Specifies axis limits.
plt.axis( [0, time, ymin, ylim])
# Adjusts the space between the plot and the edges of the figure; (0,0) is the lower
#lefthand corner of the figure.
fig1.subplots_adjust(top=0.95)
fig1.subplots_adjust(left=0.15)
#fig1.subplots_adjust(right=0.95)
fig1.subplots_adjust(bottom=0.15)
def gentoplot(time):
"""Generates a dictionary where the keys are movie names and the values are the raw trace for
plotting. Time specifies the length of time in seconds of the plots shown."""
toplot = {}
# Generates a list of movie paths in the data folder.
files = dftf.batch_s('.')
# Generates dft traces and plots for each roi in each movie.
for file in files:
os.chdir(file)
print(os.path.basename(file))
for col in COLS:
if os.path.exists('params') == True:
rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE,
corrparamsfile=CORRPARAMS_FILE, colname=col)
td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)
moviename = os.path.basename(os.path.abspath('.'))
# Selects the area of the raw trace to plot.
frames = time * td['fps']
#print(frames)
plottime = td['seltrace'][:frames]/6
#print(len(plottime))
ms = plottime-np.mean(plottime)
xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))
#print(xsec)
condition = td['condition']
toplot[moviename] = [xsec, ms, condition]
print(np.max(ms), np.min(ms))
return(toplot)
def gentoplot_dft(xlimhz):
toplot = {}
# Generates a list of movie paths in the data folder.
files = dftf.batch_s('.')
# Generates dft traces and plots for each roi in each movie.
for file in files:
os.chdir(file)
print(os.path.basename(file))
for col in COLS:
if os.path.exists('params') == True:
rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE,
corrparamsfile=CORRPARAMS_FILE, colname=col)
td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)
condition = td['condition']
m = td['peakf']
xpoints = np.linspace(0, td['fps']/2, td['dftsize']/2)
prop = xlimhz/(td['fps']/2)
tracelen = np.rint(prop*len(td['dftnormtrunctrace']))
toplot[td['moviename']] = [xpoints[:tracelen],
td['dftnormtrunctrace'][:tracelen], condition]
return(toplot)
if TYPE == 'dft':
toplot = gentoplot_dft(XLIMHZ)
#oneplot(MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, TIME)
oneplot(DFT_MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, XLIMHZ, YMIN)
# Saves the figures in plots/plots.
plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))), 'plots')
makenewdir(plotfolder)
figname = os.path.join(plotfolder, PLOTNAME)
plt.savefig(figname, dpi=FIGDPI)
# Saves a file showing the movies I used for the plot.
fname = os.path.join(plotfolder, 'movies_used_for_dfttraces.txt')
with open(fname, 'w') as f:
for k, v in MOVIES.iteritems():
f.write(k + ' ' + v[0] + '\n')
if TYPE == 'raw':
toplot = gentoplot(TIME)
oneplot(MOVIES, toplot, FIGW, FIGH, FIGDPI, FONTSIZE, BORDER, YLABEL, YLIM, TIME, YMIN,
LINEWIDTH)
# Saves the figures in plots/plots.
plotfolder = os.path.join(os.path.dirname(os.path.abspath('../')), 'plots')
makenewdir(plotfolder)
figname = os.path.join(plotfolder, PLOTNAME)
plt.savefig(figname+'.svg', dpi=FIGDPI)
plt.savefig(figname+'.png', dpi=FIGDPI)
# Saves a file showing the movies I used for the plot and a pickle file with all the variables.
fname = os.path.join(plotfolder, 'movies_used_for_rawtraces.txt')
with open(fname, 'w') as f:
for k, v in MOVIES.iteritems():
f.write(k + ' ' + v[0] + '\n')
picklename = os.path.join(plotfolder, 'picklefile')
with open(picklename, 'w') as h:
d = {}
d['MOVIES'] = MOVIES
d['FONTSIZE'] = FONTSIZE
d['FIGW'] = FIGW
d['FIGH'] = FIGH
d['FIGDPI'] = FIGDPI
d['YAXISTICKS'] = YAXISTICKS
d['TIME'] = TIME
d['XLIMHZ'] = XLIMHZ
d['PLOTNAME'] = PLOTNAME
d['YLABEL'] = YLABEL
d['XLABEL'] = XLABEL
d['YMIN'] = YMIN
d['YLIM'] = YLIM
print(d)
picklefile = pickle.Pickler(h)
picklefile.dump(d)
| [
"[email protected]"
]
| |
e98b22fa6ef267f696bb0d745c79f47d0d9e171b | 20f16917c9245aae71cb50fcc4b3e34e1e2a5006 | /LessonThree/Python07/src/Story_start.py | bf42f5f5399b21892641b105ca409e6280efa206 | []
| no_license | yinsendemogui/Alex | f4bce794efb5cacdf547c420d7a3a3c5d27be5c8 | eeb230b9028ced5c7fc0f293c1d4d7b98c521721 | refs/heads/master | 2020-06-11T19:17:41.397658 | 2017-01-07T15:50:48 | 2017-01-07T15:50:48 | 75,628,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,646 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: Mr.chen
# Description:
import time,os,sys
sys.path.append('..')
from lib import common
# from lib.Players_model import players_Model
DIR = os.path.dirname(__file__)
DIR = DIR.replace('src','db/')
TAG = True
def Pre_chapter(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *预章:传说* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
相传很久以前,于古国疆域,有一奇人姓夸名父.
以大力闻于世间,以才智惊于圣贤,以风韵传于万载..
忽一日,慕之者至.询问之,其曰...
吾父乃真之才,生于凡中.无师而达天地...
终其一生教化万民,此乃吾真之所持..
父之事迹.且听我慢慢道来...
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_first_chapter(user)
def The_first_chapter(user):
# dict = common.log_info_read(DIR + 'config_conf')
# for S in dict['students']:
# if S.Name == user.Name:
time.sleep(2)
introduce = """
登场人物介绍
姓名:{0}
年龄:{1}
国籍:{2}
特长:{3}
体力:{4}
武力:{5}
智力:{6}
魅力:{7}
秘籍:无
点评:屌丝,唯撩妹甚
姓名:灵儿
年龄:22
国籍:china
特长:
体力:1000
武力:70
智力:70
魅力:100
秘籍:游戏保护,万法不侵
点评:白富美
""".format(user.Name,user.Age,user.Nationality,user.Specialty,user.Strength,user.Force,user.IQ,user.Charm)
for i in introduce.decode('utf-8'):
if i != ' ':
time.sleep(0.2)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第一章:缘启* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
我的父亲叫做{0},本是一介草民,少时机缘之下,
救助了一个跳河自杀之人,本也并无所求,只因
我父那时在河中捕鱼,闲河中波澜太盛,吓跑鱼儿,
故,救之,以安抚鱼心。谁想此人竟是一小门派
掌教之子,因修炼走火,盲目间跌落河中。恰逢我父
出海,机缘所致,掌教有感我父恩德,故收其为徒,
传功授法,指引修行。说来也怪,我父不论武力{1},
智力{1}魅力{2}尽数低于常人,但唯独撩妹能力
极其出众,故派中最小师妹灵儿常伴左右,个中滋味
不足为外人道也。
""".format(user.Name,user.Force,user.IQ,user.Charm)
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_second_chapter(user)
def The_second_chapter(user):
time.sleep(2)
introduce = """
登场人物介绍
姓名:高富帅
年龄:34
国籍:china
特长:有钱有势
体力:1000
武力:70
智力:70
魅力:70
秘籍:无
点评:如其名
"""
for i in introduce.decode('utf-8'):
if i != ' ':
time.sleep(0.2)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第二章:幻灭* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
我父和灵儿就这样朝夕相处,日久生情,只待谈婚论嫁之时。
但,世事难料。一日,掌门大寿,宴请四方,祝寿者繁多。
有一人姓高名富帅,乃当朝一品大员之子,见灵儿貌美,
意欲图之。在其下手一刻,幸被我父所阻,于是心生恨意,
命其下人,禀报大员,以圣上赐婚为由,向掌门施压。怎料,
掌门欲息事宁人,遂命灵儿随高富帅回京,奉旨完婚。师命
难违,灵儿纵千般不愿,亦感无可奈何。临行前,挥泪别过,
劝我父放下仇恨,勿思勿念。我父伤心之余,亦感自身渺小。
暗发宏愿,以期报仇雪恨,救灵儿于水火之间。
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
The_third_chapter(user)
def The_third_chapter(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第三章:暗涛* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
灵儿事毕,我父再无心静修,辞别掌教,下山入世。
得一高人指点,拜于一隐门之中,勤学苦练,终得
真传。我父正欲出山报仇,被隐门上士所阻,言道
京城宦官家有一大内高手田伯光,武力高达90有余,
欲胜之需闯本门的锁妖塔拿一绝世宝物(双倍暴击率)
方可成行。
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
time.sleep(2)
while TAG:
text = """
剧情分支选择如下:
1,听劝
2,不听劝
"""
print (text)
choose = raw_input("请输入索引进行选择")
if choose == '1':
Lock_demon_tower(user)
elif choose == '2':
Fail_ending_one()
else:
print ("你的选择有误!")
def Lock_demon_tower(user):
List = []
dict = common.log_info_read(DIR + 'config_conf')
for pobj in dict['players']:
if pobj.Name == user.Name:
P = pobj
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第四章:勇闯锁妖塔* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
反复思量,我父还是决定暂缓报仇,遵从隐士的看法,
独自一人来到锁妖塔前,看者前方雄伟的高达{0}
层的锁妖塔,暗下决心,要尽快完成闯塔拿到宝物.
于是,我父来到了塔下的驿站里...
""".format(str(len(user.Tlist_obj)))
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
while TAG:
test = """
请问现在你想去哪?
1,闯塔
2,打开背包(吃药) 你还有{0}体力
3,不闯了,直接去报仇
""".format(str(P.Strength))
print (test)
choose = raw_input("请输入索引进行选择:")
num = 0
bum = 0
if choose == '1':
for tobj in dict['towers']:
if P.schedule[tobj] == 100:
schedule = '已达成'
bum += 1
else:
schedule = P.schedule[tobj]
print ("{0},{1},难度系数:{2},进度率:{3}%,创塔次数:{4}次".format(str(num+1),tobj.Lname,tobj.Difficulty,str(schedule),str(P.num[tobj])))
if bum == len(P.Tlist_obj):
print ("{0},锁妖塔顶层,难度系统:0".format(str(num+2)))
num += 1
List.append(str(num))
decide = raw_input("请输入索引进行选择:")
if decide == str(len(P.Tlist_obj)+1) and bum == len(P.Tlist_obj):
Lock_demon_tower_Top(user)
if decide in List:
if P.schedule[dict['towers'][int(decide)-1]] < 100:
for i in range(10):
re = P.Begins(dict['towers'][int(decide)-1])
if re == False:
common.log_info_write(DIR + 'config_conf', dict)
break
else:
common.log_info_write(DIR + 'config_conf', dict)
else:
print ("本层已经闯过了!")
else:
print ("你的输入有误!")
elif choose == '2':
while TAG:
text = """
背囊物品如下: 你还有{0}体力
1,大还丹:{1}个
                2,小还丹:{2}个
                """.format(str(P.Strength),str(P.Item['大还丹']),str(P.Item['小还丹']))
print (text)
choose = raw_input("请输入索引进行选择:")
if choose == '1':
if P.Item['大还丹'] > 0 :
P.Item['大还丹'] -= 1
P.Strength += 500
common.log_info_write(DIR + 'config_conf', dict)
break
else:
print ("大还丹个数为0")
break
elif choose == '2':
if P.Item['小还丹'] > 0:
P.Item['小还丹'] -= 1
P.Strength += 200
common.log_info_write(DIR + 'config_conf', dict)
break
else:
print ("小还丹个数为0")
break
else:
print ("你的输入有误!请重新输入!")
elif choose == '3':
Fail_ending_one()
else:
print ("你的输入有误!")
def Lock_demon_tower_Top(user):
dict = common.log_info_read(DIR + 'config_conf')
for pobj in dict['players']:
if pobj.Name == user.Name:
P = pobj
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *第五章:锁妖塔顶* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
克服磨难,吾父终至,锁妖塔顶。与前相比,此地奇静。
地方不大,有水缸一口,两人高有余。好奇之下,
侧身观之,怎料竟有活人居于缸内,遂上前,救出。
原来此人就是灵儿。询问下,方知,那日毕,其心已死,
趁高富帅不备,遂逃出,寻短见,幸被隐门上士所救,居
此疗伤,恰逢我父闯塔,喜得相逢。至此,我父恍然,直呼,
此宝胜万宝也(主角瞬间满怒体力翻倍)
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
P.Strength = P.Strength * 2
common.log_info_write(DIR + 'config_conf', dict)
Wu_Duo(user)
def Wu_Duo(user):
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *终章:武夺* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
经过不懈的努力,战胜了诸多困苦(实在懒得编了),
我们的主角终于和美女团结友爱的在一起生活,剧终
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
exit()
def Fail_ending_one():
time.sleep(2)
title = """
* * * * * * * * * * * * * * * * * * * * * * *终章:武夺* * * * * * * * * * * * * * * * * * * * * * *
"""
print (title)
time.sleep(5)
text = """
报仇心切,我父终是不肯听劝,遂一人趁夜逃出隐门,
数日后,进京踩点,待万事俱备只欠东风之时,奈何
大员祖宅大内高手,先知先觉,早已暗随我父三日有余,
眼见我父正待出手,遂突袭之,我父重伤,感叹报仇无望,
自此隐居山林,不问世事.....BAD END......
"""
for i in text.decode('utf-8'):
if i != ' ':
time.sleep(0.5)
print i.encode('utf-8'),
else:
print i.encode('utf-8'),
    exit()
# === source file: /app.py (repo: tsungic/MVP-backend) ===
from flask import Flask, request, Response
from flask_cors import CORS
import dbcreds
import mariadb
import json
import secrets
app = Flask(__name__)
CORS(app)
@app.route("/api/users", methods=["GET","POST","PATCH","DELETE"])
def users():
if request.method =="GET":
user_id = request.args.get("userId")
conn = None
cursor = None
users_data = None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
if user_id:
cursor.execute("SELECT * FROM users where id =?", [user_id])
users_data = cursor.fetchall()
else:
cursor.execute("SELECT * FROM users")
users_data = cursor.fetchall()
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if users_data or users_data ==[]:
users_info =[]
for user in users_data:
user_dic={
"userId": user[0],
"email": user [1],
"name": user [3]
}
users_info.append(user_dic)
return Response(json.dumps(users_info, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method =="POST":
conn = None
cursor = None
user_info = request.json
name = user_info.get("name")
password = user_info.get("password")
email = user_info.get("email")
user_session_id = None
if email!=None and email !="" and name!=None and name !="" and password!=None and password !="" :
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("INSERT INTO users (email, password, name) VALUES (?,?,?)", [email, password, name])
conn.commit()
user_id = cursor.lastrowid
login_token= secrets.token_urlsafe(20)
cursor.execute("INSERT INTO user_session (user_id, loginToken) VALUES (?,?)", [user_id, login_token])
conn.commit()
user_session_id = cursor.lastrowid
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if user_session_id != None:
user_dic={
"userId": user_id,
"email": email,
"name": name,
"loginToken": login_token
}
return Response(json.dumps(user_dic, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method == "PATCH":
user_info = request.json
conn = None
cursor = None
name = user_info.get("name")
password = user_info.get("password")
email = user_info.get("email")
login_token = user_info.get("loginToken")
user= None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
if email != None and email !="" and login_token != None and login_token !="":
#get userid based on login token
cursor.execute("SELECT user_id FROM user_session where loginToken = ?",[login_token])
user_id = cursor.fetchone()[0]
#can update user table based on user id
cursor.execute("UPDATE users SET email = ? where id = ?", [email, user_id])
if name != None and name !="" and login_token != None and login_token !="":
cursor.execute("SELECT user_id FROM user_session where loginToken = ?",[login_token])
user_id = cursor.fetchone()[0]
cursor.execute("UPDATE users SET name = ? where id = ?", [name, user_id])
if password != None and password !="" and login_token != None and login_token !="":
cursor.execute("SELECT user_id FROM user_session where loginToken = ?",[login_token])
user_id = cursor.fetchone()[0]
cursor.execute("UPDATE users SET password = ? where id = ?", [password, user_id])
conn.commit()
row=cursor.rowcount
cursor.execute("SELECT * FROM users where id = ?", [user_id])
user = cursor.fetchone()
except Exception as e:
print (e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if user != None:
user_dic={
"userId": user[0],
"email": user [1],
"name": user[3]
}
return Response(json.dumps(user_dic, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method == "DELETE":
user_info = request.json
conn = None
cursor = None
password = user_info.get("password")
login_token = user_info.get("loginToken")
        row = None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("SELECT user_id FROM user_session WHERE loginToken = ?",[login_token])
user_id = cursor.fetchone()[0]
if password != None and password !="" and login_token != None and login_token !="":
cursor.execute("DELETE FROM users WHERE id = ?",[user_id])
conn.commit()
row=cursor.rowcount
except Exception as e:
print (e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
        if row == 1:
return Response("Delete successful", mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
@app.route("/api/login", methods=["POST", "DELETE"])
def login():
if request.method == "POST":
conn = None
cursor = None
users_data = None
user_info = request.json
password = user_info.get("password")
email = user_info.get("email")
login_rows = None
user_data = None
if email !="" and email !=None and password !="" and password !=None:
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("SELECT * FROM users where email =? AND password =?", [email, password])
user_data = cursor.fetchone()
rows = cursor.rowcount
#to login need user id, can get from fetch one(which hold all user data)
if (user_data != None):
#user id is first row in db-0
user_id = user_data[0]
login_token = secrets.token_urlsafe(20)
cursor.execute("INSERT INTO user_session (user_id, loginToken) VALUES (?,?)",[user_id, login_token])
conn.commit()
#login_rows check if insertion is done correct
login_rows = cursor.rowcount
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
#determine if login is working or not
if(login_rows != None):
#return user date
user_dic = {
"userId": user_data[0],
"email": user_data [1],
"name": user_data[3],
"loginToken": login_token
}
return Response(json.dumps(user_dic, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method =="DELETE":
login_token = request.json.get("loginToken")
rows = None
if login_token != None and login_token !="":
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("DELETE FROM user_session where loginToken = ?", [login_token])
conn.commit()
rows = cursor.rowcount
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if (rows == 1):
return Response("logout success", mimetype="text/html", status =204)
else:
return Response ("logout failed", mimetype="text/html", status =404)
@app.route("/api/place", methods=["GET","POST","PATCH","DELETE"])
def place():
if request.method == "GET":
user_id = request.args.get("userId")
conn = None
cursor = None
place_data = None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
if user_id:
cursor.execute("SELECT * FROM users u INNER JOIN place p ON u.id = p.user_id WHERE u.id = ?", [user_id])
                place_data = cursor.fetchall()
else:
cursor.execute("SELECT * FROM users u INNER JOIN place p ON u.id = p.user_id")
place_data = cursor.fetchall()
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if place_data or place_data ==[]:
place_info =[]
#create for loop
for place in place_data:
place_dic={
"placeId": place[4],
"userId": place [0],
"name": place [5],
"accomodates": place[6],
"bathrooms": place [7],
"bedrooms": place [8],
"beds": place [9],
"images": place [10],
"price": place [13],
"propertyType": place [14],
"roomType": place[15]
}
place_info.append(place_dic)
return Response(json.dumps(place_info, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method == "POST":
login_token = request.json.get("loginToken")
name = request.json.get("name")
conn = None
cursor = None
place = None
user_id = None
place_id = None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("SELECT user_id FROM user_session WHERE loginToken = ?", [login_token])
user_id = cursor.fetchone()[0]
cursor.execute("INSERT INTO place(user_id, name) VALUES (?,?)", [user_id, name])
conn.commit()
place_id = cursor.lastrowid
cursor.execute("SELECT * FROM users u INNER JOIN place p ON u.id = p.user_id where p.id = ?", [place_id])
place = cursor.fetchone()
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if place or place ==[]:
place_dic={
"placeId": place[4],
"userId": place [0],
"name": place [5],
"accomodates": place[6],
"bathrooms": place [7],
"bedrooms": place [8],
"beds": place [9],
"images": place [10],
"price": place [13],
"propertyType": place [14],
"roomType": place[15]
}
return Response(json.dumps(place_dic, default = str), mimetype="application/json", status=201)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method == "PATCH":
login_token = request.json.get("loginToken")
place_id = request.json.get("placeId")
name = request.json.get("name")
conn = None
cursor = None
user_id = None
rows= None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("SELECT user_id FROM user_session WHERE loginToken = ?", [login_token])
user_id = cursor.fetchone()[0]
cursor.execute("UPDATE place SET name = ? WHERE id=? AND user_id =?", [name, place_id, user_id])
conn.commit()
rows = cursor.rowcount
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if rows != None:
response_dic={
"placeId": place_id,
"name": name,
}
return Response(json.dumps(response_dic, default = str), mimetype="application/json", status=200)
else:
return Response("failure", mimetype="html/text", status=400)
if request.method == "DELETE":
login_token = request.json.get("loginToken")
place_id = request.json.get("placeId")
conn = None
cursor = None
user_id = None
rows= None
try:
conn = mariadb .connect(user=dbcreds.user, password=dbcreds.password, host= dbcreds.host, port= dbcreds.port, database= dbcreds.database)
cursor = conn.cursor()
cursor.execute("SELECT user_id FROM user_session WHERE loginToken = ?", [login_token])
user_id = cursor.fetchone()[0]
cursor.execute("DELETE FROM place WHERE id=? AND user_id =?", [place_id, user_id])
conn.commit()
rows = cursor.rowcount
except Exception as e:
print(e)
finally:
if(cursor !=None):
cursor.close()
if(conn != None):
conn.rollback()
conn.close()
if rows != None:
return Response("Delete success", mimetype="html/text", status=204)
else:
return Response("failure", mimetype="html/text", status=400)
# === source file: /alpha/WHSZIWHEN11.py (repo: haishuowang/work_whs) ===
import numpy as np
import pandas as pd
import os
import sys
from itertools import product, permutations, combinations
from datetime import datetime
import time
import matplotlib.pyplot as plt
from collections import OrderedDict
sys.path.append("/mnt/mfs/LIB_ROOT")
import open_lib.shared_paths.path as pt
from open_lib.shared_tools import send_email
def plot_send_result(pnl_df, sharpe_ratio, subject, text=''):
figure_save_path = os.path.join('/mnt/mfs/dat_whs', 'tmp_figure')
plt.figure(figsize=[16, 8])
plt.plot(pnl_df.index, pnl_df.cumsum(), label='sharpe_ratio={}'.format(sharpe_ratio))
plt.grid()
plt.legend()
plt.savefig(os.path.join(figure_save_path, '{}.png'.format(subject)))
plt.close()
to = ['[email protected]']
filepath = [os.path.join(figure_save_path, '{}.png'.format(subject))]
send_email.send_email(text, to, filepath, subject)
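# Minimal sketch of how plot_send_result is used further down with a daily pnl Series.
# The synthetic pnl below is an assumption for illustration only; note that calling this
# actually writes a png under /mnt/mfs/dat_whs/tmp_figure and emails it.
def _demo_plot_send_result():
    dates = pd.date_range('2018-01-01', periods=250, freq='B')
    fake_pnl = pd.Series(np.random.normal(0.0005, 0.01, len(dates)), index=dates)
    sharpe = round(np.sqrt(250) * fake_pnl.mean() / fake_pnl.std(), 4)
    plot_send_result(fake_pnl, sharpe, 'demo_alpha')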
class BackTest:
@staticmethod
def AZ_Load_csv(target_path, index_time_type=True):
if index_time_type:
target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False, parse_dates=True)
else:
target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False)
return target_df
@staticmethod
def AZ_Catch_error(func):
def _deco(*args, **kwargs):
try:
ret = func(*args, **kwargs)
except:
ret = sys.exc_info()
print(ret[0], ":", ret[1])
return ret
return _deco
@staticmethod
def AZ_Time_cost(func):
        def _deco(*args, **kwargs):
            # time the wrapped call itself rather than the decoration step
            t1 = time.time()
            ret = func(*args, **kwargs)
            t2 = time.time()
            print(f'cost_time: {t2 - t1}')
            return ret
        return _deco
@staticmethod
def AZ_Sharpe_y(pnl_df):
return round((np.sqrt(250) * pnl_df.mean()) / pnl_df.std(), 4)
@staticmethod
def AZ_MaxDrawdown(asset_df):
return asset_df - np.maximum.accumulate(asset_df)
def AZ_Col_zscore(self, df, n, cap=None, min_periods=1):
df_mean = self.AZ_Rolling_mean(df, n, min_periods=min_periods)
df_std = df.rolling(window=n, min_periods=min_periods).std()
target = (df - df_mean) / df_std
if cap is not None:
target[target > cap] = cap
target[target < -cap] = -cap
return target
@staticmethod
def AZ_Row_zscore(df, cap=None):
df_mean = df.mean(axis=1)
df_std = df.std(axis=1)
target = df.sub(df_mean, axis=0).div(df_std, axis=0)
if cap is not None:
target[target > cap] = cap
target[target < -cap] = -cap
return target
@staticmethod
def AZ_Rolling(df, n, min_periods=1):
return df.rolling(window=n, min_periods=min_periods)
@staticmethod
def AZ_Rolling_mean(df, n, min_periods=1):
target = df.rolling(window=n, min_periods=min_periods).mean()
target.iloc[:n - 1] = np.nan
return target
@staticmethod
def AZ_Rolling_sharpe(pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None, output=False):
if cut_point_list is None:
cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
rolling_sharpe = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods) \
.apply(lambda x: np.sqrt(year_len) * x.mean() / x.std(), raw=True)
rolling_sharpe.iloc[:int(roll_year * year_len) - 1] = np.nan
cut_sharpe = rolling_sharpe.quantile(cut_point_list)
if output:
return rolling_sharpe, cut_sharpe.round(4)
else:
return cut_sharpe.round(4)
@staticmethod
def AZ_Pot(pos_df, asset_last):
"""
        Compute pnl / turnover * 10000, a measure of how sensitive the pnl is to trading cost.
        :param pos_df: position DataFrame
        :param asset_last: final cumulative pnl
:return:
"""
trade_times = pos_df.diff().abs().sum().sum()
if trade_times == 0:
return 0
else:
pot = asset_last / trade_times * 10000
return round(pot, 2)
@staticmethod
def AZ_Normal_IC(signal, pct_n, min_valids=None, lag=0):
signal = signal.shift(lag)
signal = signal.replace(0, np.nan)
corr_df = signal.corrwith(pct_n, axis=1).dropna()
if min_valids is not None:
signal_valid = signal.count(axis=1)
signal_valid[signal_valid < min_valids] = np.nan
signal_valid[signal_valid >= min_valids] = 1
corr_signal = corr_df * signal_valid
else:
corr_signal = corr_df
return round(corr_signal, 6)
def AZ_Normal_IR(self, signal, pct_n, min_valids=None, lag=0):
corr_signal = self.AZ_Normal_IC(signal, pct_n, min_valids, lag)
ic_mean = corr_signal.mean()
ic_std = corr_signal.std()
ir = ic_mean / ic_std
return ir, corr_signal
@staticmethod
def AZ_Leverage_ratio(asset_df):
"""
        Return the 250-day return divided by the negative of the worst 20-day return.
:param asset_df:
:return:
"""
asset_20 = asset_df - asset_df.shift(20)
asset_250 = asset_df - asset_df.shift(250)
if asset_250.mean() > 0:
return asset_250.mean() / (-asset_20.min())
else:
return asset_250.mean() / (-asset_20.max())
@staticmethod
def AZ_Locked_date_deal(position_df, locked_df):
"""
        Handle days in a backtest where positions must be frozen (suspension, limit moves, etc.).
        :param position_df: position DataFrame
        :param locked_df: tradability mask (tradable days are 1, untradable days are nan)
:return:
"""
position_df_adj = (position_df * locked_df).dropna(how='all', axis=0) \
.fillna(method='ffill')
return position_df_adj
@staticmethod
def AZ_Path_create(target_path):
"""
        Create the target path if it does not already exist.
:param target_path:
:return:
"""
if not os.path.exists(target_path):
os.makedirs(target_path)
@staticmethod
def AZ_split_stock(stock_list):
"""
        Pick out A-share codes from stock_list.
        :param stock_list:
        :return:
        """
        eqa = [x for x in stock_list if (x.startswith('0') or x.startswith('3')) and x.endswith('SZ')
               or x.startswith('6') and x.endswith('SH')]
return eqa
@staticmethod
def AZ_add_stock_suffix(stock_list):
"""
        whs
        Append the exchange suffix to bare numeric A-share codes in stock_list,
        e.g. 000001 becomes 000001.SZ.
:param stock_list:
:return:
"""
return list(map(lambda x: x + '.SH' if x.startswith('6') else x + '.SZ', stock_list))
@staticmethod
def AZ_Delete_file(target_path, except_list=None):
if except_list is None:
except_list = []
assert type(except_list) == list
file_list = os.listdir(target_path)
file_list = list(set(file_list) - set(except_list))
for file_name in sorted(file_list):
os.remove(os.path.join(target_path, file_name))
@staticmethod
def AZ_turnover(pos_df):
diff_sum = pos_df.diff().abs().sum().sum()
pos_sum = pos_df.abs().sum().sum()
if pos_sum == 0:
return .0
return diff_sum / float(pos_sum)
@staticmethod
def AZ_annual_return(pos_df, return_df):
temp_pnl = (pos_df * return_df).sum().sum()
temp_pos = pos_df.abs().sum().sum()
if temp_pos == 0:
return .0
else:
return temp_pnl * 250.0 / temp_pos
def AZ_fit_ratio(self, pos_df, return_df):
"""
传入仓位 和 每日收益
:param pos_df:
:param return_df:
:return: 时间截面上的夏普 * sqrt(abs(年化)/换手率), 当换手率为0时,返回0
"""
sharp_ratio = self.AZ_Sharpe_y((pos_df * return_df).sum(axis=1))
ann_return = self.AZ_annual_return(pos_df, return_df)
turnover = self.AZ_turnover(pos_df)
if turnover == 0:
return .0
else:
return round(sharp_ratio * np.sqrt(abs(ann_return) / turnover), 2)
def AZ_fit_ratio_rolling(self, pos_df, pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None,
output=False):
if cut_point_list is None:
cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
rolling_sharpe, cut_sharpe = self.AZ_Rolling_sharpe(pnl_df, roll_year=roll_year, year_len=year_len,
min_periods=min_periods, cut_point_list=cut_point_list,
output=True)
rolling_return = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods).apply(
lambda x: 250.0 * x.sum().sum())
rolling_diff_pos = pos_df.diff().abs().sum(axis=1).rolling(int(roll_year * year_len),
min_periods=min_periods).apply(
lambda x: x.sum().sum())
rolling_return.iloc[:int(roll_year * year_len) - 1] = np.nan
rolling_diff_pos.iloc[:int(roll_year * year_len) - 1] = np.nan
rolling_fit_ratio = rolling_sharpe * np.sqrt(abs(rolling_return) / rolling_diff_pos)
rolling_fit_ratio = rolling_fit_ratio.replace(np.inf, np.nan)
rolling_fit_ratio = rolling_fit_ratio.replace(-np.inf, np.nan)
cut_fit = rolling_fit_ratio.quantile(cut_point_list)
return cut_fit.round(4)
@staticmethod
def AZ_VAR(pos_df, return_df, confidence_level, backward_len=500, forwward_len=250):
tradeDayList = pos_df.index[:-forwward_len]
col01 = return_df.columns[0]
varList = []
cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
if len(tradeDayList) == 0:
            print('not enough data')
else:
for tradeDay in tradeDayList:
tempPos = pos_df.loc[tradeDay, :]
dayIndex = list(return_df.loc[:tradeDay, col01].index[-backward_len:]) + list(
return_df.loc[tradeDay:, col01].index[:forwward_len])
return_df_c = return_df[list(tempPos.index)]
historyReturn = list(return_df_c.mul(tempPos, axis=1).loc[dayIndex[0]:dayIndex[-1], :].sum(axis=1))
historyReturn.sort()
varList.append(historyReturn[int(len(historyReturn) * confidence_level)])
var = pd.DataFrame({'var': varList}, index=tradeDayList)
var = var.dropna()
var_fit = var.quantile(cut_point_list)
return list(var_fit['var'])
bt = BackTest()
def filter_all(cut_date, pos_df_daily, pct_n, if_return_pnl=False, if_only_long=False):
pnl_df = (pos_df_daily * pct_n).sum(axis=1)
pnl_df = pnl_df.replace(np.nan, 0)
# pnl_df = pd.Series(pnl_df)
    # in-sample performance
return_in = pct_n[pct_n.index < cut_date]
pnl_df_in = pnl_df[pnl_df.index < cut_date]
asset_df_in = pnl_df_in.cumsum()
last_asset_in = asset_df_in.iloc[-1]
pos_df_daily_in = pos_df_daily[pos_df_daily.index < cut_date]
pot_in = AZ_Pot(pos_df_daily_in, last_asset_in)
leve_ratio = AZ_Leverage_ratio(asset_df_in)
if leve_ratio < 0:
leve_ratio = 100
sharpe_q_in_df = bt.AZ_Rolling_sharpe(pnl_df_in, roll_year=1, year_len=250, min_periods=1,
cut_point_list=[0.3, 0.5, 0.7], output=False)
sp_in = bt.AZ_Sharpe_y(pnl_df_in)
fit_ratio = bt.AZ_fit_ratio(pos_df_daily_in, return_in)
ic = round(bt.AZ_Normal_IC(pos_df_daily_in, pct_n, min_valids=None, lag=0).mean(), 6)
sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d = sharpe_q_in_df.values
in_condition_u = sharpe_q_in_df_u > 0.9 and leve_ratio > 1
in_condition_d = sharpe_q_in_df_d < -0.9 and leve_ratio > 1
    # long-short vs long-only
if if_only_long:
in_condition = in_condition_u
else:
in_condition = in_condition_u | in_condition_d
if sharpe_q_in_df_m > 0:
way = 1
else:
way = -1
    # out-of-sample performance
pnl_df_out = pnl_df[pnl_df.index >= cut_date]
out_condition, sharpe_q_out = out_sample_perf_c(pnl_df_out, way=way)
if if_return_pnl:
return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df
else:
return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
fit_ratio, leve_ratio, sp_in, sharpe_q_out
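# Sketch of how filter_all is consumed elsewhere in this file: it takes a daily position frame
# and a forward-return frame (both indexed by date, one column per stock) and reports the
# in-sample / out-of-sample statistics. The argument frames here are placeholders, not real data.
def _demo_filter_all(daily_pos, daily_ret, cut_date='20180601'):
    results = filter_all(cut_date, daily_pos, daily_ret, if_return_pnl=True, if_only_long=False)
    in_cond, out_cond, ic = results[0], results[1], results[2]
    pnl_df = results[-1]
    print(in_cond, out_cond, ic, bt.AZ_Sharpe_y(pnl_df))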
def mul_fun(a, b):
a_l = a.where(a > 0, 0)
a_s = a.where(a < 0, 0)
b_l = b.where(b > 0, 0)
b_s = b.where(b < 0, 0)
pos_l = a_l.mul(b_l)
pos_s = a_s.mul(b_s)
pos = pos_l.sub(pos_s)
return pos
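# mul_fun combines two signals so that only same-sign agreement survives: long*long stays long,
# short*short stays short, and disagreement nets to zero. A tiny worked example (made-up values):
def _demo_mul_fun():
    a = pd.Series([1.0, -1.0, 1.0, 0.0])
    b = pd.Series([1.0, -1.0, -1.0, 1.0])
    # expected output: [1.0, -1.0, 0.0, 0.0]
    print(mul_fun(a, b).tolist())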
def sub_fun(a, b):
return a.sub(b)
def add_fun(a, b):
return a.add(b)
def AZ_Cut_window(df, begin_date, end_date=None, column=None):
if column is None:
if end_date is None:
return df[df.index > begin_date]
else:
return df[(df.index > begin_date) & (df.index < end_date)]
else:
if end_date is None:
return df[df[column] > begin_date]
else:
return df[(df[column] > begin_date) & (df[column] < end_date)]
def AZ_Leverage_ratio(asset_df):
"""
    Return the 250-day return divided by the negative of the worst 20-day return.
:param asset_df:
:return:
"""
asset_20 = asset_df - asset_df.shift(20)
asset_250 = asset_df - asset_df.shift(250)
if asset_250.mean() > 0:
return round(asset_250.mean() / (-asset_20.min()), 2)
else:
return round(asset_250.mean() / (-asset_20.max()), 2)
def pos_daily_fun(df, n=5):
return df.rolling(window=n, min_periods=1).sum()
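# pos_daily_fun turns a one-day order signal into a position held for n days by rolling-summing
# the orders; a single unit order therefore stays in the book for the following n-1 days too.
def _demo_pos_daily_fun():
    orders = pd.Series([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    # with n=3 the single order is carried for three days: [1, 1, 1, 0, 0, 0]
    print(pos_daily_fun(orders, n=3).tolist())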
def AZ_Pot(pos_df_daily, last_asset):
trade_times = pos_df_daily.diff().abs().sum().sum()
if trade_times == 0:
return 0
else:
pot = last_asset / trade_times * 10000
return round(pot, 2)
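# AZ_Pot expresses pnl per unit of turnover, scaled by 10000: the final asset level divided by
# the total absolute position change. Tiny worked example with made-up positions:
def _demo_az_pot():
    pos = pd.DataFrame({'000001.SZ': [0.0, 1.0, 1.0, 0.0]})
    # total |position change| = 1 (entry) + 1 (exit) = 2, so with a final asset of 0.01
    # the pot is 0.01 / 2 * 10000 = 50.0
    print(AZ_Pot(pos, 0.01))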
def out_sample_perf_c(pnl_df_out, way=1):
    # judge the out-of-sample performance by its sharpe
# if cut_point_list is None:
# cut_point_list = [0.30]
# if way == 1:
# rolling_sharpe, cut_sharpe = \
# bt.AZ_Rolling_sharpe(pnl_df_out, roll_year=0.5, year_len=250, cut_point_list=cut_point_list, output=True)
# else:
# rolling_sharpe, cut_sharpe = \
# bt.AZ_Rolling_sharpe(-pnl_df_out, roll_year=0.5, year_len=250, cut_point_list=cut_point_list, output=True)
if way == 1:
sharpe_out = bt.AZ_Sharpe_y(pnl_df_out)
else:
sharpe_out = bt.AZ_Sharpe_y(-pnl_df_out)
out_condition = sharpe_out > 0.8
return out_condition, round(sharpe_out * way, 2)
def create_fun_set_2(fun_set):
mix_fun_set = []
for fun_1, fun_2 in product(fun_set, repeat=2):
exe_str_1 = """def {0}_{1}_fun(a, b, c):
mix_1 = {0}_fun(a, b)
mix_2 = {1}_fun(mix_1, c)
return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
exec(compile(exe_str_1, '', 'exec'))
exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
return mix_fun_set
def create_fun_set_2_(fun_set):
mix_fun_set = {}
for fun_1, fun_2 in product(fun_set, repeat=2):
exe_str_1 = """def {0}_{1}_fun(a, b, c):
mix_1 = {0}_fun(a, b)
mix_2 = {1}_fun(mix_1, c)
return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
exec(compile(exe_str_1, '', 'exec'))
exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
return mix_fun_set
def create_fun_set_2_crt():
fun_2 = mul_fun
mix_fun_set = []
for fun_1 in [add_fun, sub_fun, mul_fun]:
exe_str_1 = """def {0}_{1}_fun(a, b, c):
mix_1 = {0}_fun(a, b)
mix_2 = {1}_fun(mix_1, c)
return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
exec(compile(exe_str_1, '', 'exec'))
exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
return mix_fun_set
def create_fun_set_2_crt_():
fun_2 = mul_fun
mix_fun_set = dict()
for fun_1 in [add_fun, sub_fun, mul_fun]:
exe_str_1 = """def {0}_{1}_fun(a, b, c):
mix_1 = {0}_fun(a, b)
mix_2 = {1}_fun(mix_1, c)
return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
exec(compile(exe_str_1, '', 'exec'))
exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
return mix_fun_set
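# The factories above generate two-step combination functions via exec; create_fun_set_2_crt_()
# for instance returns a dict with the keys 'add_mul_fun', 'sub_mul_fun' and 'mul_mul_fun',
# each mapping (a, b, c) to mul_fun(first_op(a, b), c). A short usage sketch:
def _demo_fun_set():
    fun_set = create_fun_set_2_crt_()
    a = pd.Series([1.0, -1.0])
    b = pd.Series([1.0, -1.0])
    c = pd.Series([1.0, 1.0])
    print(sorted(fun_set.keys()))
    # add_mul_fun(a, b, c) is mul_fun(add_fun(a, b), c)
    print(fun_set['add_mul_fun'](a, b, c).tolist())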
class FactorTest:
def __init__(self, root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict, sector_name,
hold_time, lag, return_file, if_hedge, if_only_long, if_weight=0.5, ic_weight=0.5,
para_adj_set_list=None):
self.root_path = root_path
self.if_save = if_save
self.if_new_program = if_new_program
self.begin_date = begin_date
self.cut_date = cut_date
self.end_date = end_date
self.time_para_dict = time_para_dict
self.sector_name = sector_name
self.hold_time = hold_time
self.lag = lag
self.return_file = return_file
self.if_hedge = if_hedge
self.if_only_long = if_only_long
self.if_weight = if_weight
self.ic_weight = ic_weight
if para_adj_set_list is None:
self.para_adj_set_list = [
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 40, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 40, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1}]
return_choose = self.load_return_data()
self.xinx = return_choose.index
sector_df = self.load_sector_data()
self.xnms = sector_df.columns
return_choose = return_choose.reindex(columns=self.xnms)
self.sector_df = sector_df.reindex(index=self.xinx)
# print('Loaded sector DataFrame!')
if if_hedge:
if ic_weight + if_weight != 1:
exit(-1)
else:
if_weight = 0
ic_weight = 0
index_df_1 = self.load_index_data('000300').fillna(0)
# index_weight_1 = self.load_index_weight_data('000300')
index_df_2 = self.load_index_data('000905').fillna(0)
# index_weight_2 = self.load_index_weight_data('000905')
#
# weight_df = if_weight * index_weight_1 + ic_weight * index_weight_2
hedge_df = if_weight * index_df_1 + ic_weight * index_df_2
self.return_choose = return_choose.sub(hedge_df, axis=0)
# print('Loaded return DataFrame!')
suspendday_df, limit_buy_sell_df = self.load_locked_data()
limit_buy_sell_df_c = limit_buy_sell_df.shift(-1)
limit_buy_sell_df_c.iloc[-1] = 1
suspendday_df_c = suspendday_df.shift(-1)
suspendday_df_c.iloc[-1] = 1
self.suspendday_df_c = suspendday_df_c
self.limit_buy_sell_df_c = limit_buy_sell_df_c
# print('Loaded suspendday_df and limit_buy_sell DataFrame!')
def reindex_fun(self, df):
return df.reindex(index=self.xinx, columns=self.xnms)
@staticmethod
def create_log_save_path(target_path):
top_path = os.path.split(target_path)[0]
if not os.path.exists(top_path):
os.mkdir(top_path)
if not os.path.exists(target_path):
os.mknod(target_path)
@staticmethod
def row_extre(raw_df, sector_df, percent):
raw_df = raw_df * sector_df
target_df = raw_df.rank(axis=1, pct=True)
target_df[target_df >= 1 - percent] = 1
target_df[target_df <= percent] = -1
target_df[(target_df > percent) & (target_df < 1 - percent)] = 0
return target_df
@staticmethod
def pos_daily_fun(df, n=5):
return df.rolling(window=n, min_periods=1).sum()
def check_factor(self, name_list, file_name):
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name)
exist_factor = set([x[:-4] for x in os.listdir(load_path)])
print()
use_factor = set(name_list)
a = use_factor - exist_factor
if len(a) != 0:
print('factor not enough!')
print(a)
print(len(a))
send_email.send_email(f'{file_name} factor not enough!', ['[email protected]'], [], 'Factor Test Warning!')
@staticmethod
def create_all_para(tech_name_list, funda_name_list):
target_list_1 = []
for tech_name in tech_name_list:
for value in combinations(funda_name_list, 2):
target_list_1 += [[tech_name] + list(value)]
target_list_2 = []
for funda_name in funda_name_list:
for value in combinations(tech_name_list, 2):
target_list_2 += [[funda_name] + list(value)]
target_list = target_list_1 + target_list_2
return target_list
    # mask that removes recently listed stocks
def get_new_stock_info(self, xnms, xinx):
new_stock_data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_SECUCODE/LISTSTATE.csv'))
new_stock_data.fillna(method='ffill', inplace=True)
        # trading-day calendar
return_df = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
trade_time = return_df.index
new_stock_data = new_stock_data.reindex(index=trade_time).fillna(method='ffill')
target_df = new_stock_data.shift(40).notnull().astype(int)
target_df = target_df.reindex(columns=xnms, index=xinx)
return target_df
    # mask that removes ST stocks
def get_st_stock_info(self, xnms, xinx):
data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_CHANGEINFO/CHANGEA.csv'))
data = data.reindex(columns=xnms, index=xinx)
data.fillna(method='ffill', inplace=True)
data = data.astype(str)
target_df = data.applymap(lambda x: 0 if 'ST' in x or 'PT' in x else 1)
return target_df
def load_return_data(self):
return_choose = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv'))
return_choose = return_choose[(return_choose.index >= self.begin_date) & (return_choose.index < self.end_date)]
return return_choose
    # load sector data
def load_sector_data(self):
market_top_n = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_10/' + self.sector_name + '.csv'))
market_top_n = market_top_n.reindex(index=self.xinx)
market_top_n.dropna(how='all', axis='columns', inplace=True)
xnms = market_top_n.columns
xinx = market_top_n.index
new_stock_df = self.get_new_stock_info(xnms, xinx)
st_stock_df = self.get_st_stock_info(xnms, xinx)
sector_df = market_top_n * new_stock_df * st_stock_df
sector_df.replace(0, np.nan, inplace=True)
return sector_df
def load_index_weight_data(self, index_name):
index_info = bt.AZ_Load_csv(self.root_path + f'/EM_Funda/IDEX_YS_WEIGHT_A/SECURITYNAME_{index_name}.csv')
index_info = self.reindex_fun(index_info)
index_mask = (index_info.notnull() * 1).replace(0, np.nan)
mkt_cap = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/LICO_YS_STOCKVALUE/AmarketCapExStri.csv'))
mkt_roll = mkt_cap.rolling(250, min_periods=0).mean()
mkt_roll = self.reindex_fun(mkt_roll)
mkt_roll_qrt = np.sqrt(mkt_roll)
mkt_roll_qrt_index = mkt_roll_qrt * index_mask
index_weight = mkt_roll_qrt_index.div(mkt_roll_qrt_index.sum(axis=1), axis=0)
return index_weight
    # limit-up and limit-down days are both treated as untradable
def load_locked_data(self):
raw_suspendday_df = bt.AZ_Load_csv(
os.path.join(self.root_path, 'EM_Funda/TRAD_TD_SUSPENDDAY/SUSPENDREASON.csv'))
suspendday_df = raw_suspendday_df.isnull().astype(int)
suspendday_df = suspendday_df.reindex(columns=self.xnms, index=self.xinx, fill_value=True)
suspendday_df.replace(0, np.nan, inplace=True)
return_df = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
limit_buy_sell_df = (return_df.abs() < 0.095).astype(int)
limit_buy_sell_df = limit_buy_sell_df.reindex(columns=self.xnms, index=self.xinx, fill_value=1)
limit_buy_sell_df.replace(0, np.nan, inplace=True)
return suspendday_df, limit_buy_sell_df
    # load index data
def load_index_data(self, index_name):
data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/INDEX_TD_DAILYSYS/CHG.csv'))
target_df = data[index_name].reindex(index=self.xinx)
return target_df * 0.01
    # load a subset of factors
def load_part_factor(self, sector_name, xnms, xinx, file_list):
factor_set = OrderedDict()
for file_name in file_list:
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + sector_name)
target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl'))
factor_set[file_name] = target_df.reindex(columns=xnms, index=xinx).fillna(0)
return factor_set
    # load a single factor
def load_factor(self, file_name):
factor_set = OrderedDict()
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name)
target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl'))
factor_set[file_name] = target_df.reindex(columns=self.xnms, index=self.xinx).fillna(0)
return factor_set
def deal_mix_factor(self, mix_factor):
if self.if_only_long:
mix_factor = mix_factor[mix_factor > 0]
        # positions on the order date
order_df = mix_factor.replace(np.nan, 0)
        # drop entries hit by limit moves or suspension on the entry day
order_df = order_df * self.sector_df * self.limit_buy_sell_df_c * self.suspendday_df_c
order_df = order_df.div(order_df.abs().sum(axis=1).replace(0, np.nan), axis=0)
order_df[order_df > 0.05] = 0.05
order_df[order_df < -0.05] = -0.05
daily_pos = pos_daily_fun(order_df, n=self.hold_time)
daily_pos.fillna(0, inplace=True)
        # freeze exits hit by limit moves or suspension on the exit day
daily_pos = daily_pos * self.limit_buy_sell_df_c * self.suspendday_df_c
daily_pos.fillna(method='ffill', inplace=True)
return daily_pos
def save_load_control(self, tech_name_list, funda_name_list, suffix_name, file_name):
        # paths for saving and loading parameter sets
result_save_path = '/mnt/mfs/dat_whs/result'
if self.if_new_program:
now_time = datetime.now().strftime('%Y%m%d_%H%M')
if self.if_only_long:
file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
else:
file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_dict = dict()
para_ready_df = pd.DataFrame(list(self.create_all_para(tech_name_list, funda_name_list)))
total_para_num = len(para_ready_df)
if self.if_save:
self.create_log_save_path(log_save_file)
self.create_log_save_path(result_save_file)
self.create_log_save_path(para_save_file)
para_dict['para_ready_df'] = para_ready_df
para_dict['tech_name_list'] = tech_name_list
para_dict['funda_name_list'] = funda_name_list
pd.to_pickle(para_dict, para_save_file)
else:
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
para_all_df = pd.read_pickle(para_save_file)
total_para_num = len(para_all_df)
para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
print(file_name)
print(f'para_num:{len(para_ready_df)}')
return para_ready_df, log_save_file, result_save_file, total_para_num
@staticmethod
def create_all_para_(change_list, ratio_list, tech_list):
target_list = list(product(change_list, ratio_list, tech_list))
return target_list
def save_load_control_(self, change_list, ratio_list, tech_list, suffix_name, file_name):
        # paths for saving and loading parameter sets
result_save_path = '/mnt/mfs/dat_whs/result'
if self.if_new_program:
now_time = datetime.now().strftime('%Y%m%d_%H%M')
if self.if_only_long:
file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
else:
file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_dict = dict()
para_ready_df = pd.DataFrame(list(self.create_all_para_(change_list, ratio_list, tech_list)))
total_para_num = len(para_ready_df)
if self.if_save:
self.create_log_save_path(log_save_file)
self.create_log_save_path(result_save_file)
self.create_log_save_path(para_save_file)
para_dict['para_ready_df'] = para_ready_df
para_dict['change_list'] = change_list
para_dict['ratio_list'] = ratio_list
para_dict['tech_list'] = tech_list
pd.to_pickle(para_dict, para_save_file)
else:
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
para_all_df = pd.read_pickle(para_save_file)
total_para_num = len(para_all_df)
para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
print(file_name)
print(f'para_num:{len(para_ready_df)}')
return para_ready_df, log_save_file, result_save_file, total_para_num
class FactorTestSector(FactorTest):
def __init__(self, *args):
super(FactorTestSector, self).__init__(*args)
def load_tech_factor(self, file_name):
load_path = os.path.join('/media/hdd1/DAT_PreCalc/PreCalc_whs/' + self.sector_name)
target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl')) \
.reindex(index=self.xinx, columns=self.xnms)
if self.if_only_long:
target_df = target_df[target_df > 0]
return target_df
def load_daily_factor(self, file_name):
load_path = f'{self.root_path}/EM_Funda/daily/'
tmp_df = bt.AZ_Load_csv(os.path.join(load_path, file_name + '.csv')) \
.reindex(index=self.xinx, columns=self.xnms)
target_df = self.row_extre(tmp_df, self.sector_df, 0.3)
if self.if_only_long:
target_df = target_df[target_df > 0]
return target_df
def load_jerry_factor(self, file_name):
factor_path = '/mnt/mfs/temp/dat_jerry/signal'
raw_df = bt.AZ_Load_csv(f'{factor_path}/{file_name}')
a = list(set(raw_df.iloc[-1, :100].dropna().values))
tmp_df = raw_df.reindex(index=self.xinx, columns=self.xnms)
if len(a) > 5:
target_df = self.row_extre(tmp_df, self.sector_df, 0.3)
else:
target_df = tmp_df
pass
if self.if_only_long:
target_df = target_df[target_df > 0]
return target_df
def load_whs_factor(self, file_name):
load_path = f'{self.root_path}/EM_Funda/dat_whs/'
tmp_df = bt.AZ_Load_csv(os.path.join(load_path, file_name + '.csv')) \
.reindex(index=self.xinx, columns=self.xnms)
target_df = self.row_extre(tmp_df, self.sector_df, 0.3)
if self.if_only_long:
target_df = target_df[target_df > 0]
return target_df
def load_remy_factor(self, file_name):
load_path = f'{self.root_path}/EM_Funda/DERIVED_F1'
raw_df = bt.AZ_Load_csv(f'{load_path}/{file_name}')
a = list(set(raw_df.iloc[-1, :100].dropna().values))
tmp_df = raw_df.reindex(index=self.xinx, columns=self.xnms)
if len(a) > 5:
target_df = self.row_extre(tmp_df, self.sector_df, 0.3)
else:
target_df = tmp_df
pass
if self.if_only_long:
target_df = target_df[target_df > 0]
return target_df
def single_test(self, name_1):
factor_1 = getattr(self, my_factor_dict[name_1])(name_1)
daily_pos = self.deal_mix_factor(factor_1).shift(2)
in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = filter_all(self.cut_date, daily_pos, self.return_choose,
if_return_pnl=True,
if_only_long=self.if_only_long)
if bt.AZ_Sharpe_y(pnl_df) > 0:
return 1
else:
return -1
def single_test_c(self, name_list):
mix_factor = pd.DataFrame()
for i in range(len(name_list)):
tmp_name = name_list[i]
buy_sell_way = self.single_test(tmp_name)
tmp_factor = getattr(self, my_factor_dict[tmp_name])(tmp_name)
mix_factor = mix_factor.add(tmp_factor * buy_sell_way, fill_value=0)
# daily_pos = self.deal_mix_factor(mix_factor).shift(2)
# in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
# fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = \
# filter_all(self.cut_date, daily_pos, self.return_choose, if_return_pnl=True, if_only_long=False)
# print(in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d,
# pot_in, fit_ratio, leve_ratio, sp_in, sharpe_q_out)
return mix_factor
def single_test_real(self, name_list):
mix_factor = pd.DataFrame()
for i in range(len(name_list)):
tmp_name = name_list[i]
# result_list = self.single_test(tmp_name)
# print(tmp_name, result_list)
# print(1)
buy_sell_way = self.single_test(tmp_name)
tmp_factor = getattr(self, my_factor_dict[tmp_name])(tmp_name)
part_daily_pos = self.deal_mix_factor(tmp_factor).shift(2)
mix_factor = mix_factor.add(part_daily_pos * buy_sell_way, fill_value=0)
daily_pos = mix_factor / len(name_list)
in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = \
filter_all(self.cut_date, daily_pos, self.return_choose, if_return_pnl=True, if_only_long=False)
print(in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d,
pot_in, fit_ratio, leve_ratio, sp_in, sharpe_q_out)
return mix_factor
def load_index_data(index_name, xinx):
data = bt.AZ_Load_csv(os.path.join('/mnt/mfs/DAT_EQT', 'EM_Tab09/INDEX_TD_DAILYSYS/CHG.csv'))
target_df = data[index_name].reindex(index=xinx)
return target_df * 0.01
def get_corr_matrix(cut_date=None):
pos_file_list = [x for x in os.listdir('/mnt/mfs/AAPOS') if x.startswith('WHS')]
return_df = bt.AZ_Load_csv('/mnt/mfs/DAT_EQT/EM_Funda/DERIVED_14/aadj_r.csv').astype(float)
index_df_1 = load_index_data('000300', return_df.index).fillna(0)
index_df_2 = load_index_data('000905', return_df.index).fillna(0)
sum_pnl_df = pd.DataFrame()
for pos_file_name in pos_file_list:
pos_df = bt.AZ_Load_csv('/mnt/mfs/AAPOS/{}'.format(pos_file_name))
cond_1 = 'IF01' in pos_df.columns
cond_2 = 'IC01' in pos_df.columns
if cond_1 and cond_2:
hedge_df = 0.5 * index_df_1 + 0.5 * index_df_2
return_df_c = return_df.sub(hedge_df, axis=0)
elif cond_1:
hedge_df = index_df_1
return_df_c = return_df.sub(hedge_df, axis=0)
elif cond_2:
hedge_df = index_df_2
return_df_c = return_df.sub(hedge_df, axis=0)
else:
print('alpha hedge error')
continue
pnl_df = (pos_df.shift(2) * return_df_c).sum(axis=1)
pnl_df.name = pos_file_name
sum_pnl_df = pd.concat([sum_pnl_df, pnl_df], axis=1)
# plot_send_result(pnl_df, bt.AZ_Sharpe_y(pnl_df), 'mix_factor')
if cut_date is not None:
sum_pnl_df = sum_pnl_df[sum_pnl_df.index > cut_date]
return sum_pnl_df
def get_all_pnl_corr(pnl_df, col_name):
all_pnl_df = pd.read_csv('/mnt/mfs/AATST/corr_tst_pnls', sep='|', index_col=0, parse_dates=True)
all_pnl_df_c = pd.concat([all_pnl_df, pnl_df], axis=1)
a = all_pnl_df_c.iloc[-600:].corr()[col_name]
return a[a > 0.71]
def corr_test_fun(pnl_df, alpha_name):
sum_pnl_df = get_corr_matrix(cut_date=None)
sum_pnl_df_c = pd.concat([sum_pnl_df, pnl_df], axis=1)
corr_self = sum_pnl_df_c.corr()[[alpha_name]]
other_corr = get_all_pnl_corr(pnl_df, alpha_name)
print(other_corr)
self_corr = corr_self[corr_self > 0.7].dropna(axis=0)
print(self_corr)
if len(self_corr) >= 2 or len(other_corr) >= 2:
print('FAIL!')
send_email.send_email('FAIL!\n' + self_corr.to_html(),
['[email protected]'],
[],
'[RESULT DEAL]' + alpha_name)
else:
print('SUCCESS!')
send_email.send_email('SUCCESS!\n' + self_corr.to_html(),
['[email protected]'],
[],
'[RESULT DEAL]' + alpha_name)
print('______________________________________')
return 0
def config_test():
# pass 132.43 5.4 5.66 2.9698 2.58
# factor_str = 'vr_original_45days.csv|R_NETPROFIT_s_QYOY|REMFF.24|wgt_return_p120d_0.2|RQYE_p60d_col_extre_0.2' \
# '|R_NETPROFIT_s_QYOY_and_QTTM_0.3|RQMCL_p345d_continue_ud|RZYE_row_extre_0.2|REMTK.11|M1_p1|M1'
# info_str = 'market_top_300plus_industry_10_15|20|False'
# pass 97.91 4.07 4.34 2.601 3.41
# factor_str = 'news_num_df_20|turn_p120d_0.2|RQMCL_p345d_continue_ud|RQYE_p20d_col_extre_0.2' \
# '|R_FairValChg_TotProfit_s_First|MA_LINE_10_5|vr_afternoon_10min_20days|REMTK.06' \
# '|R_NetCashflowPS_s_First|REMFF.06|M1_p1'
# info_str = 'market_top_300plus_industry_10_15|20|False'
# pass 99.89 3.8 3.26 2.4056 3.04 ?????
# factor_str = 'TotRev_and_mcap_QYOY_Y3YGR_0.3|RQMCL_p345d_continue_ud|RQYE_p10d_col_extre_0.2' \
# '|R_OPEX_sales_QYOY_and_QTTM_0.3|RZYE_p10d_col_extre_0.2' \
# '|TVOL_row_extre_0.2|R_NETPROFIT_s_QYOY_and_QTTM_0.3'
# info_str = 'market_top_300plus_industry_10_15|20|False'
# pass 105.39 4.18 2.92 2.5765 2.71 ziwhen10
# factor_str = 'M1|turn_p150d_0.18|ab_sale_mng_exp|REMFF.24|RZCHE_row_extre_0.2|R_ParentProfit_s_YOY_First' \
# '|RQMCL_p345d_continue_ud|evol_p10d|TVOL_row_extre_0.2|REMTK.06|RZYE_p10d_col_extre_0.2'
# info_str = 'market_top_300plus_industry_10_15|20|False'
# 130.81 5.84 5.78 3.2277 2.54
# factor_str = 'REMFF.08|vr_original_45days.csv|RQYE_row_extre_0.2|evol_p10d|M1|R_Cashflow_s_YOY_First|' \
# 'news_num_df_20|wgt_return_p60d_0.2|R_OPEX_sales_QYOY_and_QTTM_0.3|RQYE_p20d_col_extre_0.2' \
# '|vr_afternoon_10min_20days'
# factor_str = 'REMFF.08|RQYE_row_extre_0.2|evol_p10d|R_Cashflow_s_YOY_First|' \
# 'news_num_df_20|wgt_return_p60d_0.2|R_OPEX_sales_QYOY_and_QTTM_0.3|RQYE_p20d_col_extre_0.2'
# info_str = 'market_top_300plus_industry_10_15|20|False'
# factor_str = 'TotRev_and_mcap_QYOY_Y3YGR_0.3|bulletin_num_df_20|RQYE_p20d_col_extre_0.2|REMWB.03' \
# '|bias_turn_p120d|evol_p20d|wgt_return_p20d_0.2|ADX_40_20_10|RZYE_row_extre_0.2|M1_p2|REMWB.05'
# info_str = 'market_top_300plus_industry_20_25_30_35|20|False'
# pass 99.64 5.33 8.85 3.3766 2.45
# factor_str = 'R_EPS_s_YOY_First|continue_ud_p200d|RQYE_p10d_col_extre_0.2|REMFF.20|LIQ_mix.csv|REMWB.03|REMTK.13' \
# '|aadj_r_p345d_continue_ud|wgt_return_p20d_0.2|ADX_40_20_10|REMTK.11'
# info_str = 'market_top_300plus_industry_20_25_30_35|20|False'
# pass 142.46 5.21 3.62 2.7607 2.59
# factor_str = 'aadj_r_p60d_col_extre_0.2|PE_TTM_row_extre_0.2|continue_ud_p20d|TotRev_and_asset_Y3YGR_Y5YGR_0.3' \
# '|R_EBITDA2_QYOY_and_QTTM_0.3|R_OTHERLASSET_QYOY_and_QTTM_0.3|REMTK.16|aadj_r_p10d_col_extre_0.2' \
# '|RQMCL_p345d_continue_ud|R_WorkCapital_QYOY|wgt_return_p20d_0.2'
#
# info_str = 'market_top_300plus_industry_45_50|20|False'
# pass 174.61 5.44 5.15 2.6052 2.67
# factor_str = 'REMTK.21|continue_ud_p20d|REMFF.40|continue_ud_p100d' \
# '|TVOL_p345d_continue_ud|BBANDS_10_1|R_INVESTINCOME_s_QYOY|R_OTHERLASSET_QYOY_and_QTTM_0.3' \
# '|REMFF.20|tab2_9_row_extre_0.3'
# info_str = 'market_top_300plus_industry_45_50|20|False'
# pass 148.13 5.44 3.13 2.8275 1.49
# factor_str = 'REMFF.11|R_WorkCapital_QYOY_and_QTTM_0.3|continue_ud_p100d|aadj_r_p60d_col_extre_0.2' \
# '|R_LOANREC_s_QYOY_and_QTTM_0.3|TVOL_p345d_continue_ud|REMTK.32' \
# '|R_OTHERLASSET_QYOY_and_QTTM_0.3|wgt_return_p20d_0.2'
# info_str = 'market_top_300plus_industry_45_50|20|False'
# pass 117.41 4.48 2.87 2.6127 2.65
# factor_str = 'REMFF.20|R_INVESTINCOME_s_QYOY|REMTK.32|aadj_r_p10d_col_extre_0.2' \
# '|TotRev_and_mcap_intdebt_Y3YGR_Y5YGR_0.3|TVOL_p345d_continue_ud' \
# '|aadj_r_p120d_col_extre_0.2|R_NetAssets_s_YOY_First|continue_ud_p90d'
# info_str = 'market_top_300plus_industry_45_50|20|False'
# pass 152.11 5.24 2.64 2.6867 2.87
# factor_str = 'continue_ud_p100d|REMFF.26|turn_p20d_0.2|aadj_r_p120d_col_extre_0.2|REMTK.06' \
# '|R_LOANREC_s_QYOY_and_QTTM_0.3|TVOL_p345d_continue_ud|R_OTHERLASSET_QYOY_and_QTTM_0.3' \
# '|RQMCL_p345d_continue_ud|wgt_return_p20d_0.2'
# info_str = 'market_top_300plus_industry_45_50|20|False'
    # pass 67.37 3.78 4.38 2.9121 2.72
    # NOTE: factor_str / info_str are referenced below, so one candidate config has to be
    # active; the most recent candidate is left uncommented here so the function runs as
    # written. Swap in a different block above if another config is intended.
    factor_str = 'PS_TTM_row_extre_0.2|R_WorkCapital_QYOY_and_QTTM_0.3|REMTK.32' \
                 '|R_TangAssets_IntDebt_QYOY_and_QTTM_0.3|aadj_r_p120d_col_extre_0.2|R_INVESTINCOME_s_QYOY' \
                 '|bar_num_7_df|wgt_return_p20d_0.2|OPCF_and_asset_Y3YGR_Y5YGR_0.3|R_GrossProfit_TTM_QYOY_and_QTTM_0.3'
    info_str = 'market_top_300plus_industry_45_50|5|False'
factor_name_list = factor_str.split('|')
alpha_name = 'WHSZIWHEN11'
sector_name, hold_time, if_only_long = info_str.split('|')
hold_time = int(hold_time)
if if_only_long == 'True':
if_only_long = True
else:
if_only_long = False
cut_date = '20180601'
begin_date = pd.to_datetime('20130101')
end_date = datetime.now()
root_path = '/media/hdd1/DAT_EQT'
# root_path = '/mnt/mfs/DAT_EQT'
if_save = False
if_new_program = True
lag = 2
return_file = ''
if_hedge = True
if sector_name.startswith('market_top_300plus'):
if_weight = 1
ic_weight = 0
elif sector_name.startswith('market_top_300to800plus'):
if_weight = 0
ic_weight = 1
else:
if_weight = 0.5
ic_weight = 0.5
time_para_dict = dict()
main = FactorTestSector(root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict,
sector_name, hold_time, lag, return_file, if_hedge, if_only_long, if_weight, ic_weight)
# mix_factor = main.single_test_c(factor_name_list)
# sum_pos_df_new = main.deal_mix_factor(mix_factor)
sum_pos_df_new = main.single_test_real(factor_name_list)
if if_weight != 0:
sum_pos_df_new['IF01'] = -if_weight * sum_pos_df_new.sum(axis=1)
if ic_weight != 0:
sum_pos_df_new['IC01'] = -ic_weight * sum_pos_df_new.sum(axis=1)
pnl_df = (sum_pos_df_new.shift(2) * main.return_choose).sum(axis=1)
pnl_df.name = alpha_name
plot_send_result(pnl_df, bt.AZ_Sharpe_y(pnl_df), alpha_name)
corr_test_fun(pnl_df, alpha_name)
# sum_pos_df_new.round(10).fillna(0).to_csv(f'/mnt/mfs/AAPOS/{alpha_name}.pos', sep='|', index_label='Date')
return sum_pos_df_new
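# Hand-run entry point, wrapped in a function so that nothing executes at import time
# (my_factor_dict, which config_test relies on, is only defined further down). The AAPOS dump
# mirrors the commented-out line inside config_test and is an assumption about where the final
# positions would be written.
def _run_config_test(write_pos=False):
    sum_pos_df = config_test()
    if write_pos:
        sum_pos_df.round(10).fillna(0).to_csv('/mnt/mfs/AAPOS/WHSZIWHEN11.pos', sep='|', index_label='Date')
    return sum_pos_df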
my_factor_dict = dict({
'RZCHE_p120d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p60d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p20d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p10d_col_extre_0.2': 'load_tech_factor',
'RZCHE_p345d_continue_ud': 'load_tech_factor',
'RZCHE_row_extre_0.2': 'load_tech_factor',
'RQCHL_p120d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p60d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p20d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p10d_col_extre_0.2': 'load_tech_factor',
'RQCHL_p345d_continue_ud': 'load_tech_factor',
'RQCHL_row_extre_0.2': 'load_tech_factor',
'RQYL_p120d_col_extre_0.2': 'load_tech_factor',
'RQYL_p60d_col_extre_0.2': 'load_tech_factor',
'RQYL_p20d_col_extre_0.2': 'load_tech_factor',
'RQYL_p10d_col_extre_0.2': 'load_tech_factor',
'RQYL_p345d_continue_ud': 'load_tech_factor',
'RQYL_row_extre_0.2': 'load_tech_factor',
'RQYE_p120d_col_extre_0.2': 'load_tech_factor',
'RQYE_p60d_col_extre_0.2': 'load_tech_factor',
'RQYE_p20d_col_extre_0.2': 'load_tech_factor',
'RQYE_p10d_col_extre_0.2': 'load_tech_factor',
'RQYE_p345d_continue_ud': 'load_tech_factor',
'RQYE_row_extre_0.2': 'load_tech_factor',
'RQMCL_p120d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p60d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p20d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p10d_col_extre_0.2': 'load_tech_factor',
'RQMCL_p345d_continue_ud': 'load_tech_factor',
'RQMCL_row_extre_0.2': 'load_tech_factor',
'RZYE_p120d_col_extre_0.2': 'load_tech_factor',
'RZYE_p60d_col_extre_0.2': 'load_tech_factor',
'RZYE_p20d_col_extre_0.2': 'load_tech_factor',
'RZYE_p10d_col_extre_0.2': 'load_tech_factor',
'RZYE_p345d_continue_ud': 'load_tech_factor',
'RZYE_row_extre_0.2': 'load_tech_factor',
'RZMRE_p120d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p60d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p20d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p10d_col_extre_0.2': 'load_tech_factor',
'RZMRE_p345d_continue_ud': 'load_tech_factor',
'RZMRE_row_extre_0.2': 'load_tech_factor',
'RZRQYE_p120d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p60d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p20d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p10d_col_extre_0.2': 'load_tech_factor',
'RZRQYE_p345d_continue_ud': 'load_tech_factor',
'RZRQYE_row_extre_0.2': 'load_tech_factor',
'WILLR_200_40': 'load_tech_factor',
'WILLR_200_30': 'load_tech_factor',
'WILLR_200_20': 'load_tech_factor',
'WILLR_140_40': 'load_tech_factor',
'WILLR_140_30': 'load_tech_factor',
'WILLR_140_20': 'load_tech_factor',
'WILLR_100_40': 'load_tech_factor',
'WILLR_100_30': 'load_tech_factor',
'WILLR_100_20': 'load_tech_factor',
'WILLR_40_40': 'load_tech_factor',
'WILLR_40_30': 'load_tech_factor',
'WILLR_40_20': 'load_tech_factor',
'WILLR_20_40': 'load_tech_factor',
'WILLR_20_30': 'load_tech_factor',
'WILLR_20_20': 'load_tech_factor',
'WILLR_10_40': 'load_tech_factor',
'WILLR_10_30': 'load_tech_factor',
'WILLR_10_20': 'load_tech_factor',
'BBANDS_10_2': 'load_tech_factor',
'BBANDS_10_1.5': 'load_tech_factor',
'BBANDS_10_1': 'load_tech_factor',
'MACD_20_60_18': 'load_tech_factor',
'BBANDS_200_2': 'load_tech_factor',
'BBANDS_200_1.5': 'load_tech_factor',
'BBANDS_200_1': 'load_tech_factor',
'BBANDS_140_2': 'load_tech_factor',
'BBANDS_140_1.5': 'load_tech_factor',
'BBANDS_140_1': 'load_tech_factor',
'BBANDS_100_2': 'load_tech_factor',
'BBANDS_100_1.5': 'load_tech_factor',
'BBANDS_100_1': 'load_tech_factor',
'BBANDS_40_2': 'load_tech_factor',
'BBANDS_40_1.5': 'load_tech_factor',
'BBANDS_40_1': 'load_tech_factor',
'BBANDS_20_2': 'load_tech_factor',
'BBANDS_20_1.5': 'load_tech_factor',
'BBANDS_20_1': 'load_tech_factor',
'MA_LINE_160_60': 'load_tech_factor',
'MA_LINE_120_60': 'load_tech_factor',
'MA_LINE_100_40': 'load_tech_factor',
'MA_LINE_60_20': 'load_tech_factor',
'MA_LINE_10_5': 'load_tech_factor',
'MACD_12_26_9': 'load_tech_factor',
'intra_up_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_div_dn_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_dn_15_bar_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vwap_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vol_col_score_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_div_daily_col_score_row_extre_0.3': 'load_tech_factor',
'intra_up_vwap_row_extre_0.3': 'load_tech_factor',
'intra_up_vol_row_extre_0.3': 'load_tech_factor',
'intra_up_div_dn_row_extre_0.3': 'load_tech_factor',
'intra_up_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vwap_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_vol_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_dn_15_bar_row_extre_0.3': 'load_tech_factor',
'intra_up_15_bar_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_dn_vwap_row_extre_0.3': 'load_tech_factor',
'intra_dn_vol_row_extre_0.3': 'load_tech_factor',
'intra_dn_div_daily_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vwap_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_vol_row_extre_0.3': 'load_tech_factor',
'intra_dn_15_bar_div_daily_row_extre_0.3': 'load_tech_factor',
'tab5_15_row_extre_0.3': 'load_tech_factor',
'tab5_14_row_extre_0.3': 'load_tech_factor',
'tab5_13_row_extre_0.3': 'load_tech_factor',
'tab4_5_row_extre_0.3': 'load_tech_factor',
'tab4_2_row_extre_0.3': 'load_tech_factor',
'tab4_1_row_extre_0.3': 'load_tech_factor',
'tab2_11_row_extre_0.3': 'load_tech_factor',
'tab2_9_row_extre_0.3': 'load_tech_factor',
'tab2_8_row_extre_0.3': 'load_tech_factor',
'tab2_7_row_extre_0.3': 'load_tech_factor',
'tab2_4_row_extre_0.3': 'load_tech_factor',
'tab2_1_row_extre_0.3': 'load_tech_factor',
'tab1_9_row_extre_0.3': 'load_tech_factor',
'tab1_8_row_extre_0.3': 'load_tech_factor',
'tab1_7_row_extre_0.3': 'load_tech_factor',
'tab1_5_row_extre_0.3': 'load_tech_factor',
'tab1_2_row_extre_0.3': 'load_tech_factor',
'tab1_1_row_extre_0.3': 'load_tech_factor',
'RSI_200_30': 'load_tech_factor',
'RSI_140_30': 'load_tech_factor',
'RSI_100_30': 'load_tech_factor',
'RSI_40_30': 'load_tech_factor',
'RSI_200_10': 'load_tech_factor',
'RSI_140_10': 'load_tech_factor',
'RSI_100_10': 'load_tech_factor',
'RSI_40_10': 'load_tech_factor',
'ATR_200_0.2': 'load_tech_factor',
'ATR_140_0.2': 'load_tech_factor',
'ATR_100_0.2': 'load_tech_factor',
'ATR_40_0.2': 'load_tech_factor',
'ADOSC_60_160_0': 'load_tech_factor',
'ADOSC_60_120_0': 'load_tech_factor',
'ADOSC_40_100_0': 'load_tech_factor',
'ADOSC_20_60_0': 'load_tech_factor',
'MFI_200_70_30': 'load_tech_factor',
'MFI_140_70_30': 'load_tech_factor',
'MFI_100_70_30': 'load_tech_factor',
'MFI_40_70_30': 'load_tech_factor',
'CMO_200_0': 'load_tech_factor',
'CMO_140_0': 'load_tech_factor',
'CMO_100_0': 'load_tech_factor',
'CMO_40_0': 'load_tech_factor',
'AROON_200_80': 'load_tech_factor',
'AROON_140_80': 'load_tech_factor',
'AROON_100_80': 'load_tech_factor',
'AROON_40_80': 'load_tech_factor',
'ADX_200_20_10': 'load_tech_factor',
'ADX_140_20_10': 'load_tech_factor',
'ADX_100_20_10': 'load_tech_factor',
'ADX_40_20_10': 'load_tech_factor',
'TotRev_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'TotRev_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'TotRev_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'NetProfit_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'EBIT_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_intdebt_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_asset_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_QYOY_Y3YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_intdebt_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_asset_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'OPCF_and_mcap_Y3YGR_Y5YGR_0.3': 'load_tech_factor',
'R_OTHERLASSET_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_WorkCapital_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_TangAssets_IntDebt_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_SUMLIAB_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ROE1_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OPEX_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OperProfit_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_OperCost_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_OPCF_TTM_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NETPROFIT_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NetInc_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_NetAssets_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_LOANREC_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_LTDebt_WorkCap_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_INVESTINCOME_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_IntDebt_Mcap_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_GSCF_sales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_GrossProfit_TTM_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_FINANCEEXP_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_FairVal_TotProfit_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ESTATEINVEST_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_EPSDiluted_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_EBITDA2_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_CostSales_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_CFO_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_Cashflow_s_YOY_First_and_QTTM_0.3': 'load_tech_factor',
'R_ASSETDEVALUELOSS_s_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ACCOUNTREC_QYOY_and_QTTM_0.3': 'load_tech_factor',
'R_ACCOUNTPAY_QYOY_and_QTTM_0.3': 'load_tech_factor',
'CCI_p150d_limit_12': 'load_tech_factor',
'CCI_p120d_limit_12': 'load_tech_factor',
'CCI_p60d_limit_12': 'load_tech_factor',
'CCI_p20d_limit_12': 'load_tech_factor',
'MACD_40_160': 'load_tech_factor',
'MACD_40_200': 'load_tech_factor',
'MACD_20_200': 'load_tech_factor',
'MACD_20_100': 'load_tech_factor',
'MACD_10_30': 'load_tech_factor',
'bias_turn_p120d': 'load_tech_factor',
'bias_turn_p60d': 'load_tech_factor',
'bias_turn_p20d': 'load_tech_factor',
'turn_p150d_0.18': 'load_tech_factor',
'turn_p30d_0.24': 'load_tech_factor',
'turn_p120d_0.2': 'load_tech_factor',
'turn_p60d_0.2': 'load_tech_factor',
'turn_p20d_0.2': 'load_tech_factor',
'log_price_0.2': 'load_tech_factor',
'wgt_return_p120d_0.2': 'load_tech_factor',
'wgt_return_p60d_0.2': 'load_tech_factor',
'wgt_return_p20d_0.2': 'load_tech_factor',
'return_p90d_0.2': 'load_tech_factor',
'return_p30d_0.2': 'load_tech_factor',
'return_p120d_0.2': 'load_tech_factor',
'return_p60d_0.2': 'load_tech_factor',
'return_p20d_0.2': 'load_tech_factor',
'PBLast_p120d_col_extre_0.2': 'load_tech_factor',
'PBLast_p60d_col_extre_0.2': 'load_tech_factor',
'PBLast_p20d_col_extre_0.2': 'load_tech_factor',
'PBLast_p10d_col_extre_0.2': 'load_tech_factor',
'PBLast_p345d_continue_ud': 'load_tech_factor',
'PBLast_row_extre_0.2': 'load_tech_factor',
'PS_TTM_p120d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p60d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p20d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p10d_col_extre_0.2': 'load_tech_factor',
'PS_TTM_p345d_continue_ud': 'load_tech_factor',
'PS_TTM_row_extre_0.2': 'load_tech_factor',
'PE_TTM_p120d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p60d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p20d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p10d_col_extre_0.2': 'load_tech_factor',
'PE_TTM_p345d_continue_ud': 'load_tech_factor',
'PE_TTM_row_extre_0.2': 'load_tech_factor',
'volume_moment_p20120d': 'load_tech_factor',
'volume_moment_p1040d': 'load_tech_factor',
'volume_moment_p530d': 'load_tech_factor',
'moment_p50300d': 'load_tech_factor',
'moment_p30200d': 'load_tech_factor',
'moment_p40200d': 'load_tech_factor',
'moment_p20200d': 'load_tech_factor',
'moment_p20100d': 'load_tech_factor',
'moment_p10100d': 'load_tech_factor',
'moment_p1060d': 'load_tech_factor',
'moment_p510d': 'load_tech_factor',
'continue_ud_p200d': 'load_tech_factor',
'evol_p200d': 'load_tech_factor',
'vol_count_down_p200d': 'load_tech_factor',
'vol_p200d': 'load_tech_factor',
'continue_ud_p100d': 'load_tech_factor',
'evol_p100d': 'load_tech_factor',
'vol_count_down_p100d': 'load_tech_factor',
'vol_p100d': 'load_tech_factor',
'continue_ud_p90d': 'load_tech_factor',
'evol_p90d': 'load_tech_factor',
'vol_count_down_p90d': 'load_tech_factor',
'vol_p90d': 'load_tech_factor',
'continue_ud_p50d': 'load_tech_factor',
'evol_p50d': 'load_tech_factor',
'vol_count_down_p50d': 'load_tech_factor',
'vol_p50d': 'load_tech_factor',
'continue_ud_p30d': 'load_tech_factor',
'evol_p30d': 'load_tech_factor',
'vol_count_down_p30d': 'load_tech_factor',
'vol_p30d': 'load_tech_factor',
'continue_ud_p120d': 'load_tech_factor',
'evol_p120d': 'load_tech_factor',
'vol_count_down_p120d': 'load_tech_factor',
'vol_p120d': 'load_tech_factor',
'continue_ud_p60d': 'load_tech_factor',
'evol_p60d': 'load_tech_factor',
'vol_count_down_p60d': 'load_tech_factor',
'vol_p60d': 'load_tech_factor',
'continue_ud_p20d': 'load_tech_factor',
'evol_p20d': 'load_tech_factor',
'vol_count_down_p20d': 'load_tech_factor',
'vol_p20d': 'load_tech_factor',
'continue_ud_p10d': 'load_tech_factor',
'evol_p10d': 'load_tech_factor',
'vol_count_down_p10d': 'load_tech_factor',
'vol_p10d': 'load_tech_factor',
'volume_count_down_p120d': 'load_tech_factor',
'volume_count_down_p60d': 'load_tech_factor',
'volume_count_down_p20d': 'load_tech_factor',
'volume_count_down_p10d': 'load_tech_factor',
'price_p120d_hl': 'load_tech_factor',
'price_p60d_hl': 'load_tech_factor',
'price_p20d_hl': 'load_tech_factor',
'price_p10d_hl': 'load_tech_factor',
'aadj_r_p120d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p60d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p20d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p10d_col_extre_0.2': 'load_tech_factor',
'aadj_r_p345d_continue_ud': 'load_tech_factor',
'aadj_r_p345d_continue_ud_pct': 'load_tech_factor',
'aadj_r_row_extre_0.2': 'load_tech_factor',
'TVOL_p90d_col_extre_0.2': 'load_tech_factor',
'TVOL_p30d_col_extre_0.2': 'load_tech_factor',
'TVOL_p120d_col_extre_0.2': 'load_tech_factor',
'TVOL_p60d_col_extre_0.2': 'load_tech_factor',
'TVOL_p20d_col_extre_0.2': 'load_tech_factor',
'TVOL_p10d_col_extre_0.2': 'load_tech_factor',
'TVOL_p345d_continue_ud': 'load_tech_factor',
'TVOL_row_extre_0.2': 'load_tech_factor',
'R_ACCOUNTPAY_QYOY': 'load_daily_factor',
'R_ACCOUNTREC_QYOY': 'load_daily_factor',
'R_ASSETDEVALUELOSS_s_QYOY': 'load_daily_factor',
'R_AssetDepSales_s_First': 'load_daily_factor',
'R_BusinessCycle_First': 'load_daily_factor',
'R_CFOPS_s_First': 'load_daily_factor',
'R_CFO_TotRev_s_First': 'load_daily_factor',
'R_CFO_s_YOY_First': 'load_daily_factor',
'R_Cashflow_s_YOY_First': 'load_daily_factor',
'R_CostSales_QYOY': 'load_daily_factor',
'R_CostSales_s_First': 'load_daily_factor',
'R_CurrentAssetsTurnover_QTTM': 'load_daily_factor',
'R_DaysReceivable_First': 'load_daily_factor',
'R_DebtAssets_QTTM': 'load_daily_factor',
'R_DebtEqt_First': 'load_daily_factor',
'R_EBITDA2_QYOY': 'load_daily_factor',
'R_EBITDA_IntDebt_QTTM': 'load_daily_factor',
'R_EBITDA_sales_TTM_First': 'load_daily_factor',
'R_EBIT_sales_QTTM': 'load_daily_factor',
'R_EPS_s_First': 'load_daily_factor',
'R_EPS_s_YOY_First': 'load_daily_factor',
'R_ESTATEINVEST_QYOY': 'load_daily_factor',
'R_FCFTot_Y3YGR': 'load_daily_factor',
'R_FINANCEEXP_s_QYOY': 'load_daily_factor',
'R_FairValChgPnL_s_First': 'load_daily_factor',
'R_FairValChg_TotProfit_s_First': 'load_daily_factor',
'R_FairVal_TotProfit_QYOY': 'load_daily_factor',
'R_FairVal_TotProfit_TTM_First': 'load_daily_factor',
'R_FinExp_sales_s_First': 'load_daily_factor',
'R_GSCF_sales_s_First': 'load_daily_factor',
'R_GrossProfit_TTM_QYOY': 'load_daily_factor',
'R_INVESTINCOME_s_QYOY': 'load_daily_factor',
'R_LTDebt_WorkCap_QTTM': 'load_daily_factor',
'R_MgtExp_sales_s_First': 'load_daily_factor',
'R_NETPROFIT_s_QYOY': 'load_daily_factor',
'R_NOTICEDATE_First': 'load_daily_factor',
'R_NetAssets_s_POP_First': 'load_daily_factor',
'R_NetAssets_s_YOY_First': 'load_daily_factor',
'R_NetCashflowPS_s_First': 'load_daily_factor',
'R_NetIncRecur_QYOY': 'load_daily_factor',
'R_NetIncRecur_s_First': 'load_daily_factor',
'R_NetInc_TotProfit_s_First': 'load_daily_factor',
'R_NetInc_s_First': 'load_daily_factor',
'R_NetInc_s_QYOY': 'load_daily_factor',
'R_NetMargin_s_YOY_First': 'load_daily_factor',
'R_NetProfit_sales_s_First': 'load_daily_factor',
'R_NetROA_TTM_First': 'load_daily_factor',
'R_NetROA_s_First': 'load_daily_factor',
'R_NonOperProft_TotProfit_s_First': 'load_daily_factor',
'R_OPCF_NetInc_s_First': 'load_daily_factor',
'R_OPCF_TTM_QYOY': 'load_daily_factor',
'R_OPCF_TotDebt_QTTM': 'load_daily_factor',
'R_OPCF_sales_s_First': 'load_daily_factor',
'R_OPEX_sales_TTM_First': 'load_daily_factor',
'R_OPEX_sales_s_First': 'load_daily_factor',
'R_OTHERLASSET_QYOY': 'load_daily_factor',
'R_OperCost_sales_s_First': 'load_daily_factor',
'R_OperProfit_YOY_First': 'load_daily_factor',
'R_OperProfit_s_POP_First': 'load_daily_factor',
'R_OperProfit_s_YOY_First': 'load_daily_factor',
'R_OperProfit_sales_s_First': 'load_daily_factor',
'R_ParentProfit_s_POP_First': 'load_daily_factor',
'R_ParentProfit_s_YOY_First': 'load_daily_factor',
'R_ROENetIncRecur_s_First': 'load_daily_factor',
'R_ROE_s_First': 'load_daily_factor',
'R_RecurNetProft_NetProfit_s_First': 'load_daily_factor',
'R_RevenuePS_s_First': 'load_daily_factor',
'R_RevenueTotPS_s_First': 'load_daily_factor',
'R_Revenue_s_POP_First': 'load_daily_factor',
'R_Revenue_s_YOY_First': 'load_daily_factor',
'R_SUMLIAB_QYOY': 'load_daily_factor',
'R_SUMLIAB_Y3YGR': 'load_daily_factor',
'R_SalesCost_s_First': 'load_daily_factor',
'R_SalesGrossMGN_QTTM': 'load_daily_factor',
'R_SalesGrossMGN_s_First': 'load_daily_factor',
'R_SalesNetMGN_s_First': 'load_daily_factor',
'R_TangAssets_TotLiab_QTTM': 'load_daily_factor',
'R_Tax_TotProfit_QTTM': 'load_daily_factor',
'R_Tax_TotProfit_s_First': 'load_daily_factor',
'R_TotAssets_s_YOY_First': 'load_daily_factor',
'R_TotLiab_s_YOY_First': 'load_daily_factor',
'R_TotRev_TTM_Y3YGR': 'load_daily_factor',
'R_TotRev_s_POP_First': 'load_daily_factor',
'R_TotRev_s_YOY_First': 'load_daily_factor',
'R_WorkCapital_QYOY': 'load_daily_factor',
'bar_num_7_df': 'load_whs_factor',
'bar_num_12_df': 'load_whs_factor',
'repurchase': 'load_whs_factor',
'dividend': 'load_whs_factor',
'repurchase_news_title': 'load_whs_factor',
'repurchase_news_summary': 'load_whs_factor',
'dividend_news_title': 'load_whs_factor',
'dividend_news_summary': 'load_whs_factor',
'staff_changes_news_title': 'load_whs_factor',
'staff_changes_news_summary': 'load_whs_factor',
'funds_news_title': 'load_whs_factor',
'funds_news_summary': 'load_whs_factor',
'meeting_decide_news_title': 'load_whs_factor',
'meeting_decide_news_summary': 'load_whs_factor',
'restricted_shares_news_title': 'load_whs_factor',
'restricted_shares_news_summary': 'load_whs_factor',
'son_company_news_title': 'load_whs_factor',
'son_company_news_summary': 'load_whs_factor',
'suspend_news_title': 'load_whs_factor',
'suspend_news_summary': 'load_whs_factor',
'shares_news_title': 'load_whs_factor',
'shares_news_summary': 'load_whs_factor',
'ab_inventory': 'load_whs_factor',
'ab_rec': 'load_whs_factor',
'ab_others_rec': 'load_whs_factor',
'ab_ab_pre_rec': 'load_whs_factor',
'ab_sale_mng_exp': 'load_whs_factor',
'ab_grossprofit': 'load_whs_factor',
'lsgg_num_df_5': 'load_whs_factor',
'lsgg_num_df_20': 'load_whs_factor',
'lsgg_num_df_60': 'load_whs_factor',
'bulletin_num_df': 'load_whs_factor',
'bulletin_num_df_5': 'load_whs_factor',
'bulletin_num_df_20': 'load_whs_factor',
'bulletin_num_df_60': 'load_whs_factor',
'news_num_df_5': 'load_whs_factor',
'news_num_df_20': 'load_whs_factor',
'news_num_df_60': 'load_whs_factor',
'staff_changes': 'load_whs_factor',
'funds': 'load_whs_factor',
'meeting_decide': 'load_whs_factor',
'restricted_shares': 'load_whs_factor',
'son_company': 'load_whs_factor',
'suspend': 'load_whs_factor',
'shares': 'load_whs_factor',
'buy_key_title__word': 'load_whs_factor',
'sell_key_title_word': 'load_whs_factor',
'buy_summary_key_word': 'load_whs_factor',
'sell_summary_key_word': 'load_whs_factor',
})
my_factor_dict_2 = dict({
'REMTK.40': 'load_remy_factor',
'REMTK.39': 'load_remy_factor',
'REMTK.38': 'load_remy_factor',
'REMTK.37': 'load_remy_factor',
'REMTK.36': 'load_remy_factor',
'REMTK.35': 'load_remy_factor',
'REMTK.34': 'load_remy_factor',
'REMTK.33': 'load_remy_factor',
'REMTK.32': 'load_remy_factor',
'REMTK.31': 'load_remy_factor',
'REMFF.40': 'load_remy_factor',
'REMFF.39': 'load_remy_factor',
'REMFF.38': 'load_remy_factor',
'REMFF.37': 'load_remy_factor',
'REMFF.36': 'load_remy_factor',
'REMFF.35': 'load_remy_factor',
'REMFF.34': 'load_remy_factor',
'REMFF.33': 'load_remy_factor',
'REMFF.32': 'load_remy_factor',
'REMFF.31': 'load_remy_factor',
'REMWB.12': 'load_remy_factor',
'REMWB.11': 'load_remy_factor',
'REMWB.10': 'load_remy_factor',
'REMWB.09': 'load_remy_factor',
'REMWB.08': 'load_remy_factor',
'REMWB.07': 'load_remy_factor',
'REMWB.06': 'load_remy_factor',
'REMWB.05': 'load_remy_factor',
'REMWB.04': 'load_remy_factor',
'REMWB.03': 'load_remy_factor',
'REMWB.02': 'load_remy_factor',
'REMWB.01': 'load_remy_factor',
'REMTK.30': 'load_remy_factor',
'REMTK.29': 'load_remy_factor',
'REMTK.28': 'load_remy_factor',
'REMTK.27': 'load_remy_factor',
'REMTK.26': 'load_remy_factor',
'REMTK.25': 'load_remy_factor',
'REMTK.24': 'load_remy_factor',
'REMTK.23': 'load_remy_factor',
'REMTK.22': 'load_remy_factor',
'REMTK.21': 'load_remy_factor',
'REMTK.20': 'load_remy_factor',
'REMTK.19': 'load_remy_factor',
'REMTK.18': 'load_remy_factor',
'REMTK.17': 'load_remy_factor',
'REMTK.16': 'load_remy_factor',
'REMTK.15': 'load_remy_factor',
'REMTK.14': 'load_remy_factor',
'REMTK.13': 'load_remy_factor',
'REMTK.12': 'load_remy_factor',
'REMTK.11': 'load_remy_factor',
'REMTK.10': 'load_remy_factor',
'REMTK.09': 'load_remy_factor',
'REMTK.08': 'load_remy_factor',
'REMTK.07': 'load_remy_factor',
'REMTK.06': 'load_remy_factor',
'REMTK.05': 'load_remy_factor',
'REMTK.04': 'load_remy_factor',
'REMTK.03': 'load_remy_factor',
'REMTK.02': 'load_remy_factor',
'REMTK.01': 'load_remy_factor',
'REMFF.30': 'load_remy_factor',
'REMFF.29': 'load_remy_factor',
'REMFF.28': 'load_remy_factor',
'REMFF.27': 'load_remy_factor',
'REMFF.26': 'load_remy_factor',
'REMFF.25': 'load_remy_factor',
'REMFF.24': 'load_remy_factor',
'REMFF.23': 'load_remy_factor',
'REMFF.22': 'load_remy_factor',
'REMFF.21': 'load_remy_factor',
'REMFF.20': 'load_remy_factor',
'REMFF.19': 'load_remy_factor',
'REMFF.18': 'load_remy_factor',
'REMFF.17': 'load_remy_factor',
'REMFF.16': 'load_remy_factor',
'REMFF.15': 'load_remy_factor',
'REMFF.14': 'load_remy_factor',
'REMFF.13': 'load_remy_factor',
'REMFF.12': 'load_remy_factor',
'REMFF.11': 'load_remy_factor',
'REMFF.10': 'load_remy_factor',
'REMFF.09': 'load_remy_factor',
'REMFF.08': 'load_remy_factor',
'REMFF.07': 'load_remy_factor',
'REMFF.06': 'load_remy_factor',
'REMFF.05': 'load_remy_factor',
'REMFF.04': 'load_remy_factor',
'REMFF.03': 'load_remy_factor',
'REMFF.02': 'load_remy_factor',
'REMFF.01': 'load_remy_factor'
})
jerry_factor_dict = dict({
'LIQ_all_original.csv': 'load_jerry_factor',
'LIQ_all_pure.csv': 'load_jerry_factor',
'LIQ_mix.csv': 'load_jerry_factor',
'LIQ_p1_original.csv': 'load_jerry_factor',
'LIQ_p1_pure.csv': 'load_jerry_factor',
'LIQ_p2_original.csv': 'load_jerry_factor',
'LIQ_p2_pure.csv': 'load_jerry_factor',
'LIQ_p3_original.csv': 'load_jerry_factor',
'LIQ_p3_pure.csv': 'load_jerry_factor',
'LIQ_p4_original.csv': 'load_jerry_factor',
'LIQ_p4_pure.csv': 'load_jerry_factor',
'M0': 'load_jerry_factor',
'M1': 'load_jerry_factor',
'M1_p1': 'load_jerry_factor',
'M1_p2': 'load_jerry_factor',
'M1_p3': 'load_jerry_factor',
'M1_p4': 'load_jerry_factor',
'vr_afternoon_10min_20days': 'load_jerry_factor',
'vr_afternoon_last10min_20days.csv': 'load_jerry_factor',
'vr_original_20days.csv': 'load_jerry_factor',
'vr_original_45days.csv': 'load_jerry_factor',
'vr_original_75days.csv': 'load_jerry_factor',
})
my_factor_dict.update(my_factor_dict_2)
my_factor_dict.update(jerry_factor_dict)
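

# Illustrative sketch (added for clarity, not part of the original configuration):
# each entry above maps a factor name to the *name* of the loader method expected
# to produce it, so a dispatcher can resolve entries by attribute lookup.
# `loader_obj` and the loader-method signature are assumptions for this example.
def _load_factor_example(loader_obj, factor_name, factor_dict=my_factor_dict):
    loader_name = factor_dict[factor_name]        # e.g. 'load_tech_factor'
    loader_fn = getattr(loader_obj, loader_name)  # assumed method on loader_obj
    return loader_fn(factor_name)
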
if __name__ == '__main__':
t1 = time.time()
sum_pos_df = config_test()
t2 = time.time()
print(round(t2 - t1, 4))
| [
"[email protected]"
]
| |
d6f5168e7ed6ddd0d588ee89ae179faafdae37c6 | d78dfc5089717fc242bbd7097f507d811abb4260 | /USA/script.icechannel.Thevideome.settings/default.py | b7d904cce475928faec807af89e23a0002229f7a | []
| no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 169 | py | addon_id="script.icechannel.Thevideome.settings"
addon_name="iStream - Thevideome - Settings"
import xbmcaddon
addon = xbmcaddon.Addon(id=addon_id)
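# Running this script simply opens the Kodi settings dialog for this add-on.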
addon.openSettings()
| [
"[email protected]"
]
| |
57adbfd2865b7cf8540897ff6ca3685bbaf4dfb0 | 164457b943d0b426e9a5e2eb57779e4e37f2d1bb | /the_tale/accounts/workers/accounts_manager.py | 84d4482f67e8a0b8ffab01b81c7cb415dffd6c34 | [
"BSD-2-Clause-Views"
]
| permissive | lshestov/the-tale | 64334fd99a442ad736d9e8a38e8f0fb52d0ebab6 | 6229edfec6420307975269be9926c68ecdefb930 | refs/heads/master | 2021-01-18T08:38:44.147294 | 2015-10-27T18:43:10 | 2015-10-27T18:43:10 | 50,228,827 | 0 | 0 | null | 2016-01-23T07:38:54 | 2016-01-23T07:38:54 | null | UTF-8 | Python | false | false | 3,166 | py | # coding: utf-8
import time
import datetime
from dext.settings import settings
from the_tale.common.utils.workers import BaseWorker
from the_tale.common import postponed_tasks
from the_tale.accounts.prototypes import AccountPrototype, RandomPremiumRequestPrototype
from the_tale.accounts.conf import accounts_settings
class Worker(BaseWorker):
GET_CMD_TIMEOUT = 60
def clean_queues(self):
super(Worker, self).clean_queues()
self.stop_queue.queue.purge()
def initialize(self):
self.initialized = True
postponed_tasks.PostponedTaskPrototype.reset_all()
self.logger.info('ACCOUNT_MANAGER INITIALIZED')
def process_no_cmd(self):
        # check whether premium-expired notifications need to be sent
if (time.time() - float(settings.get(accounts_settings.SETTINGS_PREV_PREIMIUM_EXPIRED_NOTIFICATION_RUN_TIME_KEY, 0)) > 23.5*60*60 and
accounts_settings.PREMIUM_EXPIRED_NOTIFICATION_RUN_TIME <= datetime.datetime.now().hour <= accounts_settings.PREMIUM_EXPIRED_NOTIFICATION_RUN_TIME+1):
settings[accounts_settings.SETTINGS_PREV_PREIMIUM_EXPIRED_NOTIFICATION_RUN_TIME_KEY] = str(time.time())
self.run_send_premium_expired_notifications()
return
self.run_random_premium_requests_processing()
def run_send_premium_expired_notifications(self):
AccountPrototype.send_premium_expired_notifications()
def run_random_premium_requests_processing(self):
while True:
request = RandomPremiumRequestPrototype.get_unprocessed()
if request is None:
return
self.logger.info('process random premium request %d' % request.id)
if not request.process():
self.logger.info('request %d not processed' % request.id)
return
else:
self.logger.info('request %d processed' % request.id)
def cmd_task(self, task_id):
return self.send_cmd('task', {'task_id': task_id})
def process_task(self, task_id):
task = postponed_tasks.PostponedTaskPrototype.get_by_id(task_id)
task.process(self.logger)
task.do_postsave_actions()
def cmd_run_account_method(self, account_id, method_name, data):
return self.send_cmd('run_account_method', {'account_id': account_id,
'method_name': method_name,
'data': data})
def process_run_account_method(self, account_id, method_name, data):
if account_id is not None:
account = AccountPrototype.get_by_id(account_id)
getattr(account, method_name)(**data)
account.save()
else:
            # here we could process classmethods, if they appear in the future
pass
def cmd_stop(self):
return self.send_cmd('stop')
def process_stop(self):
self.initialized = False
self.stop_required = True
self.stop_queue.put({'code': 'stopped', 'worker': 'accounts_manager'}, serializer='json', compression=None)
self.logger.info('ACCOUNTS MANAGER STOPPED')
| [
"[email protected]"
]
| |
7cf3515e7f6034a2c7c8f4d75546e29fa79cc092 | 1e58c8aaff5bb1273caaa73c49c07fd61ebd4439 | /wavencoder/__init__.py | ff0dd47ac2d71605c97213e27e6d38be784f8314 | [
"MIT"
]
| permissive | samsudinng/wavencoder | 9870d6dd86cb126b170c9a6af93acee4acbbd633 | a64e16444ed25b5491fd2ba0c9f1409671e12e5e | refs/heads/master | 2023-03-01T22:42:42.477643 | 2021-02-08T11:23:00 | 2021-02-08T11:23:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | __version__ = '0.0.6'
from wavencoder.models.wav2vec import Wav2Vec
from wavencoder.models.wav2vec2 import Wav2Vec2Model
from wavencoder.models.sincnet import SincNet, SincConvLayer
from wavencoder.models.lstm_classifier import LSTM_Classifier
from wavencoder.models.lstm_classifier import LSTM_Attn_Classifier
from wavencoder.models.baseline import CNN1d
from wavencoder.models.attention import DotAttention, SoftAttention
from wavencoder.models.rawnet import RawNet2Model
from wavencoder.trainer.classification_trainer import train
from wavencoder.trainer.classification_trainer import test_predict_classifier
from wavencoder.trainer.classification_trainer import test_evaluate_classifier
from wavencoder.transforms.noise import AdditiveNoise
from wavencoder.transforms.speed import SpeedChange
from wavencoder.transforms.clip import Clipping
from wavencoder.transforms.pad_crop import Pad, Crop, PadCrop
from wavencoder.transforms.reverberation import Reverberation
from wavencoder.transforms.compose import Compose
| [
"[email protected]"
]
| |
70456061b62a6c44867abca2486de5c1e3cbbd30 | 2316ce8a21d44a5d09284968ef42530633dc10d2 | /sample_code/ep264/rev04/t.py | bb11896bd80b2ae0441665c1f237b272afbbc397 | []
| no_license | AlexanderWinkelmeier/explains | 160de2c41fc5fc0156b482b41f89644dc585c4f3 | d47ec53e384e4303a2d8e71fab9073a1a8d2d6bc | refs/heads/master | 2023-07-30T04:55:31.234482 | 2021-09-15T02:59:42 | 2021-09-15T02:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import os.path
import tempfile
SOME_FILE = 'foo.txt'
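
# Atomic-replace pattern: write the new contents to a temporary file in the same
# directory, then swap it over SOME_FILE with os.replace() in a single step, so
# readers never observe a partially written file; the temp file is removed on error.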
fd, temp_path = tempfile.mkstemp(dir=os.path.dirname(SOME_FILE))
try:
with open(fd, 'w') as f:
f.write('these are the new contents\n')
os.replace(temp_path, SOME_FILE)
except BaseException:
os.remove(temp_path)
raise
| [
"[email protected]"
]
| |
ac47410c081854dcc9bc0251f7925ae5e152c61f | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fv/poddhcpserverinfo.py | 578744b8cd584e1c2bc24ce6e7cb39c73bd5bc04 | []
| no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 7,213 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class PodDhcpServerInfo(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.fv.PodDhcpServerInfo")
meta.moClassName = "fvPodDhcpServerInfo"
meta.rnFormat = "podDhcpServerInfo-%(nodeId)s"
meta.category = MoCategory.REGULAR
meta.label = "Dhcp Server info of the current POD"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.PodConnPDef")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.rnPrefixes = [
('podDhcpServerInfo-', True),
]
prop = PropMeta("str", "PodDhcpServerDn", "PodDhcpServerDn", 47391, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("PodDhcpServerDn", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dhcpIssues", "dhcpIssues", 47392, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("incorrect-pod-dhcp-server-configuration", "nodeid-of-fabricpoddhcpserver-configured-is-not-a-vtor", 1)
prop._addConstant("none", "none", 0)
meta.props.add("dhcpIssues", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "nodeId", "nodeId", 44472, PropCategory.REGULAR)
prop.label = "node id of Dhcp server"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16000)]
prop.defaultValue = 1
prop.defaultValueStr = "1"
meta.props.add("nodeId", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "serverType", "serverType", 44473, PropCategory.REGULAR)
prop.label = "Dhcp server Type Primary/Secondary"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("primary", "primary", 1)
prop._addConstant("secondary", "secondary", 2)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("serverType", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "nodeId"))
def __init__(self, parentMoOrDn, nodeId, markDirty=True, **creationProps):
namingVals = [nodeId]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
675e01ef775c7565dc433efce7f5f97e1b532ad7 | d5966d109ace494481513304439a0bd738565dc9 | /tornado/test/web_test.py | 137516fb99023461f1167ad5f9d98a13df903e2f | [
"Apache-2.0"
]
| permissive | nottombrown/tornado | 33dc9ed845ae9288e2226d06f8a872f4880eb596 | d2b05aea0b68338ab21279ced867cb637df0ffae | refs/heads/master | 2021-01-17T22:04:25.014115 | 2011-08-04T18:08:44 | 2011-08-04T18:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,433 | py | from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str
from tornado.iostream import IOStream
from tornado.template import DictLoader
from tornado.testing import LogTrapTestCase, AsyncHTTPTestCase
from tornado.util import b, bytes_type
from tornado.web import RequestHandler, _O, authenticated, Application, asynchronous, url, HTTPError
import binascii
import logging
import re
import socket
import sys
class CookieTestRequestHandler(RequestHandler):
# stub out enough methods to make the secure_cookie functions work
def __init__(self):
# don't call super.__init__
self._cookies = {}
self.application = _O(settings=dict(cookie_secret='0123456789'))
def get_cookie(self, name):
return self._cookies.get(name)
def set_cookie(self, name, value, expires_days=None):
self._cookies[name] = value
class SecureCookieTest(LogTrapTestCase):
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b('bar'))
self.assertEqual(handler.get_secure_cookie('foo'), b('bar'))
def test_cookie_tampering_future_timestamp(self):
handler = CookieTestRequestHandler()
# this string base64-encodes to '12345678'
handler.set_secure_cookie('foo', binascii.a2b_hex(b('d76df8e7aefc')))
cookie = handler._cookies['foo']
match = re.match(b(r'12345678\|([0-9]+)\|([0-9a-f]+)'), cookie)
assert match
timestamp = match.group(1)
sig = match.group(2)
self.assertEqual(handler._cookie_signature('foo', '12345678',
timestamp), sig)
# shifting digits from payload to timestamp doesn't alter signature
# (this is not desirable behavior, just confirming that that's how it
# works)
self.assertEqual(
handler._cookie_signature('foo', '1234', b('5678') + timestamp),
sig)
# tamper with the cookie
handler._cookies['foo'] = utf8('1234|5678%s|%s' % (timestamp, sig))
# it gets rejected
assert handler.get_secure_cookie('foo') is None
class CookieTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class SetCookieHandler(RequestHandler):
def get(self):
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
self.set_cookie("unicode", u"qwer")
self.set_cookie("bytes", b("zxcv"))
class GetCookieHandler(RequestHandler):
def get(self):
self.write(self.get_cookie("foo"))
class SetCookieDomainHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
self.set_cookie("unicode_args", "blah", domain=u"foo.com",
path=u"/foo")
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
self.set_cookie("equals", "a=b")
self.set_cookie("semicolon", "a;b")
self.set_cookie("quote", 'a"b')
return Application([
("/set", SetCookieHandler),
("/get", GetCookieHandler),
("/set_domain", SetCookieDomainHandler),
("/special_char", SetCookieSpecialCharHandler),
])
def test_set_cookie(self):
response = self.fetch("/set")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["str=asdf; Path=/",
"unicode=qwer; Path=/",
"bytes=zxcv; Path=/"])
def test_get_cookie(self):
response = self.fetch("/get", headers={"Cookie": "foo=bar"})
self.assertEqual(response.body, b("bar"))
response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
self.assertEqual(response.body, b("bar"))
def test_set_cookie_domain(self):
response = self.fetch("/set_domain")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["unicode_args=blah; Domain=foo.com; Path=/foo"])
def test_cookie_special_char(self):
response = self.fetch("/special_char")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0], 'equals="a=b"; Path=/')
# python 2.7 octal-escapes the semicolon; older versions leave it alone
self.assertTrue(headers[1] in ('semicolon="a;b"; Path=/',
'semicolon="a\\073b"; Path=/'),
headers[1])
self.assertEqual(headers[2], 'quote="a\\"b"; Path=/')
data = [('foo=a=b', 'a=b'),
('foo="a=b"', 'a=b'),
('foo="a;b"', 'a;b'),
#('foo=a\\073b', 'a;b'), # even encoded, ";" is a delimiter
('foo="a\\073b"', 'a;b'),
('foo="a\\"b"', 'a"b'),
]
for header, expected in data:
logging.info("trying %r", header)
response = self.fetch("/get", headers={"Cookie": header})
self.assertEqual(response.body, utf8(expected))
class AuthRedirectRequestHandler(RequestHandler):
def initialize(self, login_url):
self.login_url = login_url
def get_login_url(self):
return self.login_url
@authenticated
def get(self):
# we'll never actually get here because the test doesn't follow redirects
self.send_error(500)
class AuthRedirectTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/relative', AuthRedirectRequestHandler,
dict(login_url='/login')),
('/absolute', AuthRedirectRequestHandler,
dict(login_url='http://example.com/login'))])
def test_relative_auth_redirect(self):
self.http_client.fetch(self.get_url('/relative'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/login?next=%2Frelative')
def test_absolute_auth_redirect(self):
self.http_client.fetch(self.get_url('/absolute'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
def initialize(self, test):
self.test = test
@asynchronous
def get(self):
self.test.on_handler_waiting()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/', ConnectionCloseHandler, dict(test=self))])
def test_connection_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
self.stream = IOStream(s, io_loop=self.io_loop)
self.stream.write(b("GET / HTTP/1.0\r\n\r\n"))
self.wait()
def on_handler_waiting(self):
logging.info('handler waiting')
self.stream.close()
def on_connection_close(self):
logging.info('connection closed')
self.stop()
class EchoHandler(RequestHandler):
def get(self, path):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
assert type(key) == str, repr(key)
for value in self.request.arguments[key]:
assert type(value) == bytes_type, repr(value)
for value in self.get_arguments(key):
assert type(value) == unicode, repr(value)
assert type(path) == unicode, repr(path)
self.write(dict(path=path,
args=recursive_unicode(self.request.arguments)))
class RequestEncodingTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([("/(.*)", EchoHandler)])
def test_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(json_decode(self.fetch('/%3F').body),
dict(path='?', args={}))
self.assertEqual(json_decode(self.fetch('/%3F?%3F=%3F').body),
dict(path='?', args={'?': ['?']}))
def test_path_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(json_decode(self.fetch('/%C3%A9?arg=%C3%A9').body),
{u"path":u"\u00e9",
u"args": {u"arg": [u"\u00e9"]}})
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
self.check_type('status', self.get_status(), int)
# get_argument is an exception from the general rule of using
# type str for non-body data mainly for historical reasons.
self.check_type('argument', self.get_argument('foo'), unicode)
self.check_type('cookie_key', self.cookies.keys()[0], str)
self.check_type('cookie_value', self.cookies.values()[0].value, str)
# secure cookies
self.check_type('xsrf_token', self.xsrf_token, bytes_type)
self.check_type('xsrf_form_html', self.xsrf_form_html(), str)
self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'), str)
self.check_type('request_summary', self._request_summary(), str)
def get(self, path_component):
# path_component uses type unicode instead of str for consistency
# with get_argument()
self.check_type('path_component', path_component, unicode)
self.write(self.errors)
def post(self, path_component):
self.check_type('path_component', path_component, unicode)
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class DecodeArgHandler(RequestHandler):
def decode_argument(self, value, name=None):
assert type(value) == bytes_type, repr(value)
# use self.request.arguments directly to avoid recursion
if 'encoding' in self.request.arguments:
return value.decode(to_unicode(self.request.arguments['encoding'][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) == bytes_type:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) == unicode:
return ["unicode", s]
raise Exception("unknown type")
self.write({'path': describe(arg),
'query': describe(self.get_argument("foo")),
})
class LinkifyHandler(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
def get(self):
self.render("page.html", entries=[1,2])
class OptionalPathHandler(RequestHandler):
def get(self, path):
self.write({"path": path})
class FlowControlHandler(RequestHandler):
    # These writes are too small to demonstrate real flow control,
    # but at least they show that the callbacks get run.
@asynchronous
def get(self):
self.write("1")
self.flush(callback=self.step2)
def step2(self):
self.write("2")
self.flush(callback=self.step3)
def step3(self):
self.write("3")
self.finish()
class WebTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
loader = DictLoader({
"linkify.html": "{% module linkify(message) %}",
"page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
"entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
})
urls = [
url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
url("/decode_arg/(.*)", DecodeArgHandler),
url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
url("/linkify", LinkifyHandler),
url("/uimodule_resources", UIModuleResourceHandler),
url("/optional_path/(.+)?", OptionalPathHandler),
url("/flow_control", FlowControlHandler),
]
return Application(urls,
template_loader=loader,
autoescape="xhtml_escape")
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
def test_types(self):
response = self.fetch("/typecheck/asdf?foo=bar",
headers={"Cookie": "cook=ie"})
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
headers={"Cookie": "cook=ie"},
body="foo=bar")
def test_decode_argument(self):
# These urls all decode to the same thing
urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
"/decode_arg/%E9?foo=%E9&encoding=latin1",
"/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'unicode', u'\u00e9'],
u'query': [u'unicode', u'\u00e9'],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'bytes', u'c3a9'],
u'query': [u'bytes', u'c3a9'],
})
def test_uimodule_unescaped(self):
response = self.fetch("/linkify")
self.assertEqual(response.body,
b("<a href=\"http://example.com\">http://example.com</a>"))
def test_uimodule_resources(self):
response = self.fetch("/uimodule_resources")
self.assertEqual(response.body, b("""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>
<div class="entry">...</div>
<div class="entry">...</div>
<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>"""))
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
{u"path": u"foo"})
self.assertEqual(self.fetch_json("/optional_path/"),
{u"path": None})
def test_flow_control(self):
self.assertEqual(self.fetch("/flow_control").body, b("123"))
class ErrorResponseTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class DefaultHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
raise HTTPError(int(self.get_argument("status")))
1/0
class WriteErrorHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1/0
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exc_info" in kwargs:
self.write("Exception: %s" % kwargs["exc_info"][0].__name__)
else:
self.write("Status: %d" % status_code)
class GetErrorHtmlHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1/0
def get_error_html(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exception" in kwargs:
self.write("Exception: %s" % sys.exc_info()[0].__name__)
else:
self.write("Status: %d" % status_code)
class FailedWriteErrorHandler(RequestHandler):
def get(self):
1/0
def write_error(self, status_code, **kwargs):
raise Exception("exception in write_error")
return Application([
url("/default", DefaultHandler),
url("/write_error", WriteErrorHandler),
url("/get_error_html", GetErrorHtmlHandler),
url("/failed_write_error", FailedWriteErrorHandler),
])
def test_default(self):
response = self.fetch("/default")
self.assertEqual(response.code, 500)
self.assertTrue(b("500: Internal Server Error") in response.body)
response = self.fetch("/default?status=503")
self.assertEqual(response.code, 503)
self.assertTrue(b("503: Service Unavailable") in response.body)
def test_write_error(self):
response = self.fetch("/write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b("Exception: ZeroDivisionError"), response.body)
response = self.fetch("/write_error?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b("Status: 503"), response.body)
def test_get_error_html(self):
response = self.fetch("/get_error_html")
self.assertEqual(response.code, 500)
self.assertEqual(b("Exception: ZeroDivisionError"), response.body)
response = self.fetch("/get_error_html?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b("Status: 503"), response.body)
def test_failed_write_error(self):
response = self.fetch("/failed_write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b(""), response.body)
| [
"[email protected]"
]
| |
7ad34a71cf548ff1303f903e8c1a5ba7ad27e6e8 | 631b074ba6b901ba5fb709f8e24acb84a596e777 | /cinder/tests/api/openstack/volume/test_volumes.py | 9563989a91bfa3d21b06cacf38d01659d5bf1120 | [
"Apache-2.0"
]
| permissive | matiu2/cinder | 5ee188a834eea06883103ab97cee50a9ee3a21bb | 1c52fb3041df5661756246705942c60b4b1448d5 | refs/heads/master | 2021-01-18T13:54:34.159533 | 2012-05-04T04:45:20 | 2012-05-04T04:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,490 | py | # Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from cinder.api.openstack.volume import volumes
from cinder import flags
from cinder import test
from cinder.tests.api.openstack import fakes
from cinder.volume import api as volume_api
FLAGS = flags.FLAGS
NS = '{http://docs.openstack.org/volume/api/v1}'
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
self.controller = volumes.VolumeController()
self.stubs.Set(volume_api.API, 'get_all', fakes.stub_volume_get_all)
self.stubs.Set(volume_api.API, 'get', fakes.stub_volume_get)
self.stubs.Set(volume_api.API, 'delete', fakes.stub_volume_delete)
def test_volume_create(self):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'volume_type': 'vol_type_name',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 100}}
self.assertEqual(res_dict, expected)
def test_volume_create_no_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create,
req,
body)
def test_volume_list(self):
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'volume_type': 'vol_type_name',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_list_detail(self):
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'volume_type': 'vol_type_name',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_show(self):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'volume_type': 'vol_type_name',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id):
return fakes.stub_volume(volume_id, attach_status='detached')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [],
'volume_type': 'vol_type_name',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
1)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volume_id', 'server_id', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(tree.tag, NS + 'volume')
for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
'display_name', 'display_description', 'volume_type',
'snapshot_id'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
print child.tag
self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertTrue(gr_child.tag in not_seen)
self.assertEqual(str(vol['metadata'][gr_child.tag]),
gr_child.text)
not_seen.remove(gr_child.tag)
self.assertEqual(0, len(not_seen))
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availability_zone='vol_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
metadata=dict(
foo='bar',
baz='quux',
),
)
text = serializer.serialize(dict(volume=raw_volume))
        print(text)
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(
id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
metadata=dict(
foo='vol1_foo',
bar='vol1_bar',
),
),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
metadata=dict(
foo='vol2_foo',
bar='vol2_bar',
),
)]
text = serializer.serialize(dict(volumes=raw_volumes))
        print(text)
tree = etree.fromstring(text)
self.assertEqual(NS + 'volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
| [
"[email protected]"
]
| |
0457cef64ea7b68406f6e46a7669f6fc1dce58d8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2518/49823/278350.py | 2cb366ad69dc313778213eda8c71db7c66cfe53f | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | def al(a,b):
    a=sorted(a)
    b=sorted(b)
    p,r=0,0
    for i in range(len(a)):
        # advance p to the first element of b that is >= a[i]
        while(b[p]<a[i] and p<len(b)-1):
            p+=1
        # distance from a[i] to its nearest neighbour in b
        if(p==0):
            d=abs(b[p]-a[i])
        else:
            d=min(abs(a[i]-b[p-1]),abs(b[p]-a[i]))
        # keep the largest such nearest-neighbour distance
        r=max(r,d)
    print(r)
if __name__ == '__main__':
al([int(i) for i in input().split(',')],[int(i) for i in input().split(',')])
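# Worked example (hypothetical stdin, not part of the original file): the two
# input lines "1,5,9" and "2,7" print 2, because the farthest any element of
# the first list sits from its nearest neighbour in the second list is
# |9 - 7| = 2.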
| [
"[email protected]"
]
| |
87d5ac6dbfe5558297a98172e06f7a77e461a57f | cb56e1554f43ef93b470019e5a36ddc26680d837 | /DjangoAyushh/first_site/first_site/wsgi.py | 2ae3b1b5a0a4a9042a3af49b19677cd72ff23d53 | []
| no_license | Ayush900/initiating-django | 6790ed4fde82a18af661922a7e3f7165a6d10b98 | ea7a2c3f3467dc92f229468fb3de274e1143a3c8 | refs/heads/master | 2020-07-05T20:52:05.169025 | 2019-08-16T18:14:49 | 2019-08-16T18:14:49 | 202,770,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for first_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_site.settings')
application = get_wsgi_application()
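# Deployment sketch (assumes gunicorn is installed and the command is run from
# the directory containing the first_site package):
#   gunicorn first_site.wsgi:application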
| [
"[email protected]"
]
| |
3b368f4bde0884863f26ff8302e96f97e0254648 | 5897a1b176f9c404fe421c61342f20820f685c07 | /RIS/OSL/zbrush/normalMap.py | 0d231757d9fd9b6a6e7b2b9114f427cc59a996c7 | []
| no_license | NCCA/Renderman | d3269e0c7b5e58a69d8744051100013339516ad0 | ebf17298e0ee56899a3288a3ff8eb4c4a0f806e8 | refs/heads/master | 2023-06-09T07:51:29.251270 | 2023-05-25T11:13:37 | 2023-05-25T11:13:37 | 51,373,926 | 17 | 11 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | #!/usr/bin/python
import prman
# import the python functions
import sys
sys.path.append("../../common")
from functions import drawTeapot
import Obj
from Camera import Camera
from Vec4 import Vec4
from Transformation import *
import random
ri = prman.Ri() # create an instance of the RenderMan interface
filename = "__render"
# this is the begining of the rib archive generation we can only
# make RI calls after this function else we get a core dump
ri.Begin("__render")
# now we add the display element using the usual elements
# FILENAME DISPLAY Type Output format
ri.Display("zbrush.exr", "it", "rgba")
ri.Format(1024, 720, 1)
# setup the raytrace / integrators
ri.Hider("raytrace", {"int incremental": [1]})
ri.PixelVariance(0.01)
ri.ShadingRate(0.1)
ri.Integrator("PxrPathTracer", "integrator")
# now set the projection to perspective
ri.Projection(ri.PERSPECTIVE, {ri.FOV: 30})
# Simple translate for our camera
cam = Camera(Vec4(0, 0.9, -3.9), Vec4(0, 0.2, 0), Vec4(0, 1, 0))
cam.place(ri)
# now we start our world
ri.WorldBegin()
# Lighting We need geo to emit light
ri.TransformBegin()
ri.AttributeBegin()
ri.Declare("domeLight", "string")
lightTx = Transformation()
lightTx.setPosition(0, 1, 0)
lightTx.setRotation(90, 0, 0)
lightTx.setScale(1, 1, 1)
ri.ConcatTransform(lightTx.getMatrix())
ri.Light("PxrDomeLight", "domeLight", {"float exposure": [1.0], "string lightColorMap": ["../../disney/studio2.tx"]})
ri.AttributeEnd()
ri.TransformEnd()
# load mesh
troll = Obj.Obj("../../meshes/troll.obj")
tx = Transformation()
ri.Pattern("PxrTexture", "TrollColour", {"string filename": "../../meshes/TrollColour.tx"})
ri.Pattern("PxrTexture", "TrollSpecular", {"string filename": "../../meshes/TrollSpec.tx"})
ri.Pattern("PxrTexture", "TrollNMap", {"string filename": "../../meshes/TrollNormal.tx"})
ri.Pattern("PxrNormalMap", "TrollBump", {"string filename": "../../meshes/TrollNormal.tx", "float bumpScale": [2]})
ri.Bxdf(
"PxrDisney",
"bxdf",
{
"reference color baseColor": ["TrollColour:resultRGB"],
"reference color subsurfaceColor": ["TrollSpecular:resultRGB"],
"float subsurface": [0.4],
"reference normal bumpNormal": ["TrollBump:resultN"],
"float metallic": [0.1],
"float specular": [0.1],
"float roughness": [0.3],
},
)
ypos = 0.55
ri.TransformBegin()
tx.setPosition(-1, ypos, 0)
tx.setRotation(0, -45, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
ri.TransformBegin()
tx.setPosition(0, ypos, 0)
tx.setRotation(0, 45, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
ri.TransformBegin()
tx.setPosition(1, ypos, 0)
tx.setRotation(0, 200, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
# floor
ri.TransformBegin()
ri.Bxdf(
"PxrDisney",
"bxdf",
{
"color baseColor": [1, 1, 1],
"float roughness": [0.2],
},
)
# ri.Bxdf( "PxrDiffuse","bxdf", { "reference color diffuseColor" : ["colourChecker:resultRGB"] })
s = 12.0
face = [-s, 0, -s, s, 0, -s, -s, 0, s, s, 0, s]
ri.Patch("bilinear", {"P": face})
ri.TransformEnd()
# end our world
ri.WorldEnd()
# and finally end the rib file
ri.End()
| [
"[email protected]"
]
| |
53e96ad958d483b7b85fb9c3119b9e98031ef73c | 922b6d67ca8dcc1573bddd0aa7193107f42b6207 | /dms/web/base.py | 4d5a1c7e76049df5a818300081908190a9e6437b | [
"MIT"
]
| permissive | zhmsg/dms | 40c91ea3945fd8dfcd0b056f4bcf324774c4e88c | a1ae1430893d9dde8f45bba0e50818f0224fcd8a | refs/heads/master | 2023-09-03T23:00:44.615748 | 2023-08-25T00:13:59 | 2023-08-25T00:13:59 | 42,572,830 | 0 | 2 | MIT | 2022-07-06T19:54:32 | 2015-09-16T07:50:17 | JavaScript | UTF-8 | Python | false | false | 3,419 | py | # !/usr/bin/env python
# coding: utf-8
from flask import Blueprint, g, Response, jsonify, redirect
from flask_login import login_required
import functools
from flask_helper.view import View as OView
from dms.utils.log import getLogger
from dms.utils.manager import Explorer
__author__ = 'zhouhenglc'
class RegisterData(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
            cls._instance = object.__new__(cls)  # object.__new__ takes no extra arguments
return cls._instance
def __init__(self):
self._dict = {}
def get(self, key, default=None):
return self._dict.get(key, default)
def set(self, key, value):
self._dict[key] = value
def set_default(self, key, default):
if key not in self._dict:
self._dict[key] = default
def append(self, key, value):
_values = self.get(key)
if not _values:
_values = []
_values.append(value)
self.set(key, _values)
def update(self, key, **kwargs):
_values = self.get(key)
if not _values:
_values = {}
_values.update(**kwargs)
self.set(key, _values)
REGISTER_DATA = RegisterData()
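# Minimal usage sketch of the shared singleton (keys and values below are
# hypothetical, not taken from this project):
#   REGISTER_DATA.append('menus', {'name': 'Report'})
#   REGISTER_DATA.update('settings', theme='dark')
#   REGISTER_DATA.get('menus')   # -> [{'name': 'Report'}]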
explorer = Explorer.get_instance()
class View(OView):
def __init__(self, name, import_name, *args, **kwargs):
self.auth_required = kwargs.pop('auth_required', True)
self.required_resource = kwargs.pop('required_resource', [])
super().__init__(name, import_name, *args, **kwargs)
if self.auth_required:
@self.before_request
@login_required
def before_request():
for rr in self.required_resource:
if rr in explorer.missing_config:
redirect_url = "/config?keys=%s" % \
",".join(explorer.missing_config[rr])
return redirect(redirect_url)
def get_global_endpoint(self, endpoint=None, view_func=None):
if endpoint:
sub_endpoint = endpoint
elif view_func:
sub_endpoint = view_func.func_name
else:
return None
g_endpoint = "%s.%s" % (self.name, sub_endpoint)
return g_endpoint
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
if view_func:
@functools.wraps(view_func)
def inner(*args, **kwargs):
r = view_func(*args, **kwargs)
if isinstance(r, Response):
return r
elif isinstance(r, bool):
return 'True' if r else 'False'
elif isinstance(r, dict):
return jsonify(r)
elif isinstance(r, list):
rs = []
for item in r:
if hasattr(item, 'to_dict'):
rs.append(item.to_dict())
else:
rs.append(item)
return jsonify(rs)
elif hasattr(r, 'to_json'):
return r.to_json()
elif hasattr(r, 'to_dict'):
return jsonify(r.to_dict())
return r
OView.add_url_rule(self, rule, endpoint, inner, **options)
else:
OView.add_url_rule(self, rule, endpoint, view_func, **options)
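# Behaviour sketch of the response wrapping above (route and handler names are
# hypothetical): plain Python return values are coerced into HTTP responses.
#   def list_users():
#       return [User(1), User(2)]   # items with to_dict() become a JSON array
#   view.add_url_rule('/users', 'list_users', list_users)
# A dict return value is passed through jsonify(), and a bool becomes the
# string 'True' or 'False'.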
| [
"[email protected]"
]
| |
6dd5e90c13cbc8921188a2a55e954bfeb8c45d71 | 21b5ad37b812ed78799d4efc1649579cc83d32fb | /pro/migrations/0007_auto_20200222_1157.py | 4b4329dc3c21c4faddc276aeb4688a4472386e24 | []
| no_license | SaifulAbir/django-js-api | b6f18c319f8109884e71095ad49e08e50485bb25 | fbf174b9cde2e7d25b4898f511df9c6f96d406cf | refs/heads/master | 2023-02-12T16:09:21.508702 | 2021-01-14T09:05:15 | 2021-01-14T09:05:15 | 329,713,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # Generated by Django 3.0.3 on 2020-02-22 11:57
from django.db import migrations, models
import p7.validators
class Migration(migrations.Migration):
dependencies = [
('pro', '0006_merge_20200222_1117'),
]
operations = [
migrations.RenameField(
model_name='professional',
old_name='user_id',
new_name='user',
),
migrations.AlterField(
model_name='professional',
name='password',
field=models.CharField(max_length=255, validators=[p7.validators.check_valid_password, p7.validators.MinLengthValidator(8)]),
),
]
| [
"[email protected]"
]
| |
0663ca2468470dd94deb42af8ca3dab1a2cc3333 | 8e97cb7c8668a9061683ea3ba893dab32029fac9 | /pytorch_toolkit/person_reidentification/data/datamanager.py | 75b80c905990d162e028c8e00d6e2abce522f5de | [
"Apache-2.0"
]
| permissive | DmitriySidnev/openvino_training_extensions | e01703bea292f11ffc20d50a1a06f0565059d5c7 | c553a56088f0055baba838b68c9299e19683227e | refs/heads/develop | 2021-06-14T06:32:12.373813 | 2020-05-13T13:25:15 | 2020-05-13T13:25:15 | 180,546,423 | 0 | 1 | Apache-2.0 | 2019-04-15T13:39:48 | 2019-04-10T09:17:55 | Python | UTF-8 | Python | false | false | 5,895 | py | """
MIT License
Copyright (c) 2018 Kaiyang Zhou
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torchreid.data.datamanager import DataManager
from torchreid.data.datasets import __image_datasets
from .datasets.globalme import GlobalMe
from .transforms import build_transforms
from .sampler import build_train_sampler
__image_datasets['globalme'] = GlobalMe
def init_image_dataset(name, **kwargs):
"""Initializes an image dataset."""
avai_datasets = list(__image_datasets.keys())
if name not in avai_datasets:
raise ValueError('Invalid dataset name. Received "{}", '
'but expected to be one of {}'.format(name, avai_datasets))
return __image_datasets[name](**kwargs)
class ImageDataManagerWithTransforms(DataManager):
data_type = 'image'
def __init__(self, root='', sources=None, targets=None, height=256, width=128, transforms='random_flip',
norm_mean=None, norm_std=None, use_gpu=True, split_id=0, combineall=False,
batch_size_train=32, batch_size_test=32, workers=4, num_instances=4, train_sampler='',
cuhk03_labeled=False, cuhk03_classic_split=False, market1501_500k=False, apply_masks_to_test=False):
super(ImageDataManagerWithTransforms, self).__init__(
sources=sources, targets=targets, height=height, width=width,
transforms=None, norm_mean=norm_mean, norm_std=norm_std, use_gpu=use_gpu
)
self.transform_tr, self.transform_te = build_transforms(
self.height, self.width, transforms=transforms,
norm_mean=norm_mean, norm_std=norm_std,
apply_masks_to_test=apply_masks_to_test
)
print('=> Loading train (source) dataset')
trainset = []
for name in self.sources:
trainset_ = init_image_dataset(
name,
transform=self.transform_tr,
mode='train',
combineall=combineall,
root=root,
split_id=split_id,
cuhk03_labeled=cuhk03_labeled,
cuhk03_classic_split=cuhk03_classic_split,
market1501_500k=market1501_500k
)
trainset.append(trainset_)
trainset = sum(trainset)
self._num_train_pids = trainset.num_train_pids
self._num_train_cams = trainset.num_train_cams
train_sampler = build_train_sampler(
trainset.train, train_sampler,
batch_size=batch_size_train,
num_instances=num_instances
)
self.trainloader = torch.utils.data.DataLoader(
trainset,
sampler=train_sampler,
batch_size=batch_size_train,
shuffle=False,
num_workers=workers,
pin_memory=self.use_gpu,
drop_last=True
)
print('=> Loading test (target) dataset')
self.testloader = {name: {'query': None, 'gallery': None} for name in self.targets}
self.testdataset = {name: {'query': None, 'gallery': None} for name in self.targets}
for name in self.targets:
# build query loader
queryset = init_image_dataset(
name,
transform=self.transform_te,
mode='query',
combineall=combineall,
root=root,
split_id=split_id,
cuhk03_labeled=cuhk03_labeled,
cuhk03_classic_split=cuhk03_classic_split,
market1501_500k=market1501_500k
)
self.testloader[name]['query'] = torch.utils.data.DataLoader(
queryset,
batch_size=batch_size_test,
shuffle=False,
num_workers=workers,
pin_memory=self.use_gpu,
drop_last=False
)
# build gallery loader
galleryset = init_image_dataset(
name,
transform=self.transform_te,
mode='gallery',
combineall=combineall,
verbose=False,
root=root,
split_id=split_id,
cuhk03_labeled=cuhk03_labeled,
cuhk03_classic_split=cuhk03_classic_split,
market1501_500k=market1501_500k
)
self.testloader[name]['gallery'] = torch.utils.data.DataLoader(
galleryset,
batch_size=batch_size_test,
shuffle=False,
num_workers=workers,
pin_memory=self.use_gpu,
drop_last=False
)
self.testdataset[name]['query'] = queryset.query
self.testdataset[name]['gallery'] = galleryset.gallery
print('\n')
print(' **************** Summary ****************')
print(' train : {}'.format(self.sources))
print(' # train datasets : {}'.format(len(self.sources)))
print(' # train ids : {}'.format(self.num_train_pids))
print(' # train images : {}'.format(len(trainset)))
print(' # train cameras : {}'.format(self.num_train_cams))
print(' test : {}'.format(self.targets))
print(' *****************************************')
print('\n')
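# Hypothetical usage sketch (root path and dataset names are placeholders, not
# taken from this file):
#   datamanager = ImageDataManagerWithTransforms(
#       root='reid-data', sources=['market1501'], targets=['market1501'],
#       height=256, width=128, transforms=['random_flip'],
#       batch_size_train=32, batch_size_test=100)
#   train_loader = datamanager.trainloader
#   query_loader = datamanager.testloader['market1501']['query']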
| [
"[email protected]"
]
| |
6383995e35ee51c384da1285d358de91724811e2 | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-rdflib.py | 1ef29499af98b492b37a7bc902fb9532e1abc901 | [
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0"
]
| permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 539 | py | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_submodules
hiddenimports = collect_submodules('rdflib.plugins')
| [
"[email protected]"
]
| |
cf1e3075185cefc817f86f6636ba6ca84b9a73ae | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve/devtools/script/behaviortools/clientdebugadaptors.py | 2a22a85a0875ed2b83664cddb9e4a59eb4130b2b | []
| no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,637 | py | #Embedded file name: eve/devtools/script/behaviortools\clientdebugadaptors.py
import logging
from brennivin.messenger import Messenger
import eve.common.script.net.eveMoniker as moniker
from eve.devtools.script.behaviortools.debugwindow import BehaviorDebugWindow
import uthread2
logger = logging.getLogger(__name__)
EVENT_BEHAVIOR_DEBUG_UPDATE = 'OnBehaviorDebugUpdate'
EVENT_BEHAVIOR_DEBUG_CONNECT_REQUEST = 'OnBehaviorDebugConnectRequest'
EVENT_BEHAVIOR_DEBUG_DISCONNECT_REQUEST = 'OnBehaviorDebugDisconnectRequest'
class UpdateListener(object):
def __init__(self):
self.messenger = Messenger()
self.behaviorDebuggersByItemId = {}
sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_UPDATE)
sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_CONNECT_REQUEST)
sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_DISCONNECT_REQUEST)
def AddObserverForItemId(self, itemId, handler):
if itemId in self.messenger.signalsByMessageName:
self.messenger.signalsByMessageName[itemId].clear()
self.messenger.SubscribeToMessage(itemId, handler)
def RemoveObserverForItemId(self, itemId, handler):
try:
self.messenger.UnsubscribeFromMessage(itemId, handler)
except:
logger.error('Failed to remove observer itemID=%s handler=%s', itemId, handler)
def OnBehaviorDebugUpdate(self, itemID, *args, **kwargs):
self.messenger.SendMessage(itemID, *args, **kwargs)
def TryConnectDebugger(self, itemID):
try:
debugger = ClientBehaviorDebugger(itemID)
debugger.Connect()
self.behaviorDebuggersByItemId[itemID] = debugger
except:
logger.exception('failed to connect to debugger for itemID=%s', itemID)
def OnBehaviorDebugConnectRequest(self, itemIDs):
itemIDs = sorted(itemIDs)
for itemID in itemIDs:
self.TryConnectDebugger(itemID)
def TryDisconnectDebugger(self, itemID):
try:
debugger = self.behaviorDebuggersByItemId.pop(itemID)
debugger.Disconnect()
except:
logger.exception('failed to disconnect to debugger for itemID=%s', itemID)
def OnBehaviorDebugDisconnectRequest(self, itemIDs):
for itemID in itemIDs:
self.TryDisconnectDebugger(itemID)
def HasDebugger(self, itemID):
return itemID in self.behaviorDebuggersByItemId
updateListener = UpdateListener()
class ClientBehaviorDebugger(object):
def __init__(self, itemID):
self.itemID = itemID
self.tree = []
self.treeMap = {}
self.events = []
self.debugWindow = None
self.isConnected = False
def Connect(self):
logger.debug('Debugger connecting to behavior of entity %s', self.itemID)
updateListener.AddObserverForItemId(self.itemID, self.OnBehaviorDebugUpdate)
entityLocation = moniker.GetEntityLocation()
treeData = entityLocation.EnableBehaviorDebugging(self.itemID)
self.isConnected = True
uthread2.StartTasklet(self.SetupDebugTree, treeData)
def Disconnect(self):
logger.debug('Debugger disconnecting from behavior of entity %s', self.itemID)
try:
updateListener.RemoveObserverForItemId(self.itemID, self.OnBehaviorDebugUpdate)
entityLocation = moniker.GetEntityLocation()
entityLocation.DisableBehaviorDebugging(self.itemID)
self.isConnected = False
if self.debugWindow is not None:
self.debugWindow.Close()
sm.UnregisterForNotifyEvent(self, 'OnSessionChanged')
except:
logger.exception('Failed while disconnecting :(')
def OnBehaviorDebugUpdate(self, events, taskStatuses, tasksSeen, blackboards, *args, **kwargs):
if self.debugWindow is None:
return
self.debugWindow.LoadEvents(events)
self.debugWindow.UpdateStatuses(taskStatuses)
self.debugWindow.UpdateTasksSeen(tasksSeen)
self.debugWindow.LoadBlackboard(blackboards)
def SetupDebugTree(self, treeData):
self.debugWindow = BehaviorDebugWindow.Open(windowID='BehaviorDebugWindow_%d' % self.itemID)
self.debugWindow.SetController(self)
self.debugWindow.LoadBehaviorTree(treeData)
sm.RegisterForNotifyEvent(self, 'OnSessionChanged')
def IsConnected(self):
return self.isConnected
def OnSessionChanged(self, isRemote, sess, change):
if 'solarsystemid2' in change:
if self.debugWindow is not None:
self.debugWindow.Close()
| [
"[email protected]"
]
| |
e60c607287bab75ad3c8bd40437cacd67838444e | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/streamanalytics/v20200301/input.py | 3a86e730433d5039270923b5be2f82279ac23e72 | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 10,175 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InputInitArgs', 'Input']
@pulumi.input_type
class InputInitArgs:
def __init__(__self__, *,
job_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
input_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]] = None):
"""
The set of arguments for constructing a Input resource.
:param pulumi.Input[str] job_name: The name of the streaming job.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] input_name: The name of the input.
:param pulumi.Input[str] name: Resource name
:param pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']] properties: The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
"""
pulumi.set(__self__, "job_name", job_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if input_name is not None:
pulumi.set(__self__, "input_name", input_name)
if name is not None:
pulumi.set(__self__, "name", name)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="jobName")
def job_name(self) -> pulumi.Input[str]:
"""
The name of the streaming job.
"""
return pulumi.get(self, "job_name")
@job_name.setter
def job_name(self, value: pulumi.Input[str]):
pulumi.set(self, "job_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="inputName")
def input_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the input.
"""
return pulumi.get(self, "input_name")
@input_name.setter
def input_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "input_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]]:
"""
The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]]):
pulumi.set(self, "properties", value)
class Input(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
input_name: Optional[pulumi.Input[str]] = None,
job_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An input object, containing all information associated with the named input. All inputs are contained under a streaming job.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] input_name: The name of the input.
:param pulumi.Input[str] job_name: The name of the streaming job.
:param pulumi.Input[str] name: Resource name
:param pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]] properties: The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InputInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An input object, containing all information associated with the named input. All inputs are contained under a streaming job.
:param str resource_name: The name of the resource.
:param InputInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InputInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
input_name: Optional[pulumi.Input[str]] = None,
job_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InputInitArgs.__new__(InputInitArgs)
__props__.__dict__["input_name"] = input_name
if job_name is None and not opts.urn:
raise TypeError("Missing required property 'job_name'")
__props__.__dict__["job_name"] = job_name
__props__.__dict__["name"] = name
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:streamanalytics/v20200301:Input"), pulumi.Alias(type_="azure-native:streamanalytics:Input"), pulumi.Alias(type_="azure-nextgen:streamanalytics:Input"), pulumi.Alias(type_="azure-native:streamanalytics/v20160301:Input"), pulumi.Alias(type_="azure-nextgen:streamanalytics/v20160301:Input"), pulumi.Alias(type_="azure-native:streamanalytics/v20170401preview:Input"), pulumi.Alias(type_="azure-nextgen:streamanalytics/v20170401preview:Input")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Input, __self__).__init__(
'azure-native:streamanalytics/v20200301:Input',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Input':
"""
Get an existing Input resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = InputInitArgs.__new__(InputInitArgs)
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return Input(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
237f1bd2dc487fe60a3de9660c545f74da8c252b | c4702d1a06640555829b367852138cc93ba4a161 | /dym_report_other_receivable/report/dym_report_xls.py | e815ff128933ca57add0dc5c6cc764a60b2f11eb | []
| no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 17,005 | py | import xlwt
from datetime import datetime
from openerp.osv import orm
from openerp.addons.report_xls.report_xls import report_xls
from openerp.addons.report_xls.utils import rowcol_to_cell, _render
from .dym_report import dym_report_other_receivable_print
from openerp.tools.translate import translate
import logging
_logger = logging.getLogger(__name__)
import string
_ir_translation_name = 'report.other.receivable'
class dym_report_other_receivable_print_xls(dym_report_other_receivable_print):
def __init__(self, cr, uid, name, context):
super(dym_report_other_receivable_print_xls, self).__init__(
cr, uid, name, context=context)
move_line_obj = self.pool.get('account.voucher')
self.context = context
wl_overview = move_line_obj._report_xls_other_receivable_fields(
cr, uid, context)
tmpl_upd_overview = move_line_obj._report_xls_arap_overview_template(
cr, uid, context)
wl_details = move_line_obj._report_xls_arap_details_fields(
cr, uid, context)
tmpl_upd_details = move_line_obj._report_xls_arap_overview_template(
cr, uid, context)
self.localcontext.update({
'datetime': datetime,
'wanted_list_overview': wl_overview,
'template_update_overview': tmpl_upd_overview,
'wanted_list_details': wl_details,
'template_update_details': tmpl_upd_details,
'_': self._,
})
def _(self, src):
lang = self.context.get('lang', 'en_US')
return translate(
self.cr, _ir_translation_name, 'report', lang, src) or src
class report_other_receivable_xls(report_xls):
def __init__(self, name, table, rml=False,
parser=False, header=True, store=False):
super(report_other_receivable_xls, self).__init__(
name, table, rml, parser, header, store)
# Cell Styles
_xs = self.xls_styles
# header
# Report Column Headers format
rh_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rh_cell_style = xlwt.easyxf(rh_cell_format)
self.rh_cell_style_center = xlwt.easyxf(
rh_cell_format + _xs['center'])
self.rh_cell_style_right = xlwt.easyxf(rh_cell_format + _xs['right'])
# Partner Column Headers format
fill_blue = 'pattern: pattern solid, fore_color 27;'
ph_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.ph_cell_style = xlwt.easyxf(ph_cell_format)
self.ph_cell_style_decimal = xlwt.easyxf(
ph_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# Partner Column Data format
pd_cell_format = _xs['borders_all']
self.pd_cell_style = xlwt.easyxf(pd_cell_format)
self.pd_cell_style_center = xlwt.easyxf(
pd_cell_format + _xs['center'])
self.pd_cell_style_date = xlwt.easyxf(
pd_cell_format + _xs['left'],
num_format_str=report_xls.date_format)
self.pd_cell_style_decimal = xlwt.easyxf(
pd_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# totals
rt_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rt_cell_style = xlwt.easyxf(rt_cell_format)
self.rt_cell_style_right = xlwt.easyxf(rt_cell_format + _xs['right'])
self.rt_cell_style_decimal = xlwt.easyxf(
rt_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# XLS Template
self.col_specs_template_overview = {
'no': {
'header': [1, 5, 'text', _render("_('No')")],
'lines': [1, 0, 'number', _render("p['no']")],
'totals': [1, 5, 'text', None]},
'branch_status': {
'header': [1, 10, 'text', _render("_('Branch Status')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['branch_status'] or 'n/a'")],
'totals': [1, 0, 'text', None]},
'branch_id': {
'header': [1, 22, 'text', _render("_('Cabang')")],
'lines': [1, 0, 'text', _render("p['branch_id']")],
'totals': [1, 22, 'text', _render("_('Total')")]},
'number': {
'header': [1, 22, 'text', _render("_('Number')")],
'lines': [1, 0, 'text', _render("p['number']")],
'totals': [1, 22, 'text', None]},
'division': {
'header': [1, 22, 'text', _render("_('Divisi')")],
'lines': [1, 0, 'text', _render("p['division']")],
'totals': [1, 22, 'text', None]},
'partner_code': {
'header': [1, 22, 'text', _render("_('Customer')")],
'lines': [1, 0, 'text', _render("p['partner_code']")],
'totals': [1, 22, 'text', None]},
'partner_name': {
'header': [1, 22, 'text', _render("_('Nama Customer')")],
'lines': [1, 0, 'text', _render("p['partner_name']")],
'totals': [1, 22, 'text', None]},
'journal_name': {
'header': [1, 22, 'text', _render("_('Journal')")],
'lines': [1, 0, 'text', _render("p['journal_name']")],
'totals': [1, 22, 'text', None]},
'account_code': {
'header': [1, 22, 'text', _render("_('No COA')")],
'lines': [1, 0, 'text', _render("p['account_code']")],
'totals': [1, 22, 'text', None]},
'account_name': {
'header': [1, 22, 'text', _render("_('Nama COA')")],
'lines': [1, 0, 'text', _render("p['account_name']")],
'totals': [1, 22, 'text', None]},
'analytic_combination': {
'header': [1, 20, 'text', _render("_('Analytic Combination')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['analytic_combination'] or ''")],
'totals': [1, 0, 'text', None]},
'analytic_1': {
'header': [1, 20, 'text', _render("_('Analytic Company')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['analytic_1'] or ''")],
'totals': [1, 0, 'text', None]},
'analytic_2': {
'header': [1, 20, 'text', _render("_('Analytic Bisnis Unit')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['analytic_2'] or ''")],
'totals': [1, 0, 'text', None]},
'analytic_3': {
'header': [1, 20, 'text', _render("_('Analytic Branch')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['analytic_3'] or ''")],
'totals': [1, 0, 'text', None]},
'analytic_4': {
'header': [1, 20, 'text', _render("_('Analytic Cost Center')"),None,self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("p['analytic_4'] or ''")],
'totals': [1, 0, 'text', None]},
'memo': {
'header': [1, 22, 'text', _render("_('Memo')")],
'lines': [1, 0, 'text', _render("p['memo']")],
'totals': [1, 0, 'text', None]},
'ref': {
'header': [1, 22, 'text', _render("_('Ref')")],
'lines': [1, 0, 'text', _render("p['ref']")],
'totals': [1, 22, 'text', None]},
'date': {
'header': [1, 22, 'text', _render("_('Tanggal')")],
'lines': [1, 0, 'text', _render("p['date']")],
'totals': [1, 22, 'text', None]},
'date_due': {
'header': [1, 22, 'text', _render("_('Tgl Jatuh Tempo')")],
'lines': [1, 0, 'text', _render("p['date_due']")],
'totals': [1, 22, 'text', None]},
'state': {
'header': [1, 22, 'text', _render("_('Status')")],
'lines': [1, 0, 'text', _render("p['state']")],
'totals': [1, 22, 'text', None]},
'total': {
'header': [1, 22, 'text', _render("_('Total')")],
'lines': [1, 0, 'number', _render("p['total']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['total']"), None, self.rt_cell_style_decimal]},
'dpp': {
'header': [1, 22, 'text', _render("_('DPP')")],
'lines': [1, 0, 'number', _render("p['dpp']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['dpp']"), None, self.rt_cell_style_decimal]},
'ppn': {
'header': [1, 22, 'text', _render("_('PPn')")],
'lines': [1, 0, 'number', _render("p['ppn']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['ppn']"), None, self.rt_cell_style_decimal]},
'pph': {
'header': [1, 22, 'text', _render("_('PPh')")],
'lines': [1, 0, 'number', _render("p['pph']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['pph']"), None, self.rt_cell_style_decimal]},
'piutang': {
'header': [1, 22, 'text', _render("_('Piutang')")],
'lines': [1, 0, 'number', _render("p['piutang']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['piutang']"), None, self.rt_cell_style_decimal]},
'residual': {
'header': [1, 22, 'text', _render("_('Residual')")],
'lines': [1, 0, 'number', _render("p['residual']"), None, self.pd_cell_style_decimal],
'totals': [1, 22, 'number', _render("p['residual']"), None, self.rt_cell_style_decimal]},
}
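        # Each column spec above reads as [span, column width, cell type,
        # cell data (usually a _render expression evaluated per record),
        # optional rendered value, optional cell style]; the 'header',
        # 'lines' and 'totals' keys give the spec used for the three row
        # kinds of that column.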
# XLS Template
self.col_specs_template_details = {
}
def generate_xls_report(self, _p, _xs, data, objects, wb):
wanted_list_overview = _p.wanted_list_overview
wanted_list_details = _p.wanted_list_details
self.col_specs_template_overview.update(_p.template_update_overview)
self.col_specs_template_details.update(_p.template_update_details)
_ = _p._
for r in _p.reports:
title_short = r['title_short'].replace('/', '-')
ws_o = wb.add_sheet(title_short)
for ws in [ws_o]:
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
row_pos_o = 0
row_pos_d = 0
# set print header/footer
for ws in [ws_o]:
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
# Title
## Company ##
cell_style = xlwt.easyxf(_xs['left'])
report_name = ' '.join(
[_p.company.name, r['title'],
_p.report_info])
c_specs_o = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs_o, ['report_name'])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=cell_style)
## Text + Tgl ##
cell_style = xlwt.easyxf(_xs['xls_title'])
report_name = ' '.join(
[_('LAPORAN Other Receivable Per Tanggal'), _(str(datetime.today().date())),
_p.report_info])
c_specs_o = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs_o, ['report_name'])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=cell_style)
## Tanggal Jtp Start Date & End Date ##
cell_style = xlwt.easyxf(_xs['left'])
report_name = ' '.join(
[_('Tanggal Jatuh Tempo'), _('-' if data['start_date'] == False else str(data['start_date'])), _('s/d'), _('-' if data['end_date'] == False else str(data['end_date'])),
_p.report_info])
c_specs_o = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs_o, ['report_name'])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=cell_style)
## Tanggal Trx Start Date & End Date ##
cell_style = xlwt.easyxf(_xs['left'])
report_name = ' '.join(
[_('Tanggal Transaksi'), _('-' if data['trx_start_date'] == False else str(data['trx_start_date'])), _('s/d'), _('-' if data['trx_end_date'] == False else str(data['trx_end_date'])),
_p.report_info])
c_specs_o = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs_o, ['report_name'])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=cell_style)
row_pos_o += 1
# Report Column Headers
c_specs_o = map(
lambda x: self.render(
x, self.col_specs_template_overview, 'header',
render_space={'_': _p._}),
wanted_list_overview)
row_data = self.xls_row_template(
c_specs_o, [x[0] for x in c_specs_o])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=self.rh_cell_style,
set_column_size=True)
ws_o.set_horz_split_pos(row_pos_o)
row_data_begin = row_pos_o
# Columns and Rows
no = 0
for p in r['id_ai']:
c_specs_o = map(
lambda x: self.render(
x, self.col_specs_template_overview, 'lines'),
wanted_list_overview)
for x in c_specs_o :
if x[0] == 'no' :
no += 1
x[4] = no
row_data = self.xls_row_template(
c_specs_o, [x[0] for x in c_specs_o])
row_pos_o = self.xls_write_row(
ws_o, row_pos_o, row_data, row_style=self.pd_cell_style)
row_data_end = row_pos_o
# Totals
ws_o.write(row_pos_o, 0, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 1, 'Totals', self.ph_cell_style)
ws_o.write(row_pos_o, 2, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 3, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 4, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 5, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 6, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 7, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 8, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 9, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 10, xlwt.Formula("SUM(K"+str(row_data_begin)+":K"+str(row_data_end)+")"), self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 11, xlwt.Formula("SUM(L"+str(row_data_begin)+":L"+str(row_data_end)+")"), self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 12, xlwt.Formula("SUM(M"+str(row_data_begin)+":M"+str(row_data_end)+")"), self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 13, xlwt.Formula("SUM(N"+str(row_data_begin)+":N"+str(row_data_end)+")"), self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 14, xlwt.Formula("SUM(O"+str(row_data_begin)+":O"+str(row_data_end)+")"), self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 15, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 16, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 17, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 18, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 19, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 20, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 21, None, self.rt_cell_style_decimal)
ws_o.write(row_pos_o, 22, None, self.rt_cell_style_decimal)
# Footer
ws_o.write(row_pos_o + 1, 0, None)
ws_o.write(row_pos_o + 2, 0, _p.report_date + ' ' + str(self.pool.get('res.users').browse(self.cr, self.uid, self.uid).name))
report_other_receivable_xls('report.Laporan Other Receivable', 'account.voucher', parser = dym_report_other_receivable_print_xls)
| [
"[email protected]"
]
| |
f410057cae7ae8e1339c8dac17c74dc88a9d8708 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /decoding/GAD/fairseq/data/audio/raw_audio_dataset.py | 1d92e4966bddce95d492eae411952a4a9ca2c9bd | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 5,267 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
from .. import FairseqDataset
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
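    # e.g. a 12-sample wav cropped with target_size=8 keeps one random
    # contiguous window of 8 samples: start is drawn from [0, 4] and the
    # returned slice is wav[start:start + 8].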
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
input = {"source": collated_sources}
if self.pad:
input["padding_mask"] = padding_mask
return {"id": torch.LongTensor([s["id"] for s in samples]), "net_input": input}
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
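    # np.lexsort treats its last key (the sizes) as the primary sort key, so
    # the [::-1] reversal yields indices ordered from the longest example to
    # the shortest, with ties broken by the shuffled (or sequential) order.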
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
pad=pad,
normalize=normalize,
)
self.fnames = []
self.line_inds = set()
skipped = 0
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for i, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_sample_size is not None and sz < min_sample_size:
skipped += 1
continue
self.fnames.append(items[0])
self.line_inds.add(i)
self.sizes.append(sz)
logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
def __getitem__(self, index):
import soundfile as sf
fname = os.path.join(self.root_dir, self.fnames[index])
wav, curr_sample_rate = sf.read(fname)
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
return {"id": index, "source": feats}
| [
"[email protected]"
]
| |
8a0079e3aa82f90784eb504cb7938500429a6149 | 1221b8856b9faddb5bcf7beebbfed119ead4704e | /doxygen/test/test_contents.py | 6b93c0b220761428511142961e174786410322ed | [
"MIT"
]
| permissive | mikezackles/m.css | 985582ac54b6a9d2af3ded372bf9270f000381e7 | 30bd87036d10b0590f0589dbbccbd283b710a711 | refs/heads/master | 2020-03-29T01:19:01.434881 | 2019-03-03T21:23:22 | 2019-03-03T21:23:22 | 149,382,787 | 0 | 0 | MIT | 2018-09-19T02:47:54 | 2018-09-19T02:47:54 | null | UTF-8 | Python | false | false | 20,832 | py | #
# This file is part of m.css.
#
# Copyright © 2017, 2018 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import pickle
import shutil
import subprocess
import unittest
from hashlib import sha1
from distutils.version import LooseVersion
from . import BaseTestCase, IntegrationTestCase, doxygen_version
class Typography(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'typography', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
class Blocks(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'blocks', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='*.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
self.assertEqual(*self.actual_expected_contents('todo.html'))
# Multiple xrefitems should be merged into one
self.assertEqual(*self.actual_expected_contents('File_8h.html'))
self.assertEqual(*self.actual_expected_contents('old.html'))
class Code(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'code', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
def test_warnings(self):
self.run_dox2html5(wildcard='warnings.xml')
self.assertEqual(*self.actual_expected_contents('warnings.html'))
class CodeLanguage(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'code_language', *args, **kwargs)
@unittest.skipUnless(LooseVersion(doxygen_version()) > LooseVersion("1.8.13"),
"https://github.com/doxygen/doxygen/pull/621")
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
@unittest.skipUnless(LooseVersion(doxygen_version()) > LooseVersion("1.8.13"),
"https://github.com/doxygen/doxygen/pull/623")
def test_ansi(self):
self.run_dox2html5(wildcard='ansi.xml')
self.assertEqual(*self.actual_expected_contents('ansi.html'))
@unittest.skipUnless(LooseVersion(doxygen_version()) > LooseVersion("1.8.13"),
"https://github.com/doxygen/doxygen/pull/621")
def test_warnings(self):
self.run_dox2html5(wildcard='warnings.xml')
self.assertEqual(*self.actual_expected_contents('warnings.html'))
class Image(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'image', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
self.assertTrue(os.path.exists(os.path.join(self.path, 'html', 'tiny.png')))
def test_warnings(self):
self.run_dox2html5(wildcard='warnings.xml')
self.assertEqual(*self.actual_expected_contents('warnings.html'))
class Math(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'math', *args, **kwargs)
@unittest.skipUnless(shutil.which('latex'),
"Math rendering requires LaTeX installed")
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
@unittest.skipUnless(shutil.which('latex'),
"Math rendering requires LaTeX installed")
def test_latex_error(self):
with self.assertRaises(subprocess.CalledProcessError) as context:
self.run_dox2html5(wildcard='error.xml')
class MathCached(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'math_cached', *args, **kwargs)
# Actually generated from $ \frac{\tau}{2} $ tho
self.tau_half_hash = sha1("""$ \pi $""".encode('utf-8')).digest()
self.tau_half = """<?xml version='1.0' encoding='UTF-8'?>
<!-- This file was generated by dvisvgm 2.1.3 -->
<svg height='15.3267pt' version='1.1' viewBox='1.19551 -8.1387 4.67835 12.2613' width='5.84794pt' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
<defs>
<path d='M2.50262 -2.90909H3.92927C4.05679 -2.90909 4.14446 -2.90909 4.22416 -2.97285C4.3198 -3.06052 4.34371 -3.16413 4.34371 -3.21196C4.34371 -3.43512 4.14446 -3.43512 4.00897 -3.43512H1.60199C1.43462 -3.43512 1.13176 -3.43512 0.74122 -3.05255C0.454296 -2.76563 0.231133 -2.399 0.231133 -2.34321C0.231133 -2.27148 0.286924 -2.24757 0.350685 -2.24757C0.430386 -2.24757 0.446326 -2.27148 0.494147 -2.33524C0.884682 -2.90909 1.35492 -2.90909 1.53823 -2.90909H2.22366L1.53823 -0.70137C1.48244 -0.518057 1.37883 -0.191283 1.37883 -0.151432C1.37883 0.0318804 1.5462 0.0956413 1.64184 0.0956413C1.93674 0.0956413 1.98456 -0.183313 2.00847 -0.302864L2.50262 -2.90909Z' id='g0-28'/>
<path d='M2.24757 -1.6259C2.37509 -1.74545 2.70984 -2.00847 2.83736 -2.12005C3.33151 -2.57435 3.80174 -3.0127 3.80174 -3.73798C3.80174 -4.68643 3.00473 -5.30012 2.00847 -5.30012C1.05205 -5.30012 0.422416 -4.57484 0.422416 -3.8655C0.422416 -3.47497 0.73325 -3.41918 0.844832 -3.41918C1.0122 -3.41918 1.25928 -3.53873 1.25928 -3.84159C1.25928 -4.25604 0.860772 -4.25604 0.765131 -4.25604C0.996264 -4.83786 1.53026 -5.03711 1.9208 -5.03711C2.66202 -5.03711 3.04458 -4.40747 3.04458 -3.73798C3.04458 -2.90909 2.46276 -2.30336 1.52229 -1.33898L0.518057 -0.302864C0.422416 -0.215193 0.422416 -0.199253 0.422416 0H3.57061L3.80174 -1.42665H3.55467C3.53076 -1.26725 3.467 -0.868742 3.37136 -0.71731C3.32354 -0.653549 2.71781 -0.653549 2.59029 -0.653549H1.17161L2.24757 -1.6259Z' id='g1-50'/>
</defs>
<g id='page1'>
<use x='1.19551' xlink:href='#g0-28' y='-4.70713'/>
<rect height='0.478187' width='4.67835' x='1.19551' y='-3.22789'/>
<use x='1.4176' xlink:href='#g1-50' y='4.12263'/>
</g>
</svg>"""
# Actually generated from \[ a^3 + b^3 \neq c^3 \] tho
self.fermat_hash = sha1("""\[ a^2 + b^2 = c^2 \]""".encode('utf-8')).digest()
self.fermat = """<?xml version='1.0' encoding='UTF-8'?>
<!-- This file was generated by dvisvgm 2.1.3 -->
<svg height='15.4964pt' version='1.1' viewBox='164.011 -12.3971 60.0231 12.3971' width='75.0289pt' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
<defs>
<path d='M7.53176 -8.09365C7.6274 -8.26102 7.6274 -8.28493 7.6274 -8.3208C7.6274 -8.40448 7.55567 -8.5599 7.38829 -8.5599C7.24483 -8.5599 7.20897 -8.48817 7.12528 -8.3208L1.75741 2.11606C1.66177 2.28344 1.66177 2.30735 1.66177 2.34321C1.66177 2.43885 1.74545 2.58232 1.90087 2.58232C2.04433 2.58232 2.0802 2.51059 2.16389 2.34321L7.53176 -8.09365Z' id='g0-54'/>
<path d='M3.59851 -1.42267C3.53873 -1.21943 3.53873 -1.19552 3.37136 -0.968369C3.10834 -0.633624 2.58232 -0.119552 2.02042 -0.119552C1.53026 -0.119552 1.25529 -0.561893 1.25529 -1.26725C1.25529 -1.92478 1.6259 -3.26376 1.85305 -3.76588C2.25953 -4.60274 2.82142 -5.03313 3.28767 -5.03313C4.07671 -5.03313 4.23213 -4.0528 4.23213 -3.95716C4.23213 -3.94521 4.19626 -3.78979 4.18431 -3.76588L3.59851 -1.42267ZM4.36364 -4.48319C4.23213 -4.79402 3.90934 -5.27223 3.28767 -5.27223C1.93674 -5.27223 0.478207 -3.52677 0.478207 -1.75741C0.478207 -0.573848 1.17161 0.119552 1.98456 0.119552C2.64209 0.119552 3.20399 -0.394521 3.53873 -0.789041C3.65828 -0.0836862 4.22017 0.119552 4.57883 0.119552S5.22441 -0.0956413 5.4396 -0.526027C5.63088 -0.932503 5.79826 -1.66177 5.79826 -1.70959C5.79826 -1.76936 5.75044 -1.81719 5.6787 -1.81719C5.57111 -1.81719 5.55915 -1.75741 5.51133 -1.57808C5.332 -0.872727 5.10486 -0.119552 4.61469 -0.119552C4.268 -0.119552 4.24408 -0.430386 4.24408 -0.669489C4.24408 -0.944458 4.27995 -1.07597 4.38755 -1.54222C4.47123 -1.8411 4.53101 -2.10411 4.62665 -2.45081C5.06899 -4.24408 5.17659 -4.67447 5.17659 -4.7462C5.17659 -4.91357 5.04508 -5.04508 4.86575 -5.04508C4.48319 -5.04508 4.38755 -4.62665 4.36364 -4.48319Z' id='g1-97'/>
<path d='M2.76164 -7.99801C2.7736 -8.04583 2.79751 -8.11756 2.79751 -8.17733C2.79751 -8.29689 2.67796 -8.29689 2.65405 -8.29689C2.64209 -8.29689 2.21171 -8.26102 1.99651 -8.23711C1.79328 -8.22516 1.61395 -8.20125 1.39875 -8.18929C1.11183 -8.16538 1.02814 -8.15342 1.02814 -7.93823C1.02814 -7.81868 1.1477 -7.81868 1.26725 -7.81868C1.87696 -7.81868 1.87696 -7.71108 1.87696 -7.59153C1.87696 -7.50785 1.78132 -7.16115 1.7335 -6.94595L1.44658 -5.79826C1.32702 -5.32005 0.645579 -2.60623 0.597758 -2.39103C0.537983 -2.09215 0.537983 -1.88892 0.537983 -1.7335C0.537983 -0.514072 1.21943 0.119552 1.99651 0.119552C3.38331 0.119552 4.81793 -1.66177 4.81793 -3.39527C4.81793 -4.49514 4.19626 -5.27223 3.29963 -5.27223C2.67796 -5.27223 2.11606 -4.75816 1.88892 -4.51905L2.76164 -7.99801ZM2.00847 -0.119552C1.6259 -0.119552 1.20747 -0.406476 1.20747 -1.33898C1.20747 -1.7335 1.24334 -1.96065 1.45853 -2.79751C1.4944 -2.95293 1.68568 -3.71806 1.7335 -3.87347C1.75741 -3.96912 2.46276 -5.03313 3.27572 -5.03313C3.80174 -5.03313 4.04085 -4.5071 4.04085 -3.88543C4.04085 -3.31158 3.7061 -1.96065 3.40722 -1.33898C3.10834 -0.6934 2.55841 -0.119552 2.00847 -0.119552Z' id='g1-98'/>
<path d='M4.67447 -4.49514C4.44732 -4.49514 4.33973 -4.49514 4.17235 -4.35168C4.10062 -4.29191 3.96912 -4.11258 3.96912 -3.9213C3.96912 -3.68219 4.14844 -3.53873 4.37559 -3.53873C4.66252 -3.53873 4.98531 -3.77783 4.98531 -4.25604C4.98531 -4.82989 4.43537 -5.27223 3.61046 -5.27223C2.04433 -5.27223 0.478207 -3.56264 0.478207 -1.86501C0.478207 -0.824907 1.12379 0.119552 2.34321 0.119552C3.96912 0.119552 4.99726 -1.1477 4.99726 -1.30311C4.99726 -1.37484 4.92553 -1.43462 4.87771 -1.43462C4.84184 -1.43462 4.82989 -1.42267 4.72229 -1.31507C3.95716 -0.298879 2.82142 -0.119552 2.36712 -0.119552C1.54222 -0.119552 1.2792 -0.836862 1.2792 -1.43462C1.2792 -1.85305 1.48244 -3.0127 1.91283 -3.82565C2.22366 -4.38755 2.86924 -5.03313 3.62242 -5.03313C3.77783 -5.03313 4.43537 -5.00922 4.67447 -4.49514Z' id='g1-99'/>
<path d='M2.01644 -2.66202C2.64608 -2.66202 3.04458 -2.19975 3.04458 -1.36289C3.04458 -0.366625 2.4787 -0.071731 2.05629 -0.071731C1.61793 -0.071731 1.02017 -0.231133 0.74122 -0.653549C1.02814 -0.653549 1.2274 -0.836862 1.2274 -1.09988C1.2274 -1.35492 1.04408 -1.53823 0.789041 -1.53823C0.573848 -1.53823 0.350685 -1.40274 0.350685 -1.08394C0.350685 -0.326775 1.16364 0.167372 2.07223 0.167372C3.13225 0.167372 3.87347 -0.565878 3.87347 -1.36289C3.87347 -2.02441 3.34745 -2.63014 2.5345 -2.80548C3.16413 -3.02864 3.63437 -3.57061 3.63437 -4.20822S2.91706 -5.30012 2.08817 -5.30012C1.23537 -5.30012 0.589788 -4.83786 0.589788 -4.23213C0.589788 -3.93724 0.789041 -3.80971 0.996264 -3.80971C1.24334 -3.80971 1.40274 -3.98506 1.40274 -4.21619C1.40274 -4.51108 1.1477 -4.62267 0.972354 -4.63064C1.3071 -5.06899 1.9208 -5.0929 2.06426 -5.0929C2.27148 -5.0929 2.87721 -5.02914 2.87721 -4.20822C2.87721 -3.65031 2.64608 -3.31557 2.5345 -3.18804C2.29539 -2.94097 2.11208 -2.92503 1.6259 -2.89315C1.47447 -2.88518 1.41071 -2.87721 1.41071 -2.7736C1.41071 -2.66202 1.48244 -2.66202 1.61793 -2.66202H2.01644Z' id='g2-51'/>
<path d='M4.77011 -2.76164H8.06974C8.23711 -2.76164 8.4523 -2.76164 8.4523 -2.97684C8.4523 -3.20399 8.24907 -3.20399 8.06974 -3.20399H4.77011V-6.50361C4.77011 -6.67098 4.77011 -6.88618 4.55492 -6.88618C4.32777 -6.88618 4.32777 -6.68294 4.32777 -6.50361V-3.20399H1.02814C0.860772 -3.20399 0.645579 -3.20399 0.645579 -2.98879C0.645579 -2.76164 0.848817 -2.76164 1.02814 -2.76164H4.32777V0.537983C4.32777 0.705355 4.32777 0.920548 4.54296 0.920548C4.77011 0.920548 4.77011 0.71731 4.77011 0.537983V-2.76164Z' id='g3-43'/>
<path d='M8.06974 -3.87347C8.23711 -3.87347 8.4523 -3.87347 8.4523 -4.08867C8.4523 -4.31582 8.24907 -4.31582 8.06974 -4.31582H1.02814C0.860772 -4.31582 0.645579 -4.31582 0.645579 -4.10062C0.645579 -3.87347 0.848817 -3.87347 1.02814 -3.87347H8.06974ZM8.06974 -1.64981C8.23711 -1.64981 8.4523 -1.64981 8.4523 -1.86501C8.4523 -2.09215 8.24907 -2.09215 8.06974 -2.09215H1.02814C0.860772 -2.09215 0.645579 -2.09215 0.645579 -1.87696C0.645579 -1.64981 0.848817 -1.64981 1.02814 -1.64981H8.06974Z' id='g3-61'/>
</defs>
<g id='page1'>
<use x='164.011' xlink:href='#g1-97' y='-2.3246'/>
<use x='170.156' xlink:href='#g2-51' y='-7.26078'/>
<use x='177.545' xlink:href='#g3-43' y='-2.3246'/>
<use x='189.306' xlink:href='#g1-98' y='-2.3246'/>
<use x='194.283' xlink:href='#g2-51' y='-7.26078'/>
<use x='202.336' xlink:href='#g0-54' y='-2.3246'/>
<use x='202.336' xlink:href='#g3-61' y='-2.3246'/>
<use x='214.762' xlink:href='#g1-99' y='-2.3246'/>
<use x='219.8' xlink:href='#g2-51' y='-7.26078'/>
</g>
</svg>"""
# This is using the cache, so doesn't matter if LaTeX is found or not
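    # A note on the assumed cache layout (inferred from the values asserted in
    # this test, not from any documented format): the pickled value is a tuple
    # of (cache format version, global age counter, {input hash: (entry age,
    # baseline offset for vertical alignment, rendered SVG markup)}).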
def test(self):
math_cache = (0, 5, {
self.tau_half_hash: (5, 0.3448408333333333, self.tau_half),
self.fermat_hash: (5, 0.0, self.fermat),
b'does not exist': (5, 0.0, 'something')})
with open(os.path.join(self.path, 'xml/math.cache'), 'wb') as f:
pickle.dump(math_cache, f)
self.run_dox2html5(wildcard='math.xml')
self.assertEqual(*self.actual_expected_contents('math.html'))
# Expect that after the operation the global cache age is bumped,
# unused entries removed and used entries age bumped as well
with open(os.path.join(self.path, 'xml/math.cache'), 'rb') as f:
math_cache_actual = pickle.load(f)
math_cache_expected = (0, 6, {
self.tau_half_hash: (6, 0.3448408333333333, self.tau_half),
self.fermat_hash: (6, 0.0, self.fermat)})
self.assertEqual(math_cache_actual, math_cache_expected)
@unittest.skipUnless(shutil.which('latex'),
"Math rendering requires LaTeX installed")
def test_uncached(self):
# Write some bullshit there, which gets immediately reset
with open(os.path.join(self.path, 'xml/math.cache'), 'wb') as f:
pickle.dump((1337, 0, {"something different"}), f)
self.run_dox2html5(wildcard='math-uncached.xml')
with open(os.path.join(self.path, 'math.html')) as f:
expected_contents = f.read().strip()
        # The file is the same except for the titles of the formulas. Replace them
        # and then compare.
with open(os.path.join(self.path, 'html', 'math-uncached.html')) as f:
actual_contents = f.read().strip().replace('a^3 + b^3 \\neq c^3', 'a^2 + b^2 = c^2').replace('\\frac{\\tau}{2}', '\pi')
self.assertEqual(actual_contents, expected_contents)
# Expect that after the operation the global cache is filled
with open(os.path.join(self.path, 'xml/math.cache'), 'rb') as f:
math_cache_actual = pickle.load(f)
math_cache_expected = (0, 0, {
sha1("$ \\frac{\\tau}{2} $".encode('utf-8')).digest():
(0, 0.3448408333333333, self.tau_half),
sha1("\\[ a^3 + b^3 \\neq c^3 \\]".encode('utf-8')).digest():
(0, 0.0, self.fermat)})
self.assertEqual(math_cache_actual, math_cache_expected)
def test_noop(self):
if os.path.exists(os.path.join(self.path, 'xml/math.cache')):
            # math.cache is a single file, so use os.remove() rather than shutil.rmtree()
            os.remove(os.path.join(self.path, 'xml/math.cache'))
# Processing without any math
self.run_dox2html5(wildcard='indexpage.xml')
# There should be no file generated
self.assertFalse(os.path.exists(os.path.join(self.path, 'xml/math.cache')))
class Tagfile(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'tagfile', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
class Custom(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'custom', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
@unittest.skipUnless(shutil.which('latex'),
"Math rendering requires LaTeX installed")
def test_math(self):
self.run_dox2html5(wildcard='math.xml')
self.assertEqual(*self.actual_expected_contents('math.html'))
class ParseError(BaseTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'parse_error', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='broken.xml')
# The index file should be generated, no abort
self.assertTrue(os.path.exists(os.path.join(self.path, 'html', 'index.html')))
class AutobriefCppComments(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'autobrief_cpp_comments', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='File_8h.xml')
self.assertEqual(*self.actual_expected_contents('File_8h.html'))
# JAVADOC_AUTOBRIEF should be nuked from orbit. Or implemented from scratch,
# properly.
class AutobriefHr(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'autobrief_hr', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='namespaceNamespace.xml')
self.assertEqual(*self.actual_expected_contents('namespaceNamespace.html'))
class AutobriefMultiline(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'autobrief_multiline', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='namespaceNamespace.xml')
self.assertEqual(*self.actual_expected_contents('namespaceNamespace.html'))
class AutobriefHeading(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'autobrief_heading', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='namespaceNamespace.xml')
self.assertEqual(*self.actual_expected_contents('namespaceNamespace.html'))
class SectionUnderscoreOne(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'section_underscore_one', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
class SectionsHeadings(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'sections_headings', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='indexpage.xml')
self.assertEqual(*self.actual_expected_contents('index.html'))
def test_warnings(self):
self.run_dox2html5(wildcard='warnings.xml')
self.assertEqual(*self.actual_expected_contents('warnings.html'))
def test_functions(self):
self.run_dox2html5(wildcard='File_8h.xml')
self.assertEqual(*self.actual_expected_contents('File_8h.html'))
class AnchorInBothGroupAndNamespace(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'anchor_in_both_group_and_namespace', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='namespaceFoo.xml')
self.assertEqual(*self.actual_expected_contents('namespaceFoo.html'))
class UnexpectedSections(IntegrationTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, 'unexpected_sections', *args, **kwargs)
def test(self):
self.run_dox2html5(wildcard='File_8h.xml')
self.assertEqual(*self.actual_expected_contents('File_8h.html'))
| [
"[email protected]"
]
| |
5bd1c4c635fe32c3791141e9bc42704f35a43e4b | 06ae8168b7067c8f77f06a48a22d158af1657651 | /models.py | 98bc2e3f49a110d63721b232a0145760a06b1461 | []
| no_license | Jiangjao/teaProject | 61e3cab41fab4b1aa8d2b1cfd6c6337c01196497 | 9f14d59d974bf82158a43d19c42b977b393857d2 | refs/heads/master | 2023-08-12T11:38:56.561815 | 2021-10-11T06:30:17 | 2021-10-11T06:30:17 | 347,795,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | from django.db import models
class Chemistry(models.Model):
cid = models.IntegerField(blank=True, null=True)
structure = models.CharField(max_length=255, blank=True, null=True)
molecularformula = models.CharField(primary_key=True, max_length=255)
molecularweight = models.CharField(max_length=255, blank=True, null=True)
extra_word = models.TextField(blank=True, null=True)
cas = models.CharField(db_column='CAS', max_length=255, blank=True, null=True) # Field name made lowercase.
pubchem = models.CharField(db_column='PubChem', max_length=255, blank=True, null=True) # Field name made lowercase.
einecs = models.CharField(db_column='EINECS', max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'chemistry'
class CodeImages(models.Model):
code_images = models.CharField(primary_key=True, max_length=255)
entryname = models.CharField(max_length=255, blank=True, null=True)
compoundformula = models.CharField(max_length=255, blank=True, null=True)
einecs = models.CharField(db_column='EINECS', max_length=255, blank=True, null=True) # Field name made lowercase.
cid = models.IntegerField(blank=True, null=True)
extraword = models.TextField(db_column='extraWord', blank=True, null=True) # Field name made lowercase.
chinese_name = models.CharField(db_column='Chinese_name', max_length=255, blank=True, null=True) # Field name made lowercase.
mocular_weight = models.FloatField(db_column='mocular weight', blank=True, null=True) # Field renamed to remove unsuitable characters.
cas = models.CharField(db_column='CAS', max_length=255, blank=True, null=True) # Field name made lowercase.
cid_id = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'code_images'
class Test(models.Model):
name = models.CharField(primary_key=True, max_length=255)
class Meta:
managed = False
db_table = 'test'
| [
"[email protected]"
]
| |
b246744037954e4d56a3d62e35f360a31c14f200 | 787022de03a2dd6998c1518673830395b389e3df | /migration/migrator/migrations/system/20190708143708_add_submitty_admin_json.py | bbd3ab8fcc84b2fbfb1be63377cd4a2f875ea629 | [
"BSD-3-Clause",
"MIT"
]
| permissive | Submitty/Submitty | e6b8731656291a025aa77f928eb067bc9a307540 | b223d9e952bcdb8664721a55593bc75e0e3c8c4f | refs/heads/main | 2023-08-31T23:56:11.291752 | 2023-08-31T19:12:18 | 2023-08-31T19:12:18 | 16,236,118 | 592 | 727 | BSD-3-Clause | 2023-09-13T05:36:08 | 2014-01-25T17:43:57 | PHP | UTF-8 | Python | false | false | 715 | py | """
Migration for the Submitty system.
adds submitty admin json
"""
from pathlib import Path
import shutil
import json
import os
def up(config):
submitty_admin_filename = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_admin.json'))
if not os.path.isfile(submitty_admin_filename):
submitty_admin_json = {
'submitty_admin_username': '',
'submitty_admin_password': ''
}
with open(submitty_admin_filename,'w') as open_file:
json.dump(submitty_admin_json, open_file, indent=2)
shutil.chown(submitty_admin_filename, 'root', 'submitty_daemon')
os.chmod(submitty_admin_filename, 0o440)
def down(config):
pass
| [
"[email protected]"
]
| |
e11c58e1dcc7848596649d5524206b7ba632f80d | a732c1380c8dc829df5ba57b67456a9b603b0cf4 | /model.py | b9386587de7cded1f73f81a5ce43913598a42395 | []
| no_license | ilkaynazli/job-search-planner | 1266433ce6bb8c249c65dfcdb1d01e4a8d97095d | 6ac5f3c82de1c33d564eea627468e54c99daf968 | refs/heads/master | 2020-04-15T07:59:26.225728 | 2019-01-19T05:16:40 | 2019-01-19T05:16:40 | 164,510,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,025 | py | """
the database model of job search planner web app
"""
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
"""Import SQLAlchemy object from flask_sqlalchemy library and make the
connection to PostgreSQL"""
db = SQLAlchemy() #create an instance of SQLAlchemy object
class User(db.Model):
"""Users of the website"""
__tablename__ = 'users'
user_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
username = db.Column(db.String(25), nullable=False, unique=True)
password = db.Column(db.String(150), nullable=False)
email = db.Column(db.String(50), nullable=False)
applications = db.relationship('Application')
def __repr__(self):
"""Human readable data"""
return f"<User id: {self.user_id}, \
username: {self.username},\
password: {self.password},\
email: {self.email}>"
class Company(db.Model):
"""Company names, etc for the jobs applied"""
__tablename__ = 'companies'
company_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
company_name = db.Column(db.String(50), nullable=False)
company_website = db.Column(db.String(150), nullable=True)
applications = db.relationship('Application')
def __repr__(self):
"""Human readable data"""
return f"<Company id: {self.company_id}, \
name: {self.company_name},\
website: {self.company_website}>"
class Application(db.Model):
"""Applications to the companies"""
__tablename__ = 'applications'
application_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
user_id = db.Column(db.Integer,
db.ForeignKey('users.user_id'),
nullable=False
)
company_id = db.Column(db.Integer,
db.ForeignKey('companies.company_id'),
nullable=False
)
date_applied = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) #check this one later!!!!
position = db.Column(db.String(50), nullable=False)
resume = db.Column(db.String(50), nullable=True) #the location of the file
cover_letter = db.Column(db.String(50), nullable=True) #the location of the file
summary = db.Column(db.Text, nullable=True)
referer_id = db.Column(db.Integer,
db.ForeignKey('referers.referer_id'),
nullable=False
)
user = db.relationship('User')
company = db.relationship('Company')
referer = db.relationship('Referer')
def __repr__(self):
"""Human readable data"""
return f"<Application id: {self.application_id}, \
company id: {self.company_id},\
user id: {self.user_id}, \
date applied: {self.date_applied},\
position: {self.position},\
resume: {self.resume},\
cover letter: {self.cover_letter},\
summary: {self.summary},\
referer id: {self.referer_id}>"
class Referer(db.Model):
"""Contact in company applied"""
__tablename__ = 'referers'
referer_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
referer_name = db.Column(db.String(75), nullable=False)
application = db.relationship('Application')
def __repr__(self):
"""Human readable data"""
return f"<Referer id: {self.referer_id}, \
referer name: {self.referer_name}>"
class Interview(db.Model):
"""Topics asked in interview"""
__tablename__ = 'interviews'
interview_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
topic_id = db.Column(db.Integer,
db.ForeignKey('topics.topic_id'),
nullable=False
)
application_id = db.Column(db.Integer,
db.ForeignKey('applications.application_id'),
nullable=False
)
improvement = db.Column(db.Boolean, nullable=False)
application = db.relationship('Application')
topic = db.relationship('Topic')
def __repr__(self):
"""Human readable data"""
return f"<interview id: {self.interview_id},\
topic id: {self.topic_id},\
application id: {self.application_id},\
improvement: {self.improvement}>"
class Topic(db.Model):
"""Interview topics that could be asked"""
__tablename__ = 'topics'
topic_id = db.Column(db.Integer,
primary_key=True,
autoincrement=True,
)
topic = db.Column(db.String(150), nullable=False)
def __repr__(self):
"""Human readable data"""
return f"<Topic id: {self.topic_id},\
topic: {self.topic}>"
def connect_to_db(app, db_name):
"""Connect to database"""
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///' + db_name
app.config['SQLALCHEMY_ECHO'] = True #For debugging purposes keep this True
db.app = app
db.init_app(app)
db_name = 'jobs'
if __name__ == '__main__':
"""For running this interactively"""
from server import app
connect_to_db(app, db_name)
db.create_all()
# example_data()
print('Connected to database.') | [
"[email protected]"
]
| |
cad6221319320a99ab923752183f8b1cd72f2d3c | bfdab27f224d9cac02e319fe55b53172fbf8d1a2 | /motion_editor_core/data/atlas_old/positions/arm/vi_r_arm16.py | 5b8f8c2d722682feff71311b3a7e3510d0fcdcca | []
| no_license | tu-darmstadt-ros-pkg/motion_editor | c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71 | 178a7564b18420748e1ca4413849a44965823655 | refs/heads/master | 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 | Python | UTF-8 | Python | false | false | 69 | py | { 'vi_r_arm16': [1.9635, -1.7167, 0.2953, -1.4274, -0.132, -0.8095]}
| [
"[email protected]"
]
| |
402e1a0cedc16d41ef05c7910420031fb8ab7d1d | 872f24199d847f05ddb4d8f7ac69eaed9336a0d5 | /gcwrap/python/scripts/plotcomp.py | 4f41b9a4beaac8ff3b4fc52744c0b37fdf46759a | []
| no_license | schiebel/casa | 8004f7d63ca037b4579af8a8bbfb4fa08e87ced4 | e2ced7349036d8fc13d0a65aad9a77b76bfe55d1 | refs/heads/master | 2016-09-05T16:20:59.022063 | 2015-08-26T18:46:26 | 2015-08-26T18:46:26 | 41,441,084 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,008 | py | from taskinit import casalog, metool, qa, smtool, tbtool
from simutil import simutil
import pylab as pl
import os
import shutil
import tempfile
import matplotlib
def plotcomp(compdict, showplot=True, wantdict=False, symb=',',
include0amp=False, include0bl=False, blunit='', bl0flux=0.0):
"""
Given a dict including
{'clist': component list,
'objname': objname,
'epoch': epoch,
'shape': component shape dict, including direction.
'freqs (GHz)': pl.array of frequencies,
'antennalist': An array configuration file as used by simdata,
'savedfig': False or, if specified, the filename to save the plot to,
'standard': setjy fluxstandard type},
and symb: One of matplotlib's codes for plot symbols: .:,o^v<>s+xDd234hH|_
default: ',': The smallest points I could find,
make a plot of visibility amplitude vs. baseline length for clist at epoch.
If antennalist is not found as is, it will look for antennalist in
os.getenv('CASAPATH').split(' ')[0] + '/data/alma/simmos/'.
showplot: Whether or not to show the plot on screen.
If wantdict is True, it returns a dictionary with the amplitudes and
baselines on success. Otherwise, it returns True or False as its estimated
success value.
include0amp: Force the lower limit of the amplitude axis to 0.
include0bl: Force the lower limit of the baseline length axis to 0.
    blunit: unit of the baseline length ('' uses the unit in the data; 'klambda' converts to kilolambda)
bl0flux: Zero baseline flux
"""
def failval():
"""
Returns an appropriate failure value.
Note that mydict.update(plotcomp(wantdict=True, ...)) would give a
confusing error message if plotcomp returned False.
"""
retval = False
if wantdict:
retval = {}
return retval
retval = failval() # Default
try:
clist = compdict['clist']
objname = compdict['objname']
epoch = compdict['epoch']
epstr = mepoch_to_str(epoch)
antennalist = compdict['antennalist']
# Read the configuration info.
if not antennalist:
print "compdict['antennalist'] must be set!"
print "Try something in", os.getenv("CASAPATH").split(' ')[0] + "/data/alma/simmos/"
return failval()
# Try repodir if raw antennalist doesn't work.
if not os.path.exists(antennalist):
repodir = os.getenv("CASAPATH").split(' ')[0] + "/data/alma/simmos/"
antennalist = repodir + antennalist
su = simutil("")
stnx, stny, stnz, diam, padnames, nant, telescopename = su.readantenna(antennalist)
#print "telescopename:", telescopename
# Check that the source is up.
myme = metool()
posobs = myme.observatory(telescopename)
#print "posobs:", posobs
myme.doframe(epoch)
myme.doframe(posobs)
azel = myme.measure(compdict['shape']['direction'], 'azel')
azeldegs = tuple([qa.convert(azel[m], 'deg')['value'] for m in ('m0', 'm1')])
casalog.post("(az, el): (%.2f, %.2f) degrees" % azeldegs)
# riseset blabs to the logger, so introduce it now.
casalog.post('Rise and set times of ' + objname + " from " + telescopename + ':')
approx = ''
if 'JPL' in compdict.get('standard', 'JPL'):
# The object is in the Solar System or not known to be extragalactic.
approx = "APPROXIMATE. The times do not account for the apparent motion of "\
+ objname + "."
casalog.post(" (" + approx + ")")
riset = myme.riseset(compdict['shape']['direction'])
msg = ''
if riset['rise'] == 'above':
msg = objname + " is circumpolar"
elif riset['rise'] == 'below':
msg = objname + ' is not visible from ' + telescopename
if msg:
if approx:
msg += ' around ' + mepoch_to_str(epoch)
casalog.post(msg)
else:
for t in riset:
riset[t]['str'] = mepoch_to_str(riset[t]['utc'])
casalog.post(objname + " rises at %s and sets at %s." % (riset['rise']['str'],
riset['set']['str']))
tmeridian=(riset['rise']['utc']['m0']['value']+riset['set']['utc']['m0']['value'])/2.
casalog.post(objname + ': meridian passage at ' + qa.time(str(tmeridian)+'d')[0])
if approx:
riset['NOTE'] = approx
if not azel['m1']['value'] > 0.0:
casalog.post(objname + " is not visible from " + telescopename + " at " + epstr,
'SEVERE')
if wantdict:
return riset
else:
return False
# Start a temp MS.
workingdir = os.path.abspath(os.path.dirname(clist.rstrip('/')))
tempms = tempfile.mkdtemp(prefix=objname, dir=workingdir)
mysm = smtool()
mysm.open(tempms)
su.setcfg(mysm, telescopename, stnx, stny, stnz, diam,
padnames, posobs)
#print "cfg set"
# Only 1 polarization is wanted for now.
stokes, feeds = su.polsettings(telescopename, 'RR')
casalog.post("stokes, feeds: %s, %s" % (stokes, feeds))
fband = su.bandname(compdict['freqs (GHz)'][0])
chaninc = 1.0
nchan = len(compdict['freqs (GHz)'])
if nchan > 1:
chaninc = (compdict['freqs (GHz)'][-1] - compdict['freqs (GHz)'][0]) / (nchan - 1)
mysm.setspwindow(spwname=fband,
freq=str(compdict['freqs (GHz)'][0]) + 'GHz',
deltafreq=str(chaninc) + 'GHz',
freqresolution='1Hz',
nchannels=nchan, refcode="LSRK",
stokes=stokes)
mysm.setfeed(mode=feeds, pol=[''])
mysm.setlimits(shadowlimit=0.01, elevationlimit='10deg')
mysm.setauto(0.0)
mysm.setfield(sourcename=objname,
sourcedirection=compdict['shape']['direction'],
calcode="OBJ", distance='0m')
mysm.settimes(integrationtime="1s", usehourangle=False,
referencetime=epoch)
# this only creates blank uv entries
mysm.observe(sourcename=objname, spwname=fband,
starttime="-0.5s", stoptime="0.5s", project=objname)
mysm.setdata(fieldid=[0])
mysm.setvp()
casalog.post("done setting up simulation parameters")
mysm.predict(complist=clist) # do actual calculation of visibilities:
mysm.close()
casalog.post("Simulation finished.")
mytb = tbtool()
mytb.open(tempms)
data = mytb.getcol('DATA')[0] # Again, only 1 polarization for now.
data = abs(data)
baselines = mytb.getcol('UVW')[:2,:] # Drop w.
datablunit = mytb.getcolkeywords('UVW')['QuantumUnits']
mytb.close()
#print "Got the data and baselines"
shutil.rmtree(tempms)
if datablunit[1] != datablunit[0]:
casalog.post('The baseline units are mismatched!: %s' % datablunit,
'SEVERE')
return failval()
datablunit = datablunit[0]
# uv dist unit in klambda or m
if datablunit == 'm' and blunit=='klambda':
kl = qa.constants('C')['value']/(compdict['freqs (GHz)'][0]*1e6)
blunit = 'k$\lambda$'
else:
blunit = datablunit
kl = 1.0
pl.ioff()
#baselines = pl.hypot(baselines[0]/kl, baselines[1]/kl)
baselines = pl.hypot(baselines[0], baselines[1])
#if not showplot:
# casalog.post('Sorry, not showing the plot is not yet implemented',
# 'WARN')
if showplot:
pl.ion()
pl.clf()
pl.ioff()
nfreqs = len(compdict['freqs (GHz)'])
for freqnum in xrange(nfreqs):
freq = compdict['freqs (GHz)'][freqnum]
casalog.post("Plotting " + str(freq) + " GHz.")
pl.plot(baselines/kl, data[freqnum], symb, label="%.3g GHz" % freq)
#pl.plot(baselines, data[freqnum], symb, label="%.3g GHz" % freq)
pl.xlabel("Baseline length (" + blunit + ")")
pl.ylabel("Visibility amplitude (Jy)")
if include0amp:
pl.ylim(ymin=0.0)
if include0bl:
pl.xlim(xmin=0.0)
pl.suptitle(objname + " (predicted by %s)" % compdict['standard'], fontsize=14)
#pl.suptitle(objname + " (predicted)", fontsize=14)
# Unlike compdict['antennalist'], antennalist might have had repodir
# prefixed to it.
pl.title('at ' + epstr + ' for ' + os.path.basename(compdict['antennalist']), fontsize=10)
titletxt='($%.0f^\circ$ az, $%.0f^\circ$ el)' % azeldegs
# for comparison of old and new models - omit azeldegs as all in az~0
if bl0flux > 0.0:
if len(compdict['freqs (GHz)']) == 1:
titletxt+='\n bl0 flux:%.3f Jy' % bl0flux
else:
titletxt+='\n bl0 flux:%.3f Jy @ %s GHz' % (bl0flux, compdict['freqs (GHz)'][0])
pl.legend(loc='best', title=titletxt)
#pl.legend(loc='best', title='($%.0f^\circ$ az, $%.0f^\circ$ el)' % azeldegs)
y_formatter=matplotlib.ticker.ScalarFormatter(useOffset=False)
pl.axes().yaxis.set_major_formatter(y_formatter)
if showplot:
pl.ion()
pl.draw()
if compdict.get('savedfig'):
pl.savefig(compdict.get('savedfig'))
casalog.post("Saved plot to " + str(compdict.get('savedfig')))
if wantdict:
retval = {'amps': data,
'antennalist': antennalist, # Absolute path, now.
'azel': azel,
'baselines': baselines,
'blunit': blunit,
'riseset': riset,
'savedfig': compdict.get('savedfig')}
else:
retval = True
except Exception, instance:
casalog.post(str(instance), 'SEVERE')
if os.path.isdir(tempms):
shutil.rmtree(tempms)
return retval
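# A hedged usage sketch (not an official CASA example): the component list,
# frequency and configuration file names below are placeholders, and 'epoch'
# and the component 'direction' are assumed to be measures built elsewhere
# with the me tool:
#
#   compdict = {'clist': 'mysource.cl', 'objname': 'mysource',
#               'epoch': epoch_measure, 'shape': {'direction': dir_measure},
#               'freqs (GHz)': pl.array([230.0]),
#               'antennalist': 'alma.cycle0.compact.cfg',
#               'standard': 'Butler-JPL-Horizons 2012', 'savedfig': False}
#   results = plotcomp(compdict, showplot=False, wantdict=True)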
def mepoch_to_str(mepoch, showdate=True, showtime=True, showmjd=True):
"""
Given an epoch as a measure, return it as a nicely formatted string.
"""
tdic = qa.splitdate(mepoch['m0'])
fmt = ""
if showdate:
fmt += '%(year)d-%(month)02d-%(monthday)02d'
if showtime:
if fmt:
fmt += '/'
fmt += '%(hour)02d:%(min)02d:%(sec)02d %(tz)s'
tdic['tz'] = mepoch['refer']
if showmjd:
islast = False
if fmt:
fmt += ' ('
islast = True
fmt += 'MJD %(mjd).2f'
if islast:
fmt += ')'
return fmt % tdic
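# Illustrative only: for an epoch measure corresponding to 2011-04-14 12:00 UTC
# this would return something like '2011-04-14/12:00:00 UTC (MJD 55665.50)',
# with the date, time and MJD parts controlled by the three show* flags.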
| [
"[email protected]"
]
| |
ba9793454b72cf6087c048cea652467469da0dc2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_vagrant.py | 25425a414477258cae648761022543eca0a49624 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py |
#class header
class _VAGRANT():
def __init__(self,):
self.name = "VAGRANT"
self.definitions = [u'a person who is poor and does not have a home or job: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
d8725cc5ae912b35641e0abbed0cff0095ee939b | a1119965e2e3bdc40126fd92f4b4b8ee7016dfca | /trunk/node_monitor/node_monitor_log_parser.py | babf691c9d4f4de5f349def2f23d6aa61954ef67 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | SeattleTestbed/attic | 0e33211ddf39efdbcf5573d4fc7fa5201aa7310d | f618a962ce2fd3c4838564e8c62c10924f5df45f | refs/heads/master | 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 | Python | UTF-8 | Python | false | false | 22,218 | py | """
<Program Name>
node_monitor_log_parser.py
<Started>
April 14, 2011
<Author>
Steven Portzer
<Purpose>
Loads and processes logs created by node_monitor.py.
<Notes>
The methods in this library use lists and dictionaries containing the
following two data structures:
  event: a dict which corresponds to one line in the node_events.log log
    created by node_monitor.py, with the following fields:
'time': a float value, the time the event occurred
'location': the location of the node
'event_type': "advertising", "not_advertising", "contactable", or
"not_contactable" depending on the sort of event it is
'name': either the public key name for advertising events or the md5 hash
of the nodeid for contactability events
nodedict: corresponds to an entry in node_dicts.log, contains the following:
'nodeid': the md5 hash of the node's id
'location': the location of the node
'version': the version of seattle running on the node
'time': when the node information was received
'latency': the time taken to retrieve the information
'vesseldicts': a list of vessel dictionaries containing 'vesselname',
'status', 'ownerkey', and 'userkeys' keys. The 'ownerkey' and 'userkeys'
are hashed using md5 to conserve space and improve human readability.
An important note is that lists of both events and nodedicts will typically be
in nondecreasing order by time since the logs are generated in this format and
all the functions in this library preserve ordering.
"""
def load_nodedicts_from_file(filename):
"""
<Purpose>
Returns a list of the nodedicts stored in the log file with the given name.
<Arguments>
filename:
A string giving the name of the file to load nodedicts from. This should
reference a "node_dicts.log" file created by node_monitor.py.
<Exceptions>
An exception will be raised if the filename is invalid or the log is
    improperly formatted.
<Side Effects>
None.
<Returns>
A list of nodedicts.
"""
logfile = open(filename, 'r')
logstring = logfile.read()
logfile.close()
return load_nodedicts_from_string(logstring)
def load_nodedicts_from_string(logstring):
"""
<Purpose>
Returns a list of the nodedicts encoded in the given string.
<Arguments>
logstring:
A string containing the nodedicts to load. This should be the contents of
a "node_dicts.log" file created by node_monitor.py.
<Exceptions>
    An exception will be raised if the log is improperly formatted.
<Side Effects>
None.
<Returns>
A list of nodedicts.
"""
nodedictlist = []
for line in logstring.split('\n'):
if line:
linedata = eval(line)
nodedictlist.append(linedata)
return nodedictlist
def load_events_from_file(filename):
"""
<Purpose>
Returns a list of the events stored in the log file with the given name.
<Arguments>
filename:
A string giving the name of the file to load events from. This should
reference a "node_events.log" file created by node_monitor.py.
<Exceptions>
An exception will be raised if the filename is invalid or the log is
    improperly formatted.
<Side Effects>
None.
<Returns>
A list of events.
"""
logfile = open(filename, 'r')
logstring = logfile.read()
logfile.close()
return load_events_from_string(logstring)
def load_events_from_string(logstring):
"""
<Purpose>
Returns a list of the events encoded in the given string.
<Arguments>
logstring:
A string containing the events to load. This should be the contents of
a "node_events.log" file created by node_monitor.py.
<Exceptions>
    An exception will be raised if the log is improperly formatted.
<Side Effects>
None.
<Returns>
A list of events.
"""
eventlist = []
for line in logstring.split('\n'):
if line:
linedata = line.split(" ")
if len(linedata) == 4:
eventdict = {'time':float(linedata[0]), 'location':linedata[1], \
'event_type':linedata[2], 'name':linedata[3]}
eventlist.append(eventdict)
else:
        raise Exception("improperly formatted line: " + line)
return eventlist
def close_events(eventlist):
"""
<Purpose>
Appends additional events to the end of a copy of a list of events,
guaranteeing that every advertising event will have a corresponding
not_advertising event and every contactable event will have a corresponding
not_contactable event. This guarantee is useful for some types of log
parsing, but the property will fail to hold for nodes which are still
advertising/contactable when the log terminates.
<Arguments>
eventlist:
A list of events. Due to the way events are matched, you should not pass
in lists that have been filtered by the 'time' field.
<Exceptions>
An exception will be raised if the log contains usual orderings or subsets
of events not seen in complete node_events.log logs. If this function fails
for a newly loaded list of events, it's possibly a bug with node_monitor.py.
<Side Effects>
None.
<Returns>
A list of events containing all the events in eventlist in addition to some
"not_advertising" and "not_contactable" events appended to the end. The time
given to the additional events will be the time of the last log entry.
"""
neweventlist = []
# This is a set of identifying information for all locations advertising at a
# given point in the log.
advertising = set()
# This is a set of identifying information for all node contactable at a given
# point in the log.
contactable = set()
for event in eventlist:
# This information should be unique in the sense that only one location/node
# with this identifier can be advertising/contactable at any given time.
eventinfo = (event['location'], event['name'])
if event['event_type'] == "advertising":
if eventinfo in advertising:
raise Exception("Pair " + str(eventinfo) + " already advertising!")
advertising.add(eventinfo)
elif event['event_type'] == "not_advertising":
advertising.remove(eventinfo)
elif event['event_type'] == "contactable":
if eventinfo in contactable:
raise Exception("Pair " + str(eventinfo) + " already contactable!")
contactable.add(eventinfo)
elif event['event_type'] == "not_contactable":
contactable.remove(eventinfo)
else:
raise Exception("Invalid event type: " + str(event['event_type']))
neweventlist.append(event)
endtime = get_last_value(eventlist, 'time')
# Add a stopped advertising event to the end of the log for any location still
# advertising.
for location, name in advertising:
eventdict = {'time':endtime, 'location':location, \
'event_type':"not_advertising", 'name':name,}
neweventlist.append(eventdict)
# Add a not contactable event to the end of the log for any node still
# contactable.
for location, name in contactable:
eventdict = {'time':endtime, 'location':location, \
'event_type':"not_contactable", 'name':name,}
neweventlist.append(eventdict)
return neweventlist
def filter_by_value(inputlist, keystring, valuelist):
"""
<Purpose>
Filter out certain entries from a list of events or nodedicts and return
only those with one of a list of given value for a specific field.
<Arguments>
inputlist:
A list of dicts. Probably a list of events or nodedicts.
keystring:
A key contained in all the dicts in the list. See the definitions for
events and nodedicts at the top of the program for likely values.
valuelist:
A list of values the given key can map to. All other values will be
filtered out from the returned list.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A list of dicts containing exactly those entires in inputlist for which
entry[keystring] is in valuelist.
"""
resultlist = []
for entry in inputlist:
if entry[keystring] in valuelist:
resultlist.append(entry)
return resultlist
def filter_by_range(inputlist, keystring, start=None, end=None):
"""
<Purpose>
Filter out certain entries from a list of events or nodedicts and return
only those with a field within a certain range. This is mostly just useful
for time measurements.
<Arguments>
inputlist:
A list of dicts. Probably a list of events or nodedicts.
keystring:
A key contained in all the dicts in the list. See the definitions for
events and nodedicts at the top of the program for likely values.
start:
If specified, all entries where keystring maps to a value less than start
will be filtered out of the returned list.
end:
If specified, all entries where keystring maps to a value greater than
end will be filtered out of the returned list.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A list of dicts containing exactly those entires in inputlist for which
entry[keystring] within the range defined by start and end.
"""
resultlist = []
for entry in inputlist:
if (start is None or entry[keystring] >= start) and \
(end is None or entry[keystring] <= end):
resultlist.append(entry)
return resultlist
def group_by_value(inputlist, keystring):
"""
<Purpose>
Aggregate a list of events or nodedicts by the value of a specific field.
<Arguments>
inputlist:
A list of dicts. Probably a list of events or nodedicts.
keystring:
A key contained in all the dicts in the list. See the definitions for
events and nodedicts at the top of the program for likely values.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A dict of lists of dicts. Maps values of the keystring field for entries in
inputlist to lists of dicts in inputlist that map keystring to that value.
"""
resultdict = {}
for entry in inputlist:
if entry[keystring] in resultdict:
resultdict[entry[keystring]].append(entry)
else:
resultdict[entry[keystring]] = [entry]
return resultdict
def group_by_node(eventlist):
"""
<Purpose>
Aggregate a list of events by node. Each list in the returned dict will
contain both contactability and advertising events for the given node.
Doing this is kind of messy due to special cases, so this function is
provided for calculating a reasonable partition of events.
<Arguments>
eventlist:
A list of events. Due to the way events are grouped, you should not pass
in lists that have been filtered by the 'time' field.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A tuple (nodeeventlistdict, uncontactableeventlist). Nodeeventlistdict is a
dict of lists of events that maps md5 hashes of nodeids to lists of events
for that node. Uncontactableeventlist is a list of advertising events for
all the locations that were never contactable, and can probably be discarded
unless you are specifically interested in those sorts of events.
"""
uncontactableeventlist = []
nodeeventlistdict = {}
advertisingdict = {}
# Initialize the list of node events with an empty list for each node.
for event in eventlist:
if event['event_type'] == "contactable":
nodeeventlistdict[event['name']] = []
# Get a list of 'non_contactable' events grouped by node to aid in determining
# which node each advertising event corresponds to.
notcontactabledict = group_by_value(
filter_by_value(eventlist, 'event_type', ['not_contactable']),
'location')
for event in eventlist:
# Contactability events are trivial to group by node.
if event['event_type'] in ["contactable", "not_contactable"]:
nodeeventlistdict[event['name']].append(event)
# For newly advertising locations, use the current node (or next node if
# there is no current one) to be contactable at that location. If only one
# node uses this location this gives the right answer, and if more than one
# node uses the location this is a very reasonable guess.
elif event['event_type'] == "advertising":
if event['location'] in notcontactabledict:
notcontactablelist = notcontactabledict[event['location']]
else:
notcontactablelist = []
while len(notcontactablelist) > 1 and \
notcontactablelist[0]['time'] < event['time']:
notcontactablelist = notcontactablelist[1:]
if notcontactablelist:
nodeid = notcontactablelist[0]['name']
else:
nodeid = None
if nodeid is None:
uncontactableeventlist.append(event)
else:
nodeeventlistdict[nodeid].append(event)
eventinfo = (event['location'], event['name'])
advertisingdict[eventinfo] = nodeid
# For locations stopping advertising, we want to retrieve where we put the
# matching starting advertising event and attach this event to the same node.
elif event['event_type'] == "not_advertising":
eventinfo = (event['location'], event['name'])
nodeid = advertisingdict.pop(eventinfo)
if nodeid is None:
uncontactableeventlist.append(event)
else:
nodeeventlistdict[nodeid].append(event)
else:
raise Exception("Invalid event type: " + str(event['event_type']))
return (nodeeventlistdict, uncontactableeventlist)
def get_advertising(eventlist, timestamp):
"""
<Purpose>
Returns all the locations advertising at a specified time.
<Arguments>
eventlist:
A list of events. Due to the way events are matched, you should not pass
in lists that have been filtered by the 'time' field.
timestamp:
The time for which to get the list of advertising locations.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A list of 'location' fields for the advertising locations.
"""
advertisingset = set()
for event in eventlist:
if event['time'] > timestamp:
break
eventinfo = (event['location'], event['name'])
if event['event_type'] == "advertising":
advertisingset.add(eventinfo)
elif event['event_type'] == "not_advertising":
advertisingset.remove(eventinfo)
resultset = set()
for eventinfo in advertisingset:
resultset.add(eventinfo[0])
return list(resultset)
def get_contactable(eventlist, timestamp):
"""
<Purpose>
Returns all the nodes contactable at a specified time.
<Arguments>
eventlist:
A list of events. Due to the way events are matched, you should not pass
in lists that have been filtered by the 'time' field.
timestamp:
The time for which to get the list of contactable locations.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A list of nodeid hashes for the contactable nodes.
"""
contactableset = set()
for event in eventlist:
if event['time'] > timestamp:
break
eventinfo = (event['location'], event['name'])
if event['event_type'] == "contactable":
contactableset.add(eventinfo)
elif event['event_type'] == "not_contactable":
contactableset.remove(eventinfo)
resultset = set()
for eventinfo in contactableset:
resultset.add(eventinfo[1])
return list(resultset)
def get_stat_dict(inputdict, statfunc, *args):
"""
<Purpose>
Calculates some statistic for all the values in inputdict.
<Arguments>
inputdict:
      A dictionary. To use any of the statistic functions in this module, this
      parameter would be a dict with lists of events as its values.
statfunc:
      A function which takes for its first argument values of the same type as
      the values contained in inputdict.
*args:
Any additional arguments will be passed to statfunc after the value for
inputdict.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A dict mapping from keys in inputdict to the value of
statfunc(inputdict[key], *args) for the given key.
"""
resultdict = {}
for entry in inputdict:
resultdict[entry] = statfunc(inputdict[entry], *args)
return resultdict
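# Example sketch: extra positional arguments are forwarded to statfunc, so a
# common end time can be supplied to the time-based statistics defined below
# (per_node and events are assumed to come from group_by_node/close_events):
#
#   end = get_last_value(events, 'time')
#   adv_time = get_stat_dict(per_node, get_total_time_advertising, end)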
def filter_by_stat(statdict, valuelist):
"""
<Purpose>
Takes the result of get_stat_dict and filters by the calculated statistic.
<Arguments>
statdict:
A dictionary. Probably one returned by get_stat_dict.
valuelist:
A list of values to allow. All other values are filtered out.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
    A dict containing exactly those key-value pairs in statdict for which the
    value is in valuelist.
"""
resultdict = {}
for node in statdict:
if statdict[node] in valuelist:
resultdict[node] = statdict[node]
return resultdict
def filer_by_stat_range(statdict, start=None, end=None):
"""
<Purpose>
Takes the result of get_stat_dict and filters by the calculated statistic.
Instead of finding specific values, this accepts values within a given range.
<Arguments>
statdict:
A dictionary. Probably one returned by get_stat_dict.
start:
If specified, all entries where the value is less than start will be
filtered out of the returned dict.
end:
If specified, all entries where the value is greater than end will be
filtered out of the returned dict.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
    A dict containing exactly those key-value pairs in statdict for which the
    value is in the specified range.
"""
resultdict = {}
for node in statdict:
if (start is None or statdict[node] >= start) and \
(end is None or statdict[node] <= end):
resultdict[node] = statdict[node]
return resultdict
def group_by_stat(statdict):
"""
<Purpose>
Takes the result of get_stat_dict and aggregates like values together.
<Arguments>
statdict:
A dictionary. Probably one returned by get_stat_dict.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A dict mapping from values in statdict to lists of keys that map to that
value.
"""
resultdict = {}
for node in statdict:
if statdict[node] in resultdict:
resultdict[statdict[node]].append(node)
else:
resultdict[statdict[node]] = [node]
return resultdict
def sort_by_stat(statdict):
"""
<Purpose>
Takes the result of get_stat_dict and sorts the keys by their value.
<Arguments>
statdict:
A dictionary. Probably one returned by get_stat_dict.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A list which sorts keys in statdict according to the natural ordering of
their corresponding values.
"""
def compare(x, y):
if statdict[x] > statdict[y]:
return 1
elif statdict[x] < statdict[y]:
return -1
else:
return 0
nodelist = statdict.keys()
nodelist.sort(compare)
return nodelist
# The remaining functions may be useful on their own, but are intended to be
# passed into get_stat_dict as the statfunc parameter.
# Returns the most recent value of the keystring field for the given list of
# dicts. Inputlist is probably a list of events or nodedicts in this case.
def get_last_value(inputlist, keystring):
if inputlist:
return inputlist[-1][keystring]
else:
return None
# Returns the first (earliest) value of the keystring field for the given list of
# dicts. Inputlist is probably a list of events or nodedicts in this case.
def get_first_value(inputlist, keystring):
if inputlist:
return inputlist[0][keystring]
else:
return None
# Returns the total time spent advertising by the nodes in eventlist over the
# period of time ending with endtime. If endtime is unspecified, it will be
# inferred to be the time of the last entry in eventlist.
def get_total_time_advertising(eventlist, endtime=None):
if endtime is None:
endtime = get_last_value(eventlist, 'time')
advertisinglist = filter_by_value(eventlist, "event_type", ["advertising"])
notadvertisinglist = filter_by_value(eventlist, "event_type", ["not_advertising"])
total = 0
for event in notadvertisinglist:
total += event['time']
for event in advertisinglist:
total -= event['time']
total += endtime*(len(advertisinglist) - len(notadvertisinglist))
return total
# Returns the total number of times some nodes in eventlist started advertising.
def get_number_times_advertising(eventlist):
return len(filter_by_value(eventlist, "event_type", ["advertising"]))
# Returns the average time spent between starting and stopping advertising by
# the nodes in eventlist over the period of time ending with endtime.
def get_average_time_advertising(eventlist, endtime=None):
return get_total_time_advertising(eventlist, endtime) / \
get_number_times_advertising(eventlist)
# Returns the total time spent contactable by the nodes in eventlist over the
# period of time ending with endtime. If endtime is unspecified, it will be
# inferred to be the time of the last entry in eventlist.
def get_total_time_contactable(eventlist, endtime=None):
if endtime is None:
endtime = get_last_value(eventlist, 'time')
contactablelist = filter_by_value(eventlist, "event_type", ["contactable"])
notcontactablelist = filter_by_value(eventlist, "event_type", ["not_contactable"])
total = 0
for event in notcontactablelist:
total += event['time']
for event in contactablelist:
total -= event['time']
total += endtime*(len(contactablelist) - len(notcontactablelist))
return total
# Returns the number of times some nodes in eventlist started being contactable.
def get_number_times_contactable(eventlist):
return len(filter_by_value(eventlist, "event_type", ["contactable"]))
# Returns the average time spent between starting and stopping being contactable
# by the nodes in eventlist over the period of time ending with endtime.
def get_average_time_contactable(eventlist, endtime=None):
return get_total_time_contactable(eventlist, endtime) / \
get_number_times_contactable(eventlist)
| [
"USER@DOMAIN"
]
| USER@DOMAIN |
9bc4cb5d38e560f98cf8e7fd5812eddb7adfb613 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /1637. 两点之间不包含任何点的最宽垂直面积.py | ca285a4325e0ec01f5b84e4b3523ea887d7501c8 | []
| no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | """
Given n points on a 2D plane, where points[i] = [xi, yi], return the width of the widest vertical area between two points such that no point lies strictly inside the area.
A vertical area is a region of fixed width extending infinitely along the y-axis (i.e., of infinite height). The widest vertical area is the one with the maximum width.
Note that points on the edge of a vertical area are not considered to be inside it.
"""
def maxWidthOfVerticalArea(points):
points.sort()
res = 0
n = len(points)
for i in range(1, n):
# print(points[i][0])
res = max(points[i][0] - points[i-1][0], res)
return res
print(maxWidthOfVerticalArea(points = [[8,7],[9,9],[7,4],[9,7]]))
print(maxWidthOfVerticalArea(points = [[3,1],[9,0],[1,0],[1,4],[5,3],[8,8]])) | [
"[email protected]"
]
| |
ade449d196c2f24f481058c91a30d29132d82299 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1_service_reference.py | 819f741043d5bbd68d7c07a9e9013f4996efcf56 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,993 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ServiceReference(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'namespace': 'str'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace'
}
def __init__(self, name=None, namespace=None):
"""
V1ServiceReference - a model defined in Swagger
"""
self._name = None
self._namespace = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
@property
def name(self):
"""
Gets the name of this V1ServiceReference.
Name is the name of the service
:return: The name of this V1ServiceReference.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ServiceReference.
Name is the name of the service
:param name: The name of this V1ServiceReference.
:type: str
"""
self._name = name
@property
def namespace(self):
"""
Gets the namespace of this V1ServiceReference.
Namespace is the namespace of the service
:return: The namespace of this V1ServiceReference.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this V1ServiceReference.
Namespace is the namespace of the service
:param namespace: The namespace of this V1ServiceReference.
:type: str
"""
self._namespace = namespace
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServiceReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
97079f55849044be1ccbccb9e0557c71c60e4b04 | ba5e590578a9be8f8942eade9a8466872bf4c2bb | /hezkuntza/wizard/upload_education_teacher.py | 27fb3fab5f64e08e947cc0437d20f949f21eb84e | []
| no_license | babarlhr/hezkuntza_education-12.0 | e0c57285651659906fddf07701c60809f77fa9de | 6b0a75638a667961f7d3fabb294307ddc9a82dd5 | refs/heads/master | 2022-04-06T08:01:25.099591 | 2019-12-13T16:19:39 | 2019-12-13T16:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,164 | py | # Copyright 2019 Oihane Crucelaegui - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from ._common import _read_binary_file, _format_info,\
_convert_time_str_to_float
from eagle import _, exceptions, fields, models
from datetime import datetime
class UploadEducationTeacher(models.TransientModel):
_name = 'upload.education.teacher'
_description = 'Wizard to Upload Teachers'
file = fields.Binary(
string='Teachers (T06)', filters='*.txt')
def button_upload(self):
lines = _read_binary_file(self.file)
partner_obj = self.env['res.partner'].with_context(active_test=False)
employee_obj = self.env['hr.employee'].with_context(active_test=False)
hr_contract_obj = self.env['hr.contract'].with_context(
active_test=False)
department_obj = self.env['hr.department'].with_context(
active_test=False)
user_obj = self.env['res.users'].with_context(active_test=False)
academic_year_obj = self.env[
'education.academic_year'].with_context(active_test=False)
idtype_obj = self.env[
'education.idtype'].with_context(active_test=False)
position_obj = self.env[
'education.position'].with_context(active_test=False)
designation_obj = self.env[
'education.designation_level'].with_context(active_test=False)
workday_type_obj = self.env[
'education.workday_type'].with_context(active_test=False)
contract_type_obj = self.env[
'education.contract_type'].with_context(active_test=False)
workreason_obj = self.env['education.work_reason'].with_context(
active_test=False)
if not lines:
raise exceptions.Warning(_('Empty file.'))
else:
for line in lines:
if len(line) > 0:
line_type = _format_info(line[:1])
if line_type == '1':
center_code = _format_info(line[3:9])
center = partner_obj.search([
('education_code', '=', center_code),
('educational_category', '=', 'school'),
])
year = _format_info(line[9:13])
academic_year = academic_year_obj.search([
('name', 'ilike', '{}-'.format(year)),
])
if line_type == '2':
op_type = _format_info(line[1:2])
id_type = idtype_obj.search([
('education_code', '=', _format_info(line[2:6]))])
id_number = _format_info(line[6:21])
employee = employee_obj.search([
('edu_idtype_id', '=', id_type.id),
('identification_id', '=', id_number),
])
user = (
employee.user_id or
user_obj.search([
('vat', 'ilike', id_number),
('edu_idtype_id', '=', id_type.id)]))
contract = hr_contract_obj.search([
('employee_id', '=', employee.id),
('ed_center_id', '=', center.id),
('ed_academic_year_id', '=', academic_year.id),
('state', 'not in', ['close', 'cancel'])
])
if op_type == 'M':
                            # Modification or new record (Spanish: "MODIFICACIÓN o ALTA")
lastname = _format_info(line[21:71])
lastname2 = _format_info(line[71:121])
firstname = _format_info(line[121:151])
fullname = partner_obj._get_computed_name(
lastname, firstname, lastname2)
position1 = position_obj.search([
('type', '=', 'normal'),
('education_code', '=',
_format_info(line[151:154])),
])
position2 = position_obj.search([
('type', '=', 'normal'),
('education_code', '=',
_format_info(line[154:157])),
])
position3 = position_obj.search([
('type', '=', 'other'),
('education_code', '=',
_format_info(line[157:160])),
])
department_name = _format_info(line[160:310])
department = department_obj.search([
('name', '=', department_name)
])
                            if not department:
                                department = department_obj.create({
                                    'name': department_name,
                                })
dcode = '0{}'.format(_format_info(line[310:312]))
designation = designation_obj.search([
('education_code', '=', dcode),
])
wcode = '00000{}'.format(
_format_info(line[312:316]))
workday_type = workday_type_obj.search([
('education_code', '=', wcode),
])
work_hours = _convert_time_str_to_float(
_format_info(line[316:321]))
class_hours = _convert_time_str_to_float(
_format_info(line[321:326])
)
age = _format_info(line[326:327]) == '1'
health = _format_info(line[327:328]) == '1'
notes = _format_info(line[328:578])
workreason = workreason_obj.search([
('education_code', '=',
_format_info(line[578:582]))
])
date_str = _format_info(line[582:590])
if date_str != '':
date_start = datetime.strptime(
date_str, '%d%m%Y')
else:
date_start = fields.Datetime.now()
date_start = fields.Datetime.to_string(date_start)
contract_type = contract_type_obj.search([
('education_code', '=',
_format_info(line[590:594])),
])
contract_hours = _convert_time_str_to_float(
_format_info(line[594:599]))
vals = {
'name': fullname,
'department_id': department.id,
}
if not user:
vals.update({
'login': id_number,
'vat': id_number,
'edu_idtype_id': id_type.id,
'lastname': lastname,
'lastname2': lastname2,
'firstname': firstname,
})
user = user_obj.create(vals)
if not employee:
vals.update({
'edu_idtype_id': id_type.id,
'identification_id': id_number,
'user_id': user.id,
'gender': False,
'marital': False,
})
employee = employee_obj.create(vals)
else:
if not employee.user_id:
vals.update({
'user_id': user.id
})
employee.write(vals)
contract_vals = {
'name': '[{}] {}'.format(
academic_year.display_name,
employee.display_name),
'employee_id': employee.id,
'department_id': department.id,
'ed_center_id': center.id,
'ed_academic_year_id': academic_year.id,
'date_start': date_start,
'ed_position_id': position1.id,
'ed_position2_id': position2.id,
'ed_otherposition_id': position3.id,
'ed_designation_id': designation.id,
'ed_workday_type_id': workday_type.id,
'ed_contract_hours': contract_hours,
'ed_work_reason_id': workreason.id,
'ed_contract_type_id': contract_type.id,
'ed_reduction_age': age,
'ed_reduction_health': health,
'ed_work_hours': work_hours,
'ed_class_hours': class_hours,
'notes': notes,
'wage': 0.0,
}
if not contract:
hr_contract_obj.create(contract_vals)
else:
contract.write(contract_vals)
if op_type == 'B':
                            # Termination of the contract (Spanish: "BAJA")
if contract:
# termination_code = _format_info(line[21:24])
date_end = \
fields.Datetime.to_string(
datetime.strptime(
_format_info(line[24:32]),
'%d%m%Y'))
contract.write({
'date_end': date_end,
})
action = self.env.ref('hr.open_view_employee_list_my')
return action.read()[0]
| [
"[email protected]"
]
| |
c1fe45b3e7445a6563381aa858ccbee35fc7fb33 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/programming/languages/perl/XML-SAX/actions.py | 5573fe066768c8ff99b6aad789159fe54b90d0fb | []
| no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import pisitools
from pisi.actionsapi import perlmodules
def setup():
perlmodules.configure()
def build():
perlmodules.make()
def install():
perlmodules.install()
| [
"[email protected]"
]
| |
9cb597f1e7850ac04cafc87043fdb422489ce144 | 335944885d937316236102a80f76a696b48b51e1 | /scripts/segmentation_pipeline/lifted_features.py | d07d668c6b6cd5f4aa9b74a9dc94ef84ed3125cd | [
"MIT"
]
| permissive | chaubold/nifty | b33153c3ba2dd7907c1f365b76a31471f9313581 | c351624a7f14278eb241fb730f44bdd275563dec | refs/heads/master | 2021-01-11T17:43:23.443748 | 2017-01-22T12:48:21 | 2017-01-22T12:48:21 | 79,821,703 | 0 | 3 | null | 2017-12-22T10:47:30 | 2017-01-23T16:16:55 | C++ | UTF-8 | Python | false | false | 7,996 | py | import vigra
import nifty
import nifty.graph
import nifty.graph.rag
import nifty.graph.agglo
import numpy
import h5py
import sys
nrag = nifty.graph.rag
nagglo = nifty.graph.agglo
from reraise import *
from tools import *
@reraise_with_stack
def multicutFromLocalProbs(raw, rag, localProbs, liftedEdges):
u = liftedEdges[:,0]
v = liftedEdges[:,1]
# accumulate length (todo, implement function to just accumulate length)
eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, raw, [100,100],1)
eSizes = eFeatures[:,1]
eps = 0.0001
clipped = numpy.clip(localProbs, eps, 1.0-eps)
features = []
for beta in (0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7):
for powers in (0.0, 0.005, 0.1, 0.15, 0.2):
# the weight of the weight
wWeight = eSizes**powers
print "\n\nBETA ",beta
w = numpy.log((1.0-clipped)/(clipped)) + numpy.log((1.0-beta)/(beta)) * wWeight
obj = nifty.graph.multicut.multicutObjective(rag, w)
factory = obj.multicutIlpCplexFactory()
solver = factory.create(obj)
visitor = obj.multicutVerboseVisitor()
#ret = solver.optimize(visitor=visitor)
ret = solver.optimize()
res = ret[u] != ret[v]
features.append(res[:,None])
features = numpy.concatenate(features, axis=1)
mean = numpy.mean(features, axis=1)[:,None]
features = numpy.concatenate([features, mean], axis=1)
return features
@reraise_with_stack
def ucmFromLocalProbs(raw, rag, localProbs, liftedEdges, liftedObj):
u = liftedEdges[:,0]
v = liftedEdges[:,1]
# accumulate length (todo, implement function to just accumulate length)
eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, raw, [100,100],1)
eSizes = eFeatures[:,1]
    nSizes = nFeatures[:,1]
feats = nifty.graph.lifted_multicut.liftedUcmFeatures(
objective=liftedObj,
edgeIndicators=localProbs,
edgeSizes=eSizes,
nodeSizes=nSizes,
sizeRegularizers=[0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
0.9, 0.95]
)
return feats
@reraise_with_stack
def ucmFromHessian(raw, rag, liftedEdges, liftedObj):
u = liftedEdges[:,0]
v = liftedEdges[:,1]
feats = []
for sigma in [1.0, 2.0, 3.0, 4.0, 5.0]:
pf = vigra.filters.hessianOfGaussianEigenvalues(raw, sigma)[:,:,0]
eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, pf, [100,100],1)
edgeIndicator = eFeatures[:,0]
eSizes = eFeatures[:,1]
        nSizes = nFeatures[:,1]
featsB = nifty.graph.lifted_multicut.liftedUcmFeatures(
objective=liftedObj,
edgeIndicators=edgeIndicator,
edgeSizes=eSizes,
nodeSizes=nSizes,
sizeRegularizers=[0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
0.9, 0.95]
)
feats.append(featsB)
# different axis ordering as usual
return numpy.concatenate(feats,axis=0)
@reraise_with_stack
def liftedFeaturesFromLocalProbs(raw, rag, localProbs, liftedEdges, liftedObj, featureFile):
mcFeatureFile = featureFile + "mc.h5"
if not hasH5File(mcFeatureFile):
mcFeatures = multicutFromLocalProbs(raw=raw, rag=rag, localProbs=localProbs,
liftedEdges=liftedEdges)
f5 = h5py.File(mcFeatureFile, 'w')
f5['data'] = mcFeatures
f5.close()
else:
mcFeatures = h5Read(mcFeatureFile)
ucmFeatureFile = featureFile + "ucm.h5"
if not hasH5File(ucmFeatureFile):
ucmFeatures = ucmFromLocalProbs(raw=raw, rag=rag, localProbs=localProbs,
liftedEdges=liftedEdges,
liftedObj=liftedObj)
f5 = h5py.File(ucmFeatureFile, 'w')
f5['data'] = ucmFeatures
f5.close()
else:
ucmFeatures = h5Read(ucmFeatureFile)
# combine
features = numpy.concatenate([mcFeatures, ucmFeatures.swapaxes(0,1)],axis=1)
f5 = h5py.File(featureFile, 'w')
f5['data'] = features
f5.close()
@reraise_with_stack
def accumlatedLiftedFeatures(raw, pmap, rag, liftedEdges, liftedObj):
uv = liftedEdges
u = uv[:,0]
v = uv[:,1]
# geometric edge features
geometricFeaturs = nifty.graph.rag.accumulateGeometricNodeFeatures(rag,
blockShape=[75, 75],
numberOfThreads=1)
nodeSize = geometricFeaturs[:,0]
nodeCenter = geometricFeaturs[:,1:2]
nodeAxisA = geometricFeaturs[:,2:4]
    nodeAxisB = geometricFeaturs[:,4:6]
diff = (nodeCenter[u,:] - nodeCenter[v,:])**2
diff = numpy.sum(diff,axis=1)
allEdgeFeat = [
# sizes
(nodeSize[u] + nodeSize[v])[:,None],
(numpy.abs(nodeSize[u] - nodeSize[v]))[:,None],
(nodeSize[u] * nodeSize[v])[:,None],
(numpy.minimum(nodeSize[u] , nodeSize[v]))[:,None],
(numpy.maximum(nodeSize[u] , nodeSize[v]))[:,None],
diff[:,None]
]
pixelFeats = [
raw[:,:,None],
]
if pmap is not None:
pixelFeats.append(pmap[:,:,None])
for sigma in (1.0, 2.0, 4.0):
pf = [
vigra.filters.hessianOfGaussianEigenvalues(raw, 1.0*sigma),
vigra.filters.structureTensorEigenvalues(raw, 1.0*sigma, 2.0*sigma),
vigra.filters.gaussianGradientMagnitude(raw, 1.0*sigma)[:,:,None],
vigra.filters.gaussianSmoothing(raw, 1.0*sigma)[:,:,None]
]
pixelFeats.extend(pf)
if pmap is not None:
pixelFeats.append(vigra.filters.gaussianSmoothing(pmap, 1.0*sigma)[:,:,None])
pixelFeats = numpy.concatenate(pixelFeats, axis=2)
for i in range(pixelFeats.shape[2]):
pixelFeat = pixelFeats[:,:,i]
nodeFeat = nifty.graph.rag.accumulateNodeStandartFeatures(
rag=rag, data=pixelFeat.astype('float32'),
minVal=pixelFeat.min(),
maxVal=pixelFeat.max(),
blockShape=[75, 75],
numberOfThreads=10
)
uFeat = nodeFeat[u,:]
vFeat = nodeFeat[v,:]
fList =[
uFeat + vFeat,
uFeat * vFeat,
numpy.abs(uFeat-vFeat),
numpy.minimum(uFeat,vFeat),
numpy.maximum(uFeat,vFeat),
]
edgeFeat = numpy.concatenate(fList, axis=1)
allEdgeFeat.append(edgeFeat)
allEdgeFeat = numpy.concatenate(allEdgeFeat, axis=1)
return allEdgeFeat
@reraise_with_stack
def liftedFeatures(raw, pmap, rag, liftedEdges, liftedObj, distances, featureFile):
ucmFeatureFile = featureFile + "ucm.h5"
if not hasH5File(ucmFeatureFile):
ucmFeatures = ucmFromHessian(raw=raw, rag=rag,
liftedEdges=liftedEdges,
liftedObj=liftedObj)
f5 = h5py.File(ucmFeatureFile, 'w')
f5['data'] = ucmFeatures
f5.close()
else:
ucmFeatures = h5Read(ucmFeatureFile)
accFeatureFile = featureFile + "acc.h5"
if not hasH5File(accFeatureFile):
accFeatrues = accumlatedLiftedFeatures(raw=raw, pmap=pmap,
rag=rag,
liftedEdges=liftedEdges,
liftedObj=liftedObj)
f5 = h5py.File(accFeatureFile, 'w')
f5['data'] = accFeatrues
f5.close()
else:
accFeatrues = h5Read(accFeatureFile)
# combine
features = numpy.concatenate([accFeatrues,distances[:,None], ucmFeatures.swapaxes(0,1)],axis=1)
f5 = h5py.File(featureFile, 'w')
f5['data'] = features
f5.close() | [
"[email protected]"
]
| |
b5b422227dabc1bfe8fc6b9334c87ba02f816f48 | 5abdbe26ad89d50761e505d02c35ea184d79f712 | /users/views.py | a97471d6b3ea8fef81e3388ad3b5f6e8a24fd6db | []
| no_license | liyongjun-brayan/xuexi | 5c00abaeadb46caa4a63fdcd316fabd2d1ebdb15 | b5356a5115b34dc1d5f627215aef780d7d5a0693 | refs/heads/master | 2021-06-25T10:25:12.602434 | 2019-08-27T02:27:23 | 2019-08-27T02:27:23 | 204,632,981 | 1 | 0 | null | 2021-06-10T21:54:15 | 2019-08-27T06:16:39 | Python | UTF-8 | Python | false | false | 1,128 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
def logout_view(request):
"""注销用户"""
logout(request)
return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
    # Register a new user
if request.method != 'POST':
        # Show a blank registration form
form = UserCreationForm()
else:
        # Process the submitted form
form= UserCreationForm(data=request.POST)
if form.is_valid():
new_user = form.save()
            # Log the user in automatically, then redirect to the home page
authenticated_user= authenticate(username=new_user.username,
password=request.POST['password1'])
login(request, authenticated_user)
return HttpResponseRedirect(reverse('learning_logs:index'))
context = {'form': form}
return render(request, 'users/register.html', context) | [
"[email protected]"
]
| |
823fbcd52596c818081a713b22fef9460caaa729 | 0abcbbac1efc95877f159c65d6f898e749b1bf09 | /MyMusicApp/blog/migrations/0002_auto_20190913_2052.py | ccada31f79d7ce09bbd01e6324ea7b32fcd3506e | [
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"bzip2-1.0.6",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL",
"LicenseRef-scancode-python-cwi"
]
| permissive | kells4real/MusicApp | 5055e465b46c39e0687c98b7a8adbb2203ac9156 | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | refs/heads/master | 2022-10-07T15:49:38.406106 | 2019-10-24T19:55:52 | 2019-10-24T19:55:52 | 197,428,434 | 0 | 1 | MIT | 2022-10-02T02:23:26 | 2019-07-17T16:48:16 | Python | UTF-8 | Python | false | false | 637 | py | # Generated by Django 2.2.2 on 2019-09-13 19:52
import blog.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, default='default.jpg', null=True, upload_to=blog.models.image_upload),
),
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(blank=True, max_length=40, null=True, unique=True),
),
]
| [
"[email protected]"
]
| |
ef65cb5846d5c0c185b517fe86785d6f5e79bb80 | 84290c584128de3e872e66dc99b5b407a7a4612f | /Writing Functions in Python/More on Decorators/Print the return type.py | 5e5dcd69342c2f3687cdaafabdcedb9d3637648a | []
| no_license | BautizarCodigo/DataAnalyticEssentials | 91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789 | 7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57 | refs/heads/main | 2023-04-11T04:42:17.977491 | 2021-03-21T19:05:17 | 2021-03-21T19:05:17 | 349,784,608 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | def print_return_type(func):
# Define wrapper(), the decorated function
def wrapper(*args, **kwargs):
# Call the function being decorated
result = func(*args, **kwargs)
print('{}() returned type {}'.format(
func.__name__, type(result)
))
return result
# Return the decorated function
return wrapper
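# Editor's note: when the decorated calls below run, each one first prints a
# line such as "foo() returned type <class 'int'>" (or <class 'list'>,
# <class 'dict'>), and the surrounding print() then shows the original value.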
@print_return_type
def foo(value):
return value
print(foo(42))
print(foo([1, 2, 3]))
print(foo({'a': 42})) | [
"[email protected]"
]
| |
672ab02fe434eb1a41749a43cc63853910b29c5f | a19275ff09caf880e135bce76dc7a0107ec0369e | /catkin_ws/src/robot_python/nodes/send_single_cmd_gaebo_node.py | 0e95c32d3222631780ea123c1986f759e3d06a86 | []
| no_license | xtyzhen/Multi_arm_robot | e201c898a86406c1b1deb82326bb2157d5b28975 | 15daf1a80c781c1c929ba063d779c0928a24b117 | refs/heads/master | 2023-03-21T14:00:24.128957 | 2021-03-10T12:04:36 | 2021-03-10T12:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,747 | py | #!/usr/bin/env python
#-*-coding:utf-8-*-
# This script sends joint angle commands to the arm controllers.
# Author: Chen Yongting
# Copyright: Harbin Institute of Technology
# Date: first draft 2019.11.6
import rospy
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import Float64
import os
import numpy as np
from robot_python import FileOpen
def talker():
    # Create a publisher for each joint effort controller and init the node
pub1 = rospy.Publisher("armc/joint1_effort_controller/command", Float64, queue_size=1)
pub2 = rospy.Publisher("armc/joint2_effort_controller/command", Float64, queue_size=1)
pub3 = rospy.Publisher("armc/joint3_effort_controller/command", Float64, queue_size=1)
pub4 = rospy.Publisher("armc/joint4_effort_controller/command", Float64, queue_size=1)
pub5 = rospy.Publisher("armc/joint5_effort_controller/command", Float64, queue_size=1)
pub6 = rospy.Publisher("armc/joint6_effort_controller/command", Float64, queue_size=1)
pub7 = rospy.Publisher("armc/joint7_effort_controller/command", Float64, queue_size=1)
rospy.init_node("joint_position_command", anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz
    # Read the command file
file_path = os.path.abspath("..")
file_name = 'data/position.txt'
path = os.path.join(file_path,file_name)
command_pos = np.array(FileOpen.read(path))
#print command_pos.shape()
    # Copy the command data into a local array
kk = len(command_pos[:, 0])
n = len(command_pos[0, :])
print "数据个数:%d" % kk
print "数据长度:%d" % n
command_data = np.zeros([kk,n])
for i in range(kk):
for j in range(n):
command_data[i,j] = command_pos[i,j]
k = 0
while not rospy.is_shutdown():
if k == kk:
break
tip_str = "第 %s 次命令:" % k
rospy.loginfo(tip_str)
joint1_data = Float64()
joint2_data = Float64()
joint3_data = Float64()
joint4_data = Float64()
joint5_data = Float64()
joint6_data = Float64()
joint7_data = Float64()
joint1_data.data = command_data[k, 0]
joint2_data.data = command_data[k, 1]
joint3_data.data = command_data[k, 2]
joint4_data.data = command_data[k, 3]
joint5_data.data = command_data[k, 4]
joint6_data.data = command_data[k, 5]
joint7_data.data = command_data[k, 6]
print "send data:%s" % command_data[k, :]
pub1.publish(joint1_data)
pub2.publish(joint2_data)
pub3.publish(joint3_data)
pub4.publish(joint4_data)
pub5.publish(joint5_data)
pub6.publish(joint6_data)
pub7.publish(joint7_data)
rate.sleep()
k = k + 1
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
]
| |
ff78462a33b2ecb3dd40c291fb4d19cfd65795d3 | 50639b8c539b9d69539f9b9016527f831cee213d | /LC/LC17-LetterCombinationsOfPhoneNumber.py | 2031ec7bbc8fc07ceae9ae08c38a89b1b4edec00 | []
| no_license | yaelBrown/pythonSandbox | fe216b2c17d66b6dde22dd45fe2a91f1315f2db4 | abac8cabeb7a2b4fbbe1fc8655f7f52a182eaabe | refs/heads/master | 2023-08-10T09:42:26.249444 | 2023-08-03T21:54:37 | 2023-08-03T21:54:37 | 194,980,832 | 0 | 1 | null | 2023-05-02T18:01:41 | 2019-07-03T04:42:16 | Jupyter Notebook | UTF-8 | Python | false | false | 800 | py | """
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. Return the answer in any order.
A mapping of digits to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
"""
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if digits == "": return []
map = {
"0": "",
"1": "",
"2": "abc",
"3": "def",
"4": "ghi",
"5": "jkl",
"6": "mno",
"7": "pqrs",
"8": "tuv",
"9": "wxyz"
}
out = []
for d in digits:
            temp = map[d]
            if not temp:
                # digits "0" and "1" map to no letters, so skip them
                continue
            # cross every partial combination built so far with each letter
            out = [prefix + ch for prefix in (out or [""]) for ch in temp]
        return out
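# Illustrative check (editor's addition): Solution().letterCombinations("23")
# yields ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].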
| [
"[email protected]"
]
| |
60c2e8e9ff8c5b9bd21b22fa945733c893bb0522 | 3e0e674e07e757dfb23e18ba30dbb440c0966848 | /树二.py | d983ceabbf739253db8b0bd723fcaccfa61ffb57 | []
| no_license | Jasonmes/Algorithm--Advanced | 0bfaa844127ff146997d2dd19b4943be29467fad | 3f29b07b6d55197c5d21f44a474f6e96021cd5b0 | refs/heads/master | 2020-03-27T12:34:49.537881 | 2018-08-29T06:26:38 | 2018-08-29T06:26:38 | 146,554,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Jason Mess
Tree = [['a', 'b'], ['c'], ['b', ['e', 'f']]]
print(Tree[0][1])
print(Tree[2][1][0])
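# Expected output (editor's note): Tree[0][1] is 'b' and Tree[2][1][0] is 'e'.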
| [
"[email protected]"
]
| |
7e506f8c9cc581c152b857ffffa4687b27b09798 | eb9c56cff249c767d351a6a18ebd100e8343b874 | /algorithms/OR_ELM.py | c1d4e204c239bbd9b2ed953a2abcb8458a0343df | []
| no_license | lyzl2010/Online-Recurrent-Extreme-Learning-Machine | cb95dc406f2c2b67adec5cde626600178bcadf5d | 5597402e6970a16d09537df3d38e60ac96229fae | refs/heads/master | 2020-05-23T06:10:25.112686 | 2018-08-02T02:26:23 | 2018-08-02T02:26:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,459 | py | # ----------------------------------------------------------------------
# Copyright (c) 2017, Jin-Man Park. All rights reserved.
# Contributors: Jin-Man Park and Jong-hwan Kim
# Affiliation: Robot Intelligence Technology Lab.(RITL), Korea Advanced Institute of Science and Technology (KAIST)
# URL: http://rit.kaist.ac.kr
# E-mail: [email protected]
# Citation: Jin-Man Park, and Jong-Hwan Kim. "Online recurrent extreme learning machine and its application to
# time-series prediction." Neural Networks (IJCNN), 2017 International Joint Conference on. IEEE, 2017.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
# ----------------------------------------------------------------------
# This code is originally from Numenta's Hierarchical Temporal Memory (HTM) code
# (Numenta Platform for Intelligent Computing (NuPIC))
# And modified to run Online Recurrent Extreme Learning Machine (OR-ELM)
# ----------------------------------------------------------------------
import numpy as np
from numpy.linalg import pinv
from numpy.linalg import inv
from FOS_ELM import FOSELM
#from plot import orthogonalization
def orthogonalization(Arr):
[Q, S, _] = np.linalg.svd(Arr)
tol = max(Arr.shape) * np.spacing(max(S))
r = np.sum(S > tol)
  Q = Q[:, :r]
  return Q
def sigmoidActFunc(features, weights, bias):
assert(features.shape[1] == weights.shape[1])
(numSamples, numInputs) = features.shape
(numHiddenNeuron, numInputs) = weights.shape
V = np.dot(features, np.transpose(weights))
for i in range(numHiddenNeuron):
V[:, i] += bias[0, i]
H = 1 / (1+np.exp(-V))
return H
def linear_recurrent(features, inputW,hiddenW,hiddenA, bias):
(numSamples, numInputs) = features.shape
(numHiddenNeuron, numInputs) = inputW.shape
V = np.dot(features, np.transpose(inputW)) + np.dot(hiddenA,hiddenW)
for i in range(numHiddenNeuron):
V[:, i] += bias[0, i]
return V
def sigmoidAct_forRecurrent(features,inputW,hiddenW,hiddenA,bias):
(numSamples, numInputs) = features.shape
(numHiddenNeuron, numInputs) = inputW.shape
V = np.dot(features, np.transpose(inputW)) + np.dot(hiddenA,hiddenW)
for i in range(numHiddenNeuron):
V[:, i] += bias[0, i]
H = 1 / (1 + np.exp(-V))
return H
def sigmoidActFunc(V):
H = 1 / (1+np.exp(-V))
return H
class ORELM(object):
def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, LN=True, AE=True, ORTH=True,
inputWeightForgettingFactor=0.999,
outputWeightForgettingFactor=0.999,
hiddenWeightForgettingFactor=0.999):
self.activationFunction = activationFunction
self.inputs = inputs
self.outputs = outputs
self.numHiddenNeurons = numHiddenNeurons
# input to hidden weights
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
# hidden layer to hidden layer wieghts
self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
# initial hidden layer activation
self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 -1
self.H = self.initial_H
self.LN = LN
self.AE = AE
self.ORTH = ORTH
# bias of hidden units
self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
# hidden to output layer connection
self.beta = np.random.random((self.numHiddenNeurons, self.outputs))
# auxiliary matrix used for sequential learning
self.M = inv(0.00001 * np.eye(self.numHiddenNeurons))
self.forgettingFactor = outputWeightForgettingFactor
self.trace=0
self.thresReset=0.001
if self.AE:
self.inputAE = FOSELM(inputs = inputs,
outputs = inputs,
numHiddenNeurons = numHiddenNeurons,
activationFunction = activationFunction,
LN= LN,
forgettingFactor=inputWeightForgettingFactor,
ORTH = ORTH
)
self.hiddenAE = FOSELM(inputs = numHiddenNeurons,
outputs = numHiddenNeurons,
numHiddenNeurons = numHiddenNeurons,
activationFunction=activationFunction,
LN= LN,
ORTH = ORTH
)
def layerNormalization(self, H, scaleFactor=1, biasFactor=0):
H_normalized = (H-H.mean())/(np.sqrt(H.var() + 0.000001))
H_normalized = scaleFactor*H_normalized+biasFactor
return H_normalized
def __calculateInputWeightsUsingAE(self, features):
self.inputAE.train(features=features,targets=features)
return self.inputAE.beta
def __calculateHiddenWeightsUsingAE(self, features):
self.hiddenAE.train(features=features,targets=features)
return self.hiddenAE.beta
def calculateHiddenLayerActivation(self, features):
"""
Calculate activation level of the hidden layer
:param features feature matrix with dimension (numSamples, numInputs)
:return: activation level (numSamples, numHiddenNeurons)
"""
if self.activationFunction is "sig":
if self.AE:
self.inputWeights = self.__calculateInputWeightsUsingAE(features)
self.hiddenWeights = self.__calculateHiddenWeightsUsingAE(self.H)
V = linear_recurrent(features=features,
inputW=self.inputWeights,
hiddenW=self.hiddenWeights,
hiddenA=self.H,
bias= self.bias)
if self.LN:
V = self.layerNormalization(V)
self.H = sigmoidActFunc(V)
else:
print " Unknown activation function type"
raise NotImplementedError
return self.H
def initializePhase(self, lamb=0.0001):
"""
Step 1: Initialization phase
:param features feature matrix with dimension (numSamples, numInputs)
:param targets target matrix with dimension (numSamples, numOutputs)
"""
if self.activationFunction is "sig":
self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
else:
print " Unknown activation function type"
raise NotImplementedError
self.M = inv(lamb*np.eye(self.numHiddenNeurons))
self.beta = np.zeros([self.numHiddenNeurons,self.outputs])
# randomly initialize the input->hidden connections
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
self.inputWeights = self.inputWeights * 2 - 1
if self.AE:
self.inputAE.initializePhase(lamb=0.00001)
self.hiddenAE.initializePhase(lamb=0.00001)
else:
# randomly initialize the input->hidden connections
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
self.inputWeights = self.inputWeights * 2 - 1
if self.ORTH:
if self.numHiddenNeurons > self.inputs:
self.inputWeights = orthogonalization(self.inputWeights)
else:
self.inputWeights = orthogonalization(self.inputWeights.transpose())
self.inputWeights = self.inputWeights.transpose()
# hidden layer to hidden layer wieghts
self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
self.hiddenWeights = self.hiddenWeights * 2 - 1
if self.ORTH:
self.hiddenWeights = orthogonalization(self.hiddenWeights)
def reset(self):
self.H = self.initial_H
def train(self, features, targets,RESETTING=False):
"""
Step 2: Sequential learning phase
:param features feature matrix with dimension (numSamples, numInputs)
:param targets target matrix with dimension (numSamples, numOutputs)
"""
(numSamples, numOutputs) = targets.shape
assert features.shape[0] == targets.shape[0]
H = self.calculateHiddenLayerActivation(features)
Ht = np.transpose(H)
try:
scale = 1/(self.forgettingFactor)
self.M = scale*self.M - np.dot(scale*self.M,
np.dot(Ht, np.dot(
pinv(np.eye(numSamples) + np.dot(H, np.dot(scale*self.M, Ht))),
np.dot(H, scale*self.M))))
if RESETTING:
beforeTrace=self.trace
self.trace=self.M.trace()
print np.abs(beforeTrace - self.trace)
if np.abs(beforeTrace - self.trace) < self.thresReset:
print self.M
eig,_=np.linalg.eig(self.M)
lambMin=min(eig)
lambMax=max(eig)
#lamb = (lambMax+lambMin)/2
lamb = lambMax
lamb = lamb.real
self.M= lamb*np.eye(self.numHiddenNeurons)
print "reset"
print self.M
self.beta = (self.forgettingFactor)*self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, (self.forgettingFactor)*self.beta)))
#self.beta = self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, self.beta)))
except np.linalg.linalg.LinAlgError:
print "SVD not converge, ignore the current training cycle"
# else:
# raise RuntimeError
def predict(self, features):
"""
Make prediction with feature matrix
:param features: feature matrix with dimension (numSamples, numInputs)
:return: predictions with dimension (numSamples, numOutputs)
"""
H = self.calculateHiddenLayerActivation(features)
prediction = np.dot(H, self.beta)
return prediction
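# --- Illustrative usage (editor's sketch; sizes and the data stream are made up) ---
#     net = ORELM(inputs=4, outputs=1, numHiddenNeurons=23, activationFunction="sig")
#     net.initializePhase(lamb=0.0001)
#     for X, y in stream:                    # X: (1, 4) array, y: (1, 1) array
#         net.train(features=X, targets=y)   # online update of the output weights
#         y_hat = net.predict(X)             # one-step-ahead prediction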
| [
"[email protected]"
]
| |
6020e117613f6469e3277ef47d4851fe57e51de7 | f64aaa4b0f78774464033148290a13453c96528e | /generated/intermediate/ansible-module-sdk/azure_rm_eventgrideventsubscription_info.py | 5c9b8504415906ae504d64819ed9b7b0e9b86732 | [
"MIT"
]
| permissive | audevbot/autorest.cli.debug | e8996270a6a931f243532f65782c7f8fbb1b55c6 | a507fb6e2dd7826212537f27d583f203aac1c28f | refs/heads/master | 2020-06-04T05:25:17.018993 | 2019-08-27T21:57:18 | 2019-08-27T21:57:18 | 191,876,321 | 0 | 0 | MIT | 2019-08-28T05:57:19 | 2019-06-14T04:35:39 | Python | UTF-8 | Python | false | false | 17,488 | py | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_eventgrideventsubscription_info
version_added: '2.9'
short_description: Get EventSubscription info.
description:
- Get info of EventSubscription.
options:
scope:
description:
- >-
The scope of the event subscription. The scope can be a subscription, or
a resource group, or a top level resource belonging to a resource
provider namespace, or an EventGrid topic. For example, use
'/subscriptions/{subscriptionId}/' for a subscription,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for
a resource group, and
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}'
for a resource, and
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'
for an EventGrid topic.
type: str
event_subscription_name:
description:
- Name of the event subscription
type: str
location:
description:
- Name of the location
type: str
topic_type_name:
description:
- Name of the topic type
type: str
resource_group:
description:
- The name of the resource group within the user's subscription.
type: str
provider_namespace:
description:
- Namespace of the provider of the topic
type: str
resource_type_name:
description:
- Name of the resource type
type: str
name:
description:
- Name of the resource
type: str
id:
description:
- Fully qualified identifier of the resource
type: str
type:
description:
- Type of the resource
type: str
topic:
description:
- Name of the topic of the event subscription.
type: str
provisioning_state:
description:
- Provisioning state of the event subscription.
type: str
destination:
description:
- >-
Information about the destination where events have to be delivered for
the event subscription.
type: dict
filter:
description:
- Information about the filter for the event subscription.
type: dict
suboptions:
subject_begins_with:
description:
- >-
An optional string to filter events for an event subscription based
on a resource path prefix.<br>The format of this depends on the
publisher of the events. <br>Wildcard characters are not supported
in this path.
type: str
subject_ends_with:
description:
- >-
An optional string to filter events for an event subscription based
on a resource path suffix.<br>Wildcard characters are not supported
in this path.
type: str
included_event_types:
description:
- >-
A list of applicable event types that need to be part of the event
subscription. <br>If it is desired to subscribe to all event types,
the string "all" needs to be specified as an element in this list.
type: list
is_subject_case_sensitive:
description:
- >-
Specifies if the SubjectBeginsWith and SubjectEndsWith properties of
the filter <br>should be compared in a case sensitive manner.
type: boolean
labels:
description:
- List of user defined labels.
type: list
retry_policy:
description:
- >-
The retry policy for events. This can be used to configure maximum
number of delivery attempts and time to live for events.
type: dict
suboptions:
max_delivery_attempts:
description:
- Maximum number of delivery retry attempts for events.
type: number
event_time_to_live_in_minutes:
description:
- Time To Live (in minutes) for events.
type: number
dead_letter_destination:
description:
- The DeadLetter destination of the event subscription.
type: dict
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: EventSubscriptions_ListGlobalBySubscription
azure_rm_eventgrideventsubscription_info: {}
- name: EventSubscriptions_GetForSubscription
azure_rm_eventgrideventsubscription_info:
scope: subscriptions/5b4b650e-28b9-4790-b3ab-ddbd88d727c4
event_subscription_name: examplesubscription3
- name: EventSubscriptions_GetForResourceGroup
azure_rm_eventgrideventsubscription_info:
scope: >-
subscriptions/5b4b650e-28b9-4790-b3ab-ddbd88d727c4/resourceGroups/examplerg
event_subscription_name: examplesubscription2
- name: EventSubscriptions_GetForResource
azure_rm_eventgrideventsubscription_info:
scope: >-
subscriptions/5b4b650e-28b9-4790-b3ab-ddbd88d727c4/resourceGroups/examplerg/providers/Microsoft.EventHub/namespaces/examplenamespace1
event_subscription_name: examplesubscription1
- name: EventSubscriptions_GetForCustomTopic
azure_rm_eventgrideventsubscription_info:
scope: >-
subscriptions/5b4b650e-28b9-4790-b3ab-ddbd88d727c4/resourceGroups/examplerg/providers/Microsoft.EventGrid/topics/exampletopic2
event_subscription_name: examplesubscription1
- name: EventSubscriptions_ListRegionalBySubscription
azure_rm_eventgrideventsubscription_info:
location: myLocation
- name: EventSubscriptions_ListGlobalBySubscriptionForTopicType
azure_rm_eventgrideventsubscription_info:
topic_type_name: myTopicType
- name: EventSubscriptions_ListGlobalByResourceGroup
azure_rm_eventgrideventsubscription_info:
resource_group: myResourceGroup
- name: EventSubscriptions_ListRegionalBySubscriptionForTopicType
azure_rm_eventgrideventsubscription_info:
location: myLocation
topic_type_name: myTopicType
- name: EventSubscriptions_ListRegionalByResourceGroup
azure_rm_eventgrideventsubscription_info:
location: myLocation
resource_group: myResourceGroup
- name: EventSubscriptions_ListGlobalByResourceGroupForTopicType
azure_rm_eventgrideventsubscription_info:
topic_type_name: myTopicType
resource_group: myResourceGroup
- name: EventSubscriptions_ListRegionalByResourceGroupForTopicType
azure_rm_eventgrideventsubscription_info:
location: myLocation
topic_type_name: myTopicType
resource_group: myResourceGroup
- name: EventSubscriptions_ListByResource
azure_rm_eventgrideventsubscription_info:
resource_group: myResourceGroup
provider_namespace: Microsoft.EventGrid
resource_type_name: topics
name: myResourceType
'''
RETURN = '''
event_subscriptions:
description: >-
A list of dict results where the key is the name of the EventSubscription
and the values are the facts for that EventSubscription.
returned: always
type: complex
contains:
eventsubscription_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- Fully qualified identifier of the resource
returned: always
type: str
sample: null
name:
description:
- Name of the resource
returned: always
type: str
sample: null
type:
description:
- Type of the resource
returned: always
type: str
sample: null
properties:
description:
- Properties of the event subscription
returned: always
type: dict
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.eventgrid import EventGridManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMEventSubscriptionsInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
scope=dict(
type='str'
),
event_subscription_name=dict(
type='str'
),
location=dict(
type='str'
),
topic_type_name=dict(
type='str'
),
resource_group=dict(
type='str'
),
provider_namespace=dict(
type='str'
),
resource_type_name=dict(
type='str'
),
name=dict(
type='str'
)
)
self.scope = None
self.event_subscription_name = None
self.location = None
self.topic_type_name = None
self.resource_group = None
self.provider_namespace = None
self.resource_type_name = None
self.name = None
self.id = None
self.name = None
self.type = None
self.properties = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMEventSubscriptionsInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(EventGridManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and
self.provider_namespace is not None and
self.resource_type_name is not None and
self.name is not None):
self.results['event_subscriptions'] = self.format_item(self.listbyresource())
elif (self.resource_group is not None and
self.location is not None and
self.topic_type_name is not None):
self.results['event_subscriptions'] = self.format_item(self.listregionalbyresourcegroupfortopictype())
elif (self.resource_group is not None and
self.topic_type_name is not None):
self.results['event_subscriptions'] = self.format_item(self.listglobalbyresourcegroupfortopictype())
elif (self.resource_group is not None and
self.location is not None):
self.results['event_subscriptions'] = self.format_item(self.listregionalbyresourcegroup())
elif (self.location is not None and
self.topic_type_name is not None):
self.results['event_subscriptions'] = self.format_item(self.listregionalbysubscriptionfortopictype())
elif (self.resource_group is not None):
self.results['event_subscriptions'] = self.format_item(self.listglobalbyresourcegroup())
elif (self.topic_type_name is not None):
self.results['event_subscriptions'] = self.format_item(self.listglobalbysubscriptionfortopictype())
elif (self.location is not None):
self.results['event_subscriptions'] = self.format_item(self.listregionalbysubscription())
elif (self.scope is not None and
self.event_subscription_name is not None):
self.results['event_subscriptions'] = self.format_item(self.get())
else:
self.results['event_subscriptions'] = [self.format_item(self.listglobalbysubscription())]
return self.results
def listbyresource(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_by_resource(resource_group_name=self.resource_group,
provider_namespace=self.provider_namespace,
resource_type_name=self.resource_type_name,
resource_name=self.name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listregionalbyresourcegroupfortopictype(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_regional_by_resource_group_for_topic_type(resource_group_name=self.resource_group,
location=self.location,
topic_type_name=self.topic_type_name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listglobalbyresourcegroupfortopictype(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_global_by_resource_group_for_topic_type(resource_group_name=self.resource_group,
topic_type_name=self.topic_type_name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listregionalbyresourcegroup(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_regional_by_resource_group(resource_group_name=self.resource_group,
location=self.location)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listregionalbysubscriptionfortopictype(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_regional_by_subscription_for_topic_type(location=self.location,
topic_type_name=self.topic_type_name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listglobalbyresourcegroup(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_global_by_resource_group(resource_group_name=self.resource_group)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listglobalbysubscriptionfortopictype(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_global_by_subscription_for_topic_type(topic_type_name=self.topic_type_name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listregionalbysubscription(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_regional_by_subscription(location=self.location)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def get(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.get(scope=self.scope,
event_subscription_name=self.event_subscription_name)
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
def listglobalbysubscription(self):
response = None
try:
response = self.mgmt_client.event_subscriptions.list_global_by_subscription()
except CloudError as e:
            self.log('Could not get info for EventSubscription.')
return response.as_dict()
    def format_item(self, item):
return item
def main():
AzureRMEventSubscriptionsInfo()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
6ddae08c21df8c42e44f5e6d4404af25f79849a0 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/64_2.py | 5b45ab1e95ee997b47876fb571f3e9db42c2eeed | []
| no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | Python program to print Calendar without calendar or datetime module
Given a month and a year, the task is to show the calendar of that month of the
given year without using any module or pre-defined calendar functions.
**Examples:**
**Input :**
mm(1-12) :9
yy :2010
**Output :**
September 2010
Su Mo Tu We Th Fr Sa
01 02 03 04
05 06 07 08 09 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30
**Approach:**
In the program below, we first calculate the number of odd days to find the
weekday of the date 01-mm-yyyy. Then we take the year (yy) and the month (mm) as
input and display the calendar for that month of that year.
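As a quick illustrative check (an editor's addition, not part of the original
article): for mm = 2 and yy = 2020, (yy - 1) % 400 = 19, so day = 0*5 + (19 - 4) + 4*2 = 23
and 23 % 7 = 2; January contributes 31 % 7 = 3 more odd days, giving day = 5, so the
first date is printed in the seventh column (Saturday). February 1, 2020 was indeed a
Saturday, which matches the output shown further below.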
Below is the implementation of the given approach.
# Python code to print Calendar
# Without use of Calendar module
mm = 2
yy = 2020
month ={1:'January', 2:'February', 3:'March',
4:'April', 5:'May', 6:'June', 7:'July',
8:'August', 9:'September', 10:'October',
11:'November', 12:'December'}
# code below for calculation of odd days
day =(yy-1)% 400
day = (day//100)*5 + ((day % 100) - (day %
100)//4) + ((day % 100)//4)*2
day = day % 7
nly =[31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31]
ly =[31, 29, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31]
s = 0
if yy % 4 == 0:
for i in range(mm-1):
s+= ly[i]
else:
for i in range(mm-1):
s+= nly[i]
day += s % 7
day = day % 7
# variable used for white space filling
# where date not present
space =''
space = space.rjust(2, ' ')
# code below is to print the calendar
print(month[mm], yy)
print('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr',
'Sa')
if mm == 9 or mm == 4 or mm == 6 or mm == 11:
for i in range(31 + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
elif mm == 2:
if yy % 4 == 0:
p = 30
else:
p = 29
for i in range(p + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
else:
for i in range(32 + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
**Output:**
February 2020
Su Mo Tu We Th Fr Sa
01
02 03 04 05 06 07 08
09 10 11 12 13 14 15
16 17 18 19 20 21 22
23 24 25 26 27 28 29
| [
"[email protected]"
]
| |
243b00fb792df0d908725a77d369f7a886e958ca | 7319bdc1aa1edd9e37424da47264882753dda919 | /monitor_nomina.py | fde617e7fa6aa3fb079d6c0dc9c7e6ee000411ae | []
| no_license | njmube/satconnect | 4ff81ac132811d2784d82a872be34590f53021db | de421f546a6f7f4cc5f247d1b2ba91ac272bdcb9 | refs/heads/master | 2023-03-18T12:58:18.379008 | 2017-10-24T07:14:05 | 2017-10-24T07:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelNomina
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_nomina)
sentinela = SentinelNomina(carpeta)
sentinela.start_Monitoring()
| [
"="
]
| = |
7091475a03d37a18e9d953f65307c93e950ce3ad | fee71dd79c16f8e4aa4be46aa25863a3e8539a51 | /ear/core/bs2051.py | 058eefc981611aa995294b0783b491c5ba08e367 | [
"BSD-3-Clause-Clear"
]
| permissive | ebu/ebu_adm_renderer | d004ed857b3004c9de336426f402654779a0eaf8 | ef2189021203101eab323e1eccdd2527b32a5024 | refs/heads/master | 2023-08-09T09:13:06.626698 | 2022-12-07T12:22:39 | 2022-12-07T12:22:39 | 123,921,945 | 61 | 13 | BSD-3-Clause-Clear | 2023-08-30T17:17:05 | 2018-03-05T13:15:36 | Python | UTF-8 | Python | false | false | 1,791 | py | import pkg_resources
from ..compatibility import load_yaml
from .geom import PolarPosition
from .layout import Channel, Layout
def _dict_to_channel(d):
position = PolarPosition(azimuth=d["position"]["az"],
elevation=d["position"]["el"],
distance=1.0)
return Channel(
name=d["name"],
is_lfe=d.get("is_lfe", False),
polar_position=position,
polar_nominal_position=position,
az_range=tuple(d.get("az_range", (position.azimuth, position.azimuth))),
el_range=tuple(d.get("el_range", (position.elevation, position.elevation))),
)
def _dict_to_layout(d):
return Layout(
name=d["name"],
channels=list(map(_dict_to_channel, d["channels"])),
)
def _load_layouts():
fname = "data/2051_layouts.yaml"
with pkg_resources.resource_stream(__name__, fname) as layouts_file:
layouts_data = load_yaml(layouts_file)
layouts = list(map(_dict_to_layout, layouts_data))
for layout in layouts:
errors = []
layout.check_positions(callback=errors.append)
assert errors == []
layout_names = [layout.name for layout in layouts]
layouts_dict = {layout.name: layout for layout in layouts}
return layout_names, layouts_dict
layout_names, layouts = _load_layouts()
def get_layout(name):
"""Get data for a layout specified in BS.2051.
Parameters:
name (str): Full layout name, e.g. "4+5+0"
Returns:
Layout: object representing the layout; real speaker positions are set
to the nominal positions.
"""
if name not in layout_names:
raise KeyError("Unknown layout name '{name}'.".format(name=name))
return layouts[name]
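# Illustrative usage (editor's sketch):
#     layout = get_layout("4+5+0")
#     print([c.name for c in layout.channels])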
| [
"[email protected]"
]
| |
bb98f35adc8e0f2ec79f4ea7a0b2314a9ec8bec0 | 0a85e9ecb51c89110794aeb399fc3ccc0bff8c43 | /InterviewCake/Practice Problems/reverse_string_inPlace.py | 482b60e1d1415f53519182dd35b2f0e7cd6af001 | []
| no_license | jordan-carson/Data_Structures_Algos | 6d246cd187e3c3e36763f1eedc535ae1b95c0b18 | 452bb766607963e5ab9e39a354a24ebb26ebaaf5 | refs/heads/master | 2020-12-02T23:19:11.315890 | 2020-09-15T01:23:29 | 2020-09-15T01:23:29 | 231,147,094 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py |
STRING = ['a', 'b', 'c', 'd']
def reverse_string(string_list):
left_index = 0
right_index = len(string_list) - 1
while left_index < right_index:
string_list[left_index], string_list[right_index] = \
string_list[right_index], string_list[left_index]
left_index += 1
right_index -= 1
return string_list
if __name__ == '__main__':
print(reverse_string(STRING)) | [
"[email protected]"
]
| |
7b35ac2384529e8bb902194f56b1d0d824520edc | 016109b9f052ffd037e9b21fa386b36089b05813 | /hashP4.py | 559f024f058f63f9e587e9c5a8b7a38c51b5ec47 | []
| no_license | nsshayan/DataStructuresAndAlgorithms | 9194508c5227c5c8c60b9950917a4ea8da8bbab2 | 2f7ee1bc8f4b53c35d1cce62e898a9695d99540a | refs/heads/master | 2022-09-29T21:15:33.803558 | 2022-09-08T17:14:59 | 2022-09-08T17:14:59 | 73,257,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | A,k = map(int,raw_input().rstrip().split(" "))
nos = map(int,raw_input().rstrip().split(" "))
hashMap = [0 for y in range(1000002)]
for i in range(A):
hashMap[nos[i]] += 1
left = 0
right = 1000001
flag = 0
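# Editor's note: the scan below moves two indices inward over the count array,
# skipping empty buckets, to decide whether two distinct values sum to k
# (or a repeated value when left == right after the loop). Illustrative run:
# with input "5 9" and "1 3 4 6 8", the pair 1 + 8 == 9 is found and "YES" is printed.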
while left < right:
if hashMap[left] == 0 or hashMap[right]==0:
while hashMap[left]==0:
left += 1
while hashMap[right] == 0:
right -= 1
if (left + right ) == k and left != right:
flag = 1
break
elif left+right > k:
right -= 1
elif left + right < k:
left += 1
if left+right == k and left == right and hashMap[left] > 1:
flag = 1
if flag == 1:
print "YES"
else :
print "NO" | [
"[email protected]"
]
| |
1628b9d704c430771ffe07895f60f69d5d03c21c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nntwelv.py | 4e1af148e299d47bb87a0be2b829ebcc80cee86d | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 2,600 | py | ii = [('CookGHP3.py', 8), ('LyelCPG2.py', 4), ('MarrFDI.py', 2), ('RogePAV2.py', 11), ('CoolWHM2.py', 20), ('KembFFF.py', 1), ('GodwWSL2.py', 22), ('RogePAV.py', 4), ('SadlMLP.py', 6), ('WilbRLW.py', 15), ('WilbRLW4.py', 9), ('RennJIT.py', 15), ('ProuWCM.py', 4), ('AubePRP2.py', 28), ('CookGHP.py', 6), ('ShawHDE.py', 6), ('MartHSI2.py', 11), ('LeakWTI2.py', 19), ('UnitAI.py', 9), ('KembFJ1.py', 20), ('WilkJMC3.py', 7), ('WilbRLW5.py', 7), ('LeakWTI3.py', 18), ('PettTHE.py', 14), ('MarrFDI3.py', 7), ('PeckJNG.py', 19), ('BailJD2.py', 5), ('AubePRP.py', 21), ('GellWPT.py', 10), ('AdamWEP.py', 7), ('FitzRNS3.py', 37), ('WilbRLW2.py', 10), ('ClarGE2.py', 54), ('GellWPT2.py', 7), ('WilkJMC2.py', 5), ('CarlTFR.py', 93), ('SeniNSP.py', 4), ('LyttELD.py', 1), ('CoopJBT2.py', 1), ('GrimSLE.py', 1), ('RoscTTI3.py', 2), ('AinsWRR3.py', 4), ('CookGHP2.py', 4), ('KiddJAE.py', 6), ('RoscTTI2.py', 2), ('CoolWHM.py', 27), ('MarrFDI2.py', 2), ('CrokTPS.py', 7), ('ClarGE.py', 47), ('LandWPA.py', 1), ('BuckWGM.py', 13), ('IrviWVD.py', 9), ('LyelCPG.py', 41), ('GilmCRS.py', 5), ('DaltJMA.py', 12), ('WestJIT2.py', 23), ('DibdTRL2.py', 17), ('AinsWRR.py', 2), ('CrocDNL.py', 9), ('MedwTAI.py', 18), ('WadeJEB.py', 38), ('FerrSDO2.py', 2), ('TalfTIT.py', 1), ('NewmJLP.py', 3), ('GodwWLN.py', 10), ('CoopJBT.py', 1), ('KirbWPW2.py', 6), ('SoutRD2.py', 4), ('BackGNE.py', 22), ('LeakWTI4.py', 29), ('LeakWTI.py', 26), ('MedwTAI2.py', 9), ('BachARE.py', 133), ('SoutRD.py', 6), ('DickCSG.py', 1), ('BuckWGM2.py', 2), ('WheeJPT.py', 27), ('MereHHB3.py', 37), ('HowiWRL2.py', 14), ('BailJD3.py', 1), ('MereHHB.py', 31), ('WilkJMC.py', 24), ('HogaGMM.py', 15), ('MartHRW.py', 9), ('MackCNH.py', 11), ('WestJIT.py', 16), ('BabbCEM.py', 25), ('FitzRNS4.py', 21), ('CoolWHM3.py', 14), ('DequTKM.py', 9), ('FitzRNS.py', 47), ('BentJRP.py', 3), ('LyttELD3.py', 2), ('RoscTTI.py', 11), ('ThomGLG.py', 11), ('StorJCC.py', 16), ('KembFJ2.py', 20), ('LewiMJW.py', 20), ('BabbCRD.py', 3), ('MackCNH2.py', 13), ('JacoWHI2.py', 34), ('SomeMMH.py', 8), ('HaliTBC.py', 1), ('WilbRLW3.py', 20), ('MereHHB2.py', 13), ('BrewDTO.py', 2), ('JacoWHI.py', 29), ('ClarGE3.py', 31), ('RogeSIP.py', 10), ('MartHRW2.py', 8), ('DibdTRL.py', 19), ('FitzRNS2.py', 43), ('HogaGMM2.py', 5), ('MartHSI.py', 10), ('EvarJSP.py', 7), ('DwigTHH.py', 6), ('NortSTC.py', 1), ('SadlMLP2.py', 4), ('BowrJMM2.py', 4), ('LyelCPG3.py', 11), ('BowrJMM3.py', 3), ('BeckWRE.py', 2), ('TaylIF.py', 5), ('WordWYR.py', 1), ('DibdTBR.py', 1), ('ThomWEC.py', 3), ('KeigTSS.py', 20), ('KirbWPW.py', 4), ('WaylFEP.py', 9), ('ClarGE4.py', 77), ('HowiWRL.py', 16)] | [
"[email protected]"
]
| |
2d04eb4a6d7119cd114da0714ffeaa23551be0a1 | ad5ad404d24f1ef195d069b2e9d36b1a22cfd25d | /kde/applications/kiten/kiten.py | 68d4236f5c283e083b03af733ec7b7b92ed78a0d | [
"BSD-2-Clause"
]
| permissive | arruor/craft-blueprints-kde | 6643941c87afd09f20dd54635022d8ceab95e317 | e7e2bef76d8efbc9c4b84411aa1e1863ac8633c1 | refs/heads/master | 2020-03-22T17:54:38.445587 | 2018-07-10T11:47:21 | 2018-07-10T11:47:21 | 140,423,580 | 0 | 0 | null | 2018-07-10T11:43:08 | 2018-07-10T11:43:07 | null | UTF-8 | Python | false | false | 1,228 | py | import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.description = "Kiten"
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = "default"
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = "default"
self.runtimeDependencies["libs/qt5/qtbase"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/karchive"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kcompletion"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kcrash"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = "default"
self.runtimeDependencies["kde/frameworks/tier3/khtml"] = "default"
self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = "default"
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
| [
"[email protected]"
]
| |
7c5e77e8e8708914b94c95c7da9fc3574ad25c8c | a14795a79fd8f39cede7fa5eb86f9717b5c289c2 | /backend/course/api/v1/serializers.py | 977b3866deffb183b0133225485e9b022f8b7e3e | []
| no_license | crowdbotics-apps/dearfuturescientist-21123 | fcdbe95a9cd9e8713198b6accbeeb56aa5b0b2d4 | 5b282411ebaf39580b938f6678afc8a36e34aba4 | refs/heads/master | 2022-12-30T20:23:25.888830 | 2020-10-05T19:00:56 | 2020-10-05T19:00:56 | 301,510,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from rest_framework import serializers
from course.models import (
    Recording,
    Event,
    Subscription,
    Course,
    Group,
    Module,
    PaymentMethod,
    SubscriptionType,
    Enrollment,
    Lesson,
    Category,
)
class LessonSerializer(serializers.ModelSerializer):
    class Meta:
        model = Lesson
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    class Meta:
        model = Category
        fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
    class Meta:
        model = SubscriptionType
        fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
    class Meta:
        model = Group
        fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
    class Meta:
        model = Event
        fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
    class Meta:
        model = Recording
        fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
    class Meta:
        model = PaymentMethod
        fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
    class Meta:
        model = Course
        fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Subscription
        fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
    class Meta:
        model = Enrollment
        fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
    class Meta:
        model = Module
        fields = "__all__"
| [
"[email protected]"
]
| |
94495ae9bda52bd44a846dc64ca184a3dab2436d | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/KISS/testcase/firstcases/testcase9_006.py | 61760f5dab43c2ef13a77980e6ed785b691254ad | []
| no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'fr.neamar.kiss',
'appActivity' : 'fr.neamar.kiss.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'fr.neamar.kiss/fr.neamar.kiss.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase006
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"fr.neamar.kiss:id/menuButton\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Device settings\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"9_006\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'fr.neamar.kiss'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
]
| |
263ca80ed3ebdcc465692fef40cd71b494ac004c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03807/s835726532.py | c603899438bd501bb5871b424daa8724dfe35dfc | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | N = int(input())
a = list(map(int,input().split()))
odd = 0
for i in range(N):
    if a[i] % 2:
        odd += 1
if odd % 2:
    print('NO')
else:
    print('YES') | [
"[email protected]"
]
| |
fcb2745a3b28acb9bdab55a49b61a805e5d2198f | 55493112595d303d39b90ca9112e1d0a52f435e4 | /WorkforceManagement/views/Computer_View.py | 4fc447fa4d1e6adaa0a611f92c7069d1ab909d56 | []
| no_license | NSS-Spontaneous-Spoonbills/Sprint2 | a06c2ea08dbe58289984591b5ef412242924f86f | 7fd603ee531556b32b100c5a9f109b0e9207f369 | refs/heads/master | 2020-03-25T11:38:55.449223 | 2018-08-13T21:00:35 | 2018-08-13T21:00:35 | 143,741,505 | 0 | 1 | null | 2018-08-13T21:26:08 | 2018-08-06T14:38:30 | Python | UTF-8 | Python | false | false | 1,944 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from WorkforceManagement.models import Computer
from WorkforceManagement.forms import *
def Computer_List_View(request):
"""Displays all computers in the database
Author: Erin Meaker
"""
computers = Computer.objects.all()
return render(request, 'WorkforceManagement/Computer_List.html', {'computers': computers})
def Computer_Detail_View(request, pk):
"""Displays details about a specific computer
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
return render(request, 'WorkforceManagement/Computer_Detail.html', {'computer': computer})
def Computer_New_View(request):
"""Displays form for adding new computer to the database
Author: Erin Meaker
"""
if request.method == "POST":
form = Computer_New_Form(request.POST)
new_comp = form.save(commit=False)
new_comp.save()
return redirect('computer_detail', pk=new_comp.pk)
else:
form = Computer_New_Form()
return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Update_View(request, pk):
"""Displays form for updating the computers
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
if request.method == "POST":
form = Computer_Update_Form(request.POST, instance=computer)
computer = form.save(commit=False)
computer.save()
return redirect('computer_detail', pk=computer.pk)
else:
form = Computer_Update_Form(instance=computer)
return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Delete_View(request, pk):
"""Displays template for deleting a computer
Author: Erin Meaker"""
computer = get_object_or_404(Computer, pk=pk)
computer.delete()
return redirect('computer_list')
| [
"[email protected]"
]
| |
0f20818aacacd277b492468e80b7128771cc7584 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/1704.py | 2ef79a2cad74434c186149c67d373ceeab96e152 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | def areRecycled(number1, number2):
    recycled = False
    numero1 = number1
    for i in range(len(number2)):
        numero1.insert(0,numero1.pop())
        if numero1 == number2:
            return True
    return False
archi = open("C-small-attempt2.in","r")
cant = open("output.dat","w")
cases = int(archi.readline().split()[0])
for i in range(cases):
    cont = 0
    label = "Case #" + str(i+1) + ": "
    numeros = archi.readline().replace('\n','').split(" ")
    limInferior = int(numeros[0])
    limSuperior = int(numeros[1])
    j=limInferior
    while j < limSuperior:
        k=j+1;
        while k<= limSuperior:
            if areRecycled(list(str(k)),list(str(j))):
                cont = cont + 1
            k = k + 1
        j = j + 1
    label = label + str(cont) + '\n'
    cant.writelines(label)
| [
"[email protected]"
]
| |
4d7ab7bfcefd8572eb06e3978ebf7097d6c4a4f4 | 232fc2c14942d3e7e28877b502841e6f88696c1a | /dizoo/multiagent_particle/config/cooperative_navigation_collaq_config.py | 59f41109f0f514f61ca8866df2a01ca581003b23 | [
"Apache-2.0"
]
| permissive | shengxuesun/DI-engine | ebf84221b115b38b4b3fdf3079c66fe81d42d0f7 | eb483fa6e46602d58c8e7d2ca1e566adca28e703 | refs/heads/main | 2023-06-14T23:27:06.606334 | 2021-07-12T12:36:18 | 2021-07-12T12:36:18 | 385,454,483 | 1 | 0 | Apache-2.0 | 2021-07-13T02:56:27 | 2021-07-13T02:56:27 | null | UTF-8 | Python | false | false | 2,129 | py | from easydict import EasyDict
n_agent = 5
num_landmarks = n_agent
collector_env_num = 4
evaluator_env_num = 2
cooperative_navigation_collaq_config = dict(
env=dict(
n_agent=n_agent,
num_landmarks=num_landmarks,
max_step=100,
collector_env_num=collector_env_num,
evaluator_env_num=evaluator_env_num,
manager=dict(shared_memory=False, ),
n_evaluator_episode=5,
stop_value=0,
),
policy=dict(
cuda=True,
on_policy=True,
model=dict(
agent_num=n_agent,
obs_shape=2 + 2 + (n_agent - 1) * 2 + num_landmarks * 2,
alone_obs_shape=2 + 2 + (num_landmarks) * 2,
global_obs_shape=n_agent * 2 + num_landmarks * 2 + n_agent * 2,
action_shape=5,
hidden_size_list=[128, 128, 64],
attention=True,
self_feature_range=[2, 4], # placeholder
ally_feature_range=[4, n_agent * 2 + 2], # placeholder
attention_size=32,
),
agent_num=n_agent,
learn=dict(
update_per_collect=100,
batch_size=32,
learning_rate=0.0001,
target_update_theta=0.001,
discount_factor=0.99,
),
collect=dict(
n_sample=600,
unroll_len=16,
env_num=collector_env_num,
),
eval=dict(env_num=evaluator_env_num, ),
other=dict(eps=dict(
type='exp',
start=1.0,
end=0.05,
decay=100000,
), ),
),
)
cooperative_navigation_collaq_config = EasyDict(cooperative_navigation_collaq_config)
main_config = cooperative_navigation_collaq_config
cooperative_navigation_collaq_create_config = dict(
env=dict(
import_names=['dizoo.multiagent_particle.envs.particle_env'],
type='cooperative_navigation',
),
env_manager=dict(type='subprocess'),
policy=dict(type='collaq'),
)
cooperative_navigation_collaq_create_config = EasyDict(cooperative_navigation_collaq_create_config)
create_config = cooperative_navigation_collaq_create_config
| [
"[email protected]"
]
| |
984769b8bfd917b7f3a450664dda8ca833caabdc | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /components/safe_browsing/content/web_ui/DEPS | c4dfe28ac40a5b9fd60086f5f0bb2d45f1b6d99f | [
"BSD-3-Clause"
]
| permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 409 | include_rules = [
"+components/enterprise/common/proto/connectors.pb.h",
"+components/grit/components_resources.h",
"+components/password_manager/core/browser/hash_password_manager.h",
"+components/user_prefs",
"+components/safe_browsing/core/proto/csd.pb.h",
"+components/strings/grit/components_strings.h",
"+components/grit/components_scaled_resources.h",
"+components/safe_browsing_db",
]
| [
"[email protected]"
]
| ||
699b7062a1c9a0e705a481a5c8cf42e5a18dc7f6 | ef20884169d10ec9ac4d1d3b77ee35245d248294 | /practice/first_step_with_tensorflow/kmean_create_data.py | b95cc97c8c9d36f85fbdcbe9af721f29fd09ec7d | []
| no_license | heaven324/Deeplearning | 64016671879cdf1742eff6f374cfb640cfc708ae | a7a8d590fa13f53348f83f8c808538affbc7b3e8 | refs/heads/master | 2023-05-05T08:54:27.888155 | 2021-05-22T08:25:47 | 2021-05-22T08:25:47 | 188,010,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | import numpy as np
num_points = 2000
vectors_set = []
for i in range(num_points):
    if np.random.random() > 0.5:
        vectors_set.append([np.random.normal(0.0, 0.9), np.random.normal(0.0, 0.9)])
    else:
        vectors_set.append([np.random.normal(3.0, 0.5), np.random.normal(1.0, 0.5)])
# Check the generated random numbers
#print(vectors_set) | [
"[email protected]"
]
| |
83c63b60c22628725f344b1bf4635e30bbf5aae9 | 577fd6f5ce00ba4b530937e84f3b426b30cd9d08 | /Checkiolearn/Polygon/sun_angle.py | ecd226f204d9bf718eb6cd5d5451c14c7f50b0f1 | []
| no_license | YxiangJ/Python | 33e2d0d4c26ce35ccd3504b73de15e45adb6946c | bcb1a0ace39fbcbe868a341652085c0ddf307c17 | refs/heads/master | 2018-09-24T08:24:13.692535 | 2018-06-07T01:11:00 | 2018-06-07T01:11:00 | 126,120,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def sun_angle(time):
    # replace this for solution
    l = time.split(':')
    result = (int(l[0]) - 6) * 15 + int(l[1]) / 4
    if int(l[0]) > 18 or int(l[0]) < 6:
        return "I don't see the sun!"
    else:
        return result
if __name__ == '__main__':
    print("Example:")
    print(sun_angle("07:00"))
    # These "asserts" using only for self-checking and not necessary for auto-testing
    assert sun_angle("07:00") == 15
    assert sun_angle("01:23") == "I don't see the sun!"
    print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"[email protected]"
]
| |
c14d81b13ff0bfca027e09587f8f586914771894 | 8051c715e86095c1a0f2d6dcee78150417562d00 | /app/api/response_api.py | 8ea2f772957ae7aa5d8b6a8b84bed6bcac25e956 | [
"BSD-3-Clause"
]
| permissive | minkione/Apfell | 45bd47249afa59389ab8237558c52d3f083cae29 | 096b6524c44b0673f11d18bd2388193d074380d6 | refs/heads/master | 2020-03-28T12:22:37.741190 | 2018-09-10T02:42:06 | 2018-09-10T02:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Task, Response
import base64
from sanic_jwt.decorators import protected, inject_user
from app.api.file_api import create_filemeta_in_database_func, download_file_to_database_func
import json as js
# This gets all responses in the database
@apfell.route(apfell.config['API_BASE'] + "/responses/", methods=['GET'])
@inject_user()
@protected()
async def get_all_responses(request, user):
try:
all_responses = await db_objects.execute(Response.select())
except Exception as e:
return json({'status': 'error',
'error': 'Cannot get responses'})
return json([c.to_json() for c in all_responses])
# Get a single response
@apfell.route(apfell.config['API_BASE'] + "/response/<rid:int>", methods=['GET'])
@inject_user()
@protected()
async def get_one_response(request, user, rid):
try:
resp = await db_objects.get(Response, id=rid)
except Exception as e:
return json({'status': 'error', 'error': 'Cannot get that response'})
return json(resp.to_json())
# implant calling back to update with base64 encoded response from executing a task
# We don't add @protected or @inject_user here because the callback needs to be able to post here for responses
@apfell.route(apfell.config['API_BASE'] + "/responses/<tid:int>", methods=['POST'])
async def update_task_for_callback(request, tid):
data = request.json
decoded = base64.b64decode(data['response']).decode("utf-8")
try:
task = await db_objects.get(Task, id=tid)
except Exception as e:
return json({'status': 'error',
'error': 'Task does not exist'})
try:
if 'response' not in data:
return json({'status': 'error', 'error': 'task response not in data'})
if task.command.cmd == "download":
try:
download_response = js.loads(decoded)
if 'total_chunks' in download_response:
return await create_filemeta_in_database_func(download_response)
elif 'chunk_data' in download_response:
return await download_file_to_database_func(download_response)
except Exception as e:
pass
resp = await db_objects.create(Response, task=task, response=decoded)
task.status = "processed"
await db_objects.update(task)
status = {'status': 'success'}
resp_json = resp.to_json()
return json({**status, **resp_json}, status=201)
except Exception as e:
print(e)
return json({'status': 'error',
'error': 'Failed to update task',
'msg': str(e)})
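# Illustrative callback payload (hypothetical values, shown only as documentation of the
# endpoint above): an implant would POST to "<API_BASE>/responses/<task id>" a JSON body
# such as
#     {"response": "aGVsbG8gd29ybGQ="}
# where the value is the base64-encoded task output ("hello world" in this example).
# For "download" tasks the decoded JSON is routed to the file API helpers imported above.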
| [
"[email protected]"
]
| |
e3e25ce23370e068912110921559d559bca593e6 | 1a5c27bc6e2d39a258dd517d2dc3570c13e42a70 | /flaskext/utils.py | ff2d1dcf02a01b52fcfe2121292f09a4dde4989a | [
"MIT"
]
| permissive | fumingshih/flask-peewee | 0f8e169ca7ab2d7ab437a5620a2ff2f082d668dd | 4f44ec5583abba5099880a2a2af76404223a594b | refs/heads/master | 2021-01-18T11:00:19.120283 | 2011-11-09T14:36:02 | 2011-11-09T14:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | import math
import random
import re
import sys
from hashlib import sha1
from flask import abort, request, render_template
from peewee import Model, DoesNotExist, SelectQuery
def get_object_or_404(query_or_model, **query):
try:
return query_or_model.get(**query)
except DoesNotExist:
abort(404)
def object_list(template_name, qr, var_name='object_list', **kwargs):
pq = PaginatedQuery(qr, kwargs.pop('paginate_by', 20))
kwargs[var_name] = pq.get_list()
return render_template(template_name, pagination=pq, page=pq.get_page(), **kwargs)
class PaginatedQuery(object):
page_var = 'page'
def __init__(self, query_or_model, paginate_by):
self.paginate_by = paginate_by
if isinstance(query_or_model, SelectQuery):
self.query = query_or_model
self.model = self.query.model
else:
self.model = query_or_model
self.query = self.model.select()
def get_page(self):
return int(request.args.get(self.page_var) or 1)
def get_pages(self):
return math.ceil(float(self.query.count()) / self.paginate_by)
def get_list(self):
return self.query.paginate(self.get_page(), self.paginate_by)
def get_next():
if not request.query_string:
return request.path
return '%s?%s' % (request.path, request.query_string)
def slugify(s):
return re.sub('[^a-z0-9_\-]+', '-', s.lower())
def load_class(s):
path, klass = s.rsplit('.', 1)
__import__(path)
mod = sys.modules[path]
return getattr(mod, klass)
# borrowing these methods, slightly modified, from django.contrib.auth
def get_hexdigest(salt, raw_password):
return sha1(salt + raw_password).hexdigest()
def make_password(raw_password):
salt = get_hexdigest(str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(salt, raw_password)
return '%s$%s' % (salt, hsh)
def check_password(raw_password, enc_password):
salt, hsh = enc_password.split('$', 1)
return hsh == get_hexdigest(salt, raw_password)
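# Illustrative usage of the helpers above. The Flask app, the Blog model and the template
# name are hypothetical; only the call patterns come from this module.
#
# @app.route('/blog/')
# def blog_list():
#     return object_list('blog/list.html', Blog.select(), paginate_by=10)
if __name__ == '__main__':
    # Quick self-check of the password helpers (illustrative only).
    encoded = make_password('secret')
    assert check_password('secret', encoded)
    assert not check_password('wrong', encoded)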
| [
"[email protected]"
]
| |
ba897465ddc7bea4ef33e45bb292ec6dcdea5381 | 392495a85f77e72e7c3562576aa362d7860c17ee | /backend/setup.py | 244a73c8d40ebb0836da93cb7d08757fdc76199d | []
| no_license | messa/aiohttp-nextjs-graphql-demo-forum | ef51c26720a6f67a36f08d5caeba4e2d9bef0332 | 38fb66d011faec881b184e132aa7347517ee99e6 | refs/heads/master | 2020-04-16T22:38:08.171305 | 2019-02-04T02:18:35 | 2019-02-04T02:18:35 | 165,976,811 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
    name='forum-backend',
    version='0.0.1',
    packages=find_packages(exclude=['doc', 'tests*']),
    install_requires=[
        'aiohttp',
        'aiohttp-graphql',
        'pyyaml',
    ],
    entry_points={
        'console_scripts': [
            'forum-backend=forum_backend:main',
        ],
    })
| [
"[email protected]"
]
| |
410aa5e90d452ce0c150cc25c78df4ee555a14c6 | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /abc094/arc095_a/7981214.py | 7f6c3dcab7bd1fb3884adf64c039c5841bf608cf | []
| no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | N=int(input())
a=list(map(int,input().split()))
b=sorted(a)
ans=[b[N//2],b[(N//2) -1]]
for i in range(N):
    if a[i] >= ans[0]:
        print(ans[1])
    else:
        print(ans[0])
| [
"[email protected]"
]
| |
8e8516567da050393095124d42c7601023b8cc02 | 70eef679af91823579963dfb33eb94358353da87 | /evennia/utils/inlinefunc.py | 8cc14a95895b753b1f9faddf1c1a673ef8191dc1 | [
"BSD-3-Clause"
]
| permissive | Pinacolada64/evennia | 17e68f4f6b7ddcb4891256ceab2fbf02d185b9db | ed1b3ee8195cb93cd3382625d8d20d83d63c5322 | refs/heads/master | 2020-04-30T01:39:53.499431 | 2016-02-26T11:02:20 | 2016-02-26T11:02:20 | 52,920,172 | 1 | 0 | null | 2016-03-02T00:15:02 | 2016-03-02T00:15:02 | null | UTF-8 | Python | false | false | 8,450 | py | """
Inlinefunc
**Note: This module is deprecated. Use evennia.utils.nested_inlinefuncs instead.**
This is a simple inline text language used to custom-format text in
Evennia. It is applied BEFORE ANSI/MUX parsing is applied.
To activate Inlinefunc, settings.INLINEFUNC_ENABLED must be set.
The format is straightforward:
{funcname([arg1,arg2,...]) text {/funcname
Example:
"This is {pad(50,c,-) a center-padded text{/pad of width 50."
->
"This is -------------- a center-padded text--------------- of width 50."
This can be inserted in any text, operated on by the parse_inlinefunc
function. funcname() (no space is allowed between the name and the
argument tuple) is picked from a selection of valid functions from
settings.INLINEFUNC_MODULES.
Commands can be nested, and will be applied inside-out. For correct
parsing their end-tags must match the starting tags in reverse order.
Example:
"The time is {pad(30){time(){/time{/padright now."
->
"The time is Oct 25, 11:09 right now."
An inline function should have the following call signature:
def funcname(text, *args, **kwargs)
where the text is always the part between {funcname(args) and
{/funcname and the *args are taken from the appropriate part of the
call. It is important that the inline function properly clean the
incoming args, checking their type and replacing them with sane
defaults if needed. If impossible to resolve, the unmodified text
should be returned. The inlinefunc should never cause a traceback.
"""
import re
from django.conf import settings
from evennia.utils import utils, logger
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# inline functions
def pad(text, *args, **kwargs):
"""
Pad to width. pad(text, width=78, align='c', fillchar=' ')
"""
width = _DEFAULT_WIDTH
align = 'c'
fillchar = ' '
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
align = arg if arg in ('c', 'l', 'r') else align
elif iarg == 2:
fillchar = arg[0]
else:
break
return utils.pad(text, width=width, align=align, fillchar=fillchar)
def crop(text, *args, **kwargs):
"""
Crop to width. crop(text, width=78, suffix='[...]')
"""
width = _DEFAULT_WIDTH
suffix = "[...]"
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
suffix = arg
else:
break
return utils.crop(text, width=width, suffix=suffix)
def wrap(text, *args, **kwargs):
"""
Wrap/fill text to width. wrap(text, width=78, indent=0)
"""
width = _DEFAULT_WIDTH
indent = 0
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
indent = int(arg) if arg.isdigit() else indent
return utils.wrap(text, width=width, indent=indent)
def time(text, *args, **kwargs):
"""
Inserts current time.
"""
import time
strformat = "%h %d, %H:%M"
if args and args[0]:
strformat = str(args[0])
return time.strftime(strformat)
def you(text, *args, **kwargs):
"""
Inserts your name.
"""
name = "You"
sess = kwargs.get("session")
if sess and sess.puppet:
name = sess.puppet.key
return name
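def upper(text, *args, **kwargs):
    """
    Upper-cases the enclosed text: {upper()some text{/upper -> SOME TEXT.

    Illustrative example of a custom inline function following the call
    signature described in the module docstring; it is not part of the stock
    function set. Any callable defined in a module listed in
    settings.INLINEFUNC_MODULES (including this module, with default settings)
    is collected the same way by the loop below.
    """
    return str(text).upper()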
# load functions from module (including this one, if using default settings)
_INLINE_FUNCS = {}
for module in utils.make_iter(settings.INLINEFUNC_MODULES):
_INLINE_FUNCS.update(utils.all_from_module(module))
_INLINE_FUNCS.pop("inline_func_parse", None)
# dynamically build regexes for found functions
_RE_FUNCFULL = r"\{%s\((.*?)\)(.*?){/%s"
_RE_FUNCFULL_SINGLE = r"\{%s\((.*?)\)"
_RE_FUNCSTART = r"\{((?:%s))"
_RE_FUNCEND = r"\{/((?:%s))"
_RE_FUNCSPLIT = r"(\{/*(?:%s)(?:\(.*?\))*)"
_RE_FUNCCLEAN = r"\{%s\(.*?\)|\{/%s"
_INLINE_FUNCS = dict((key, (func, re.compile(_RE_FUNCFULL % (key, key), re.DOTALL & re.MULTILINE),
re.compile(_RE_FUNCFULL_SINGLE % key, re.DOTALL & re.MULTILINE)))
for key, func in _INLINE_FUNCS.items() if callable(func))
_FUNCSPLIT_REGEX = re.compile(_RE_FUNCSPLIT % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL & re.MULTILINE)
_FUNCSTART_REGEX = re.compile(_RE_FUNCSTART % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL & re.MULTILINE)
_FUNCEND_REGEX = re.compile(_RE_FUNCEND % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL & re.MULTILINE)
_FUNCCLEAN_REGEX = re.compile("|".join([_RE_FUNCCLEAN % (key, key) for key in _INLINE_FUNCS]), re.DOTALL & re.MULTILINE)
# inline parser functions
def _execute_inline_function(funcname, text, session):
"""
Get the enclosed text between {funcname(...) and {/funcname
and execute the inline function to replace the whole block
with the result.
Args:
funcname (str): Inlinefunction identifier.
text (str): Text to process.
session (Session): Session object.
Notes:
This lookup is "dumb" - we just grab the first end tag we find. So
to work correctly this function must be called "inside out" on a
nested function tree, so each call only works on a "flat" tag.
"""
def subfunc(match):
"""
replace the entire block with the result of the function call
"""
args = [part.strip() for part in match.group(1).split(",")]
intext = match.group(2)
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0](intext, *args, **kwargs)
return _INLINE_FUNCS[funcname][1].sub(subfunc, text)
def _execute_inline_single_function(funcname, text, session):
"""
Get the arguments of a single function call (no matching end tag)
and execute it with an empty text input.
Args:
funcname (str): Function identifier.
text (str): String to process.
session (Session): Session id.
"""
def subfunc(match):
"replace the single call with the result of the function call"
args = [part.strip() for part in match.group(1).split(",")]
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0]("", *args, **kwargs)
return _INLINE_FUNCS[funcname][2].sub(subfunc, text)
def parse_inlinefunc(text, strip=False, session=None):
"""
Parse inline function-replacement.
Args:
text (str): Text to parse.
strip (bool, optional): Remove all supported inlinefuncs from text.
session (bool): Session calling for the parsing.
Returns:
text (str): Parsed text with processed results of
inlinefuncs.
"""
if strip:
# strip all functions
return _FUNCCLEAN_REGEX.sub("", text)
stack = []
for part in _FUNCSPLIT_REGEX.split(text):
endtag = _FUNCEND_REGEX.match(part)
if endtag:
# an end tag
endname = endtag.group(1)
while stack:
new_part = stack.pop()
part = new_part + part # add backwards -> fowards
starttag = _FUNCSTART_REGEX.match(new_part)
if starttag:
startname = starttag.group(1)
if startname == endname:
part = _execute_inline_function(startname, part, session)
break
stack.append(part)
# handle single functions without matching end tags; these are treated
# as being called with an empty string as text argument.
outstack = []
for part in _FUNCSPLIT_REGEX.split("".join(stack)):
starttag = _FUNCSTART_REGEX.match(part)
if starttag:
logger.log_dep("The {func()-style inlinefunc is deprecated. Use the $func{} form instead.")
startname = starttag.group(1)
part = _execute_inline_single_function(startname, part, session)
outstack.append(part)
return "".join(outstack)
def _test():
# this should all be handled
s = "This is a text with a{pad(78,c,-)text {pad(5)of{/pad {pad(30)nice{/pad size{/pad inside {pad(4,l)it{/pad."
s2 = "This is a text with a----------------text of nice size---------------- inside it ."
t = parse_inlinefunc(s)
assert(t == s2)
return t
| [
"[email protected]"
]
| |
63bd83adcb7f9700378098678b26a5b39b3d7a86 | 719853613b5b96f02072be1fde736d883e799f02 | /server/intrinsic/management/commands/intrinsic_import_ec2.py | a6bd9aeef70b6ccd8ad1fe6dbb896cfbc53d5e39 | [
"MIT",
"CC-BY-2.0"
]
| permissive | anmolkabra/opensurfaces | 5ba442123586533a93eb29890fa1694e3efdbfe8 | a42420083a777d7e1906506cc218f681c5cd145b | refs/heads/master | 2020-03-20T01:11:05.182880 | 2018-06-13T14:55:45 | 2018-06-13T14:55:45 | 137,068,945 | 0 | 0 | MIT | 2018-06-12T12:32:53 | 2018-06-12T12:32:52 | null | UTF-8 | Python | false | false | 1,615 | py | import glob
import time
import timeit
from django.core.management.base import BaseCommand
from intrinsic.tasks import import_ec2_task
class Command(BaseCommand):
args = ''
help = 'Import image algorithms run on ec2'
def handle(self, *args, **options):
indir = '/vol/completed-tasks'
scheduled_fnames = {}
sleep_time = 2
total_count = None
start_time = None
first = True
while True:
files = glob.glob("%s/*.pickle" % indir)
c = 0
for fname in files:
if fname in scheduled_fnames:
scheduled_fnames[fname] -= sleep_time
else:
scheduled_fnames[fname] = 0
if scheduled_fnames[fname] <= 0:
import_ec2_task.delay(fname)
scheduled_fnames[fname] = 3600
c += 1
# ignore the first time
if first:
total_count = 0
start_time = timeit.default_timer()
rate = "N/A"
first = False
else:
total_count += c
time_elapsed = max(timeit.default_timer() - start_time, 1e-3)
rate = "%.3f" % (float(total_count) / time_elapsed)
if c > 0:
sleep_time = max(sleep_time // 2, 2)
else:
sleep_time = min(sleep_time * 2, 3600)
time.sleep(sleep_time)
print "%s new files (average %s files/s); sleep for %s seconds..." % (
c, rate, sleep_time)
| [
"[email protected]"
]
| |
509ca1afcbfe5bbdeb744ed4f48259dbb9978d9f | 775d3690f09f34347e2a7918b060f9dd9f83c10d | /research/vrgripper/vrgripper_env_models.py | 36c7825380aeab82828057bd703230deea3186e6 | [
"Apache-2.0"
]
| permissive | HK2-D/tensor2robot | aa0ccc9a997ba72447a48d0dc3acf71d2f4af827 | 58d71467eecf02d3a1646d26cc9011f81753f560 | refs/heads/master | 2023-02-04T03:18:22.863436 | 2020-12-24T01:20:50 | 2020-12-24T01:21:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,221 | py | # coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2RModels for VRGripper env tasks."""
from typing import Callable, Dict, List, Optional, Text, Tuple
import gin
import numpy as np
from tensor2robot.layers import mdn
from tensor2robot.layers import vision_layers
from tensor2robot.meta_learning import meta_tfdata
from tensor2robot.models import abstract_model
from tensor2robot.models import regression_model
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.preprocessors import distortion
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf # tf
import tensorflow_probability as tfp
from tensorflow.contrib import layers as contrib_layers
TensorSpec = tensorspec_utils.ExtendedTensorSpec
TRAIN = tf.estimator.ModeKeys.TRAIN
PREDICT = tf.estimator.ModeKeys.PREDICT
FLOAT_DTYPES = [tf.bfloat16, tf.float32, tf.float64]
@gin.configurable
class DefaultVRGripperPreprocessor(abstract_preprocessor.AbstractPreprocessor):
"""The default VRGripperEnv preprocessor."""
def __init__(self,
src_img_res = (220, 300),
crop_size = (200, 280),
mixup_alpha = 0.0,
**kwargs):
"""Construct the preprocessor.
Args:
src_img_res: The true height and width of the image data. If the model
expects images of a different size, we automatically resize the images.
crop_size: Before resizing the image, take a crop of the image to this
height and width. Is a no-op if equal to src_img_res. Crop is done
randomly at train time, and is taken from the center otherwise.
mixup_alpha: If > 0., turns on Mixup data augmentation for features and
labels.
**kwargs: Keyword args passed to parent class.
"""
super(DefaultVRGripperPreprocessor, self).__init__(**kwargs)
self._src_img_res = src_img_res
self._crop_size = crop_size
self._mixup_alpha = mixup_alpha
def get_in_feature_specification(self, mode
):
"""See base class."""
feature_spec = tensorspec_utils.copy_tensorspec(
self._model_feature_specification_fn(mode))
# Don't parse the original_image from the input data; it is added back in
# preprocess_fn to satisfy the model's input specification.
if mode != PREDICT and 'original_image' in feature_spec:
del feature_spec['original_image']
if 'image' in feature_spec:
true_img_shape = feature_spec.image.shape.as_list()
# Overwrite the H, W dimensions.
true_img_shape[-3:-1] = self._src_img_res
feature_spec.image = TensorSpec.from_spec(
feature_spec.image, shape=true_img_shape, dtype=tf.uint8)
return tensorspec_utils.flatten_spec_structure(feature_spec)
def get_in_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def get_out_feature_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_feature_specification_fn(mode))
def get_out_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def _preprocess_fn(
self, features,
labels,
mode
):
"""Resize images and convert them from uint8 -> float32."""
if 'image' in features:
ndim = len(features.image.shape)
is_sequence = (ndim > 4)
input_size = self._src_img_res
target_size = self._crop_size
features.original_image = features.image
features.image = distortion.preprocess_image(features.image, mode,
is_sequence, input_size,
target_size)
features.image = tf.image.convert_image_dtype(features.image, tf.float32)
out_feature_spec = self.get_out_feature_specification(mode)
if out_feature_spec.image.shape != features.image.shape:
features.image = meta_tfdata.multi_batch_apply(
tf.image.resize_images, 2, features.image,
out_feature_spec.image.shape.as_list()[-3:-1])
if self._mixup_alpha > 0. and labels and mode == TRAIN:
lmbda = tfp.distributions.Beta(
self._mixup_alpha, self._mixup_alpha).sample()
for key, x in features.items():
if x.dtype in FLOAT_DTYPES:
features[key] = lmbda * x + (1-lmbda)*tf.reverse(x, axis=[0])
if labels is not None:
for key, x in labels.items():
if x.dtype in FLOAT_DTYPES:
labels[key] = lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
return features, labels
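# The helper below is an illustrative numpy sketch (not used by the preprocessor) of the
# Mixup interpolation applied in _preprocess_fn above; numpy stands in for the TF ops.
def _mixup_sketch(x, alpha):
  """Blend a float batch with its batch-reversed counterpart, Mixup-style.

  `alpha` mirrors the `mixup_alpha` constructor argument; `x` is any float
  array whose leading dimension is the batch.
  """
  lmbda = np.random.beta(alpha, alpha)
  # Same blend as lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0]) above.
  return lmbda * x + (1. - lmbda) * x[::-1]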
@gin.configurable
class VRGripperRegressionModel(regression_model.RegressionModel):
"""Continuous regression output model for VRGripper Env."""
def __init__(self,
use_gripper_input = True,
normalize_outputs = False,
output_mean = None,
output_stddev = None,
outer_loss_multiplier = 1.,
num_mixture_components = 1,
output_mixture_sample = False,
condition_mixture_stddev = False,
episode_length = 40,
**kwargs):
"""Initialize the VRGripperRegressionModel.
Args:
use_gripper_input: If True, concatenate gripper pose with input to the
fully connected layers when predicting actions.
normalize_outputs: If True, scale actions by `output_stddev` and
translate by `output_mean`.
output_mean: The empirical mean of demonstration actions.
output_stddev: The empirical standard deviation of demonstration actions.
outer_loss_multiplier: A scaling factor for the outer loss.
num_mixture_components: The number of gaussian mixture components. Use 1
for standard mean squared error regression.
output_mixture_sample: If True (and num_mixture_components > 1), output
actions by sampling from a gaussian mixture. Otherwise, we use the mean
of the most likely component.
condition_mixture_stddev: If True, the mixture standard deviations will be
output from a neural net and thus conditioned on image/state. Otherwise,
they will simply be learned variables (unconditioned on image/state).
episode_length: The fixed length of an episode in the data.
**kwargs: Passed to parent.
Raises:
ValueError: If `output_mean` or `output_stddev` have incorrect length.
"""
super(VRGripperRegressionModel, self).__init__(**kwargs)
self._use_gripper_input = use_gripper_input
self._normalize_outputs = normalize_outputs
self._output_mean = None
self._output_stddev = None
self._outer_loss_multiplier = outer_loss_multiplier
self._num_mixture_components = num_mixture_components
self._output_mixture_sample = output_mixture_sample
self._condition_mixture_stddev = condition_mixture_stddev
self._episode_length = episode_length
if output_mean and output_stddev:
if not len(output_mean) == len(output_stddev) == self.action_size:
raise ValueError(
'Output mean and stddev have lengths {:d} and {:d}.'.format(
len(output_mean), len(output_stddev)))
self._output_mean = np.array([output_mean])
self._output_stddev = np.array([output_stddev])
@property
def default_preprocessor_cls(self):
return DefaultVRGripperPreprocessor
def get_feature_specification(self, mode):
del mode
image_spec = TensorSpec(
shape=(100, 100, 3),
dtype=tf.float32,
name='image0',
data_format='jpeg')
gripper_pose_spec = TensorSpec(
shape=(14,), dtype=tf.float32, name='world_pose_gripper')
tspec = tensorspec_utils.TensorSpecStruct(
image=image_spec, gripper_pose=gripper_pose_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
def get_label_specification(self, mode):
del mode
action_spec = TensorSpec(
shape=(self._action_size,), dtype=tf.float32, name='action_world')
tspec = tensorspec_utils.TensorSpecStruct(action=action_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
@property
def action_size(self):
return self._action_size
def _single_batch_a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE):
"""A state -> action regression function that expects a single batch dim."""
gripper_pose = features.gripper_pose if self._use_gripper_input else None
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
fc_input = tf.concat([feature_points, gripper_pose], -1)
outputs = {}
if self._num_mixture_components > 1:
dist_params = mdn.predict_mdn_params(
fc_input,
self._num_mixture_components,
self._action_size,
condition_sigmas=self._condition_mixture_stddev)
gm = mdn.get_mixture_distribution(
dist_params, self._num_mixture_components, self._action_size,
self._output_mean if self._normalize_outputs else None)
if self._output_mixture_sample:
# Output a mixture sample as action.
action = gm.sample()
else:
action = mdn.gaussian_mixture_approximate_mode(gm)
outputs['dist_params'] = dist_params
else:
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
fc_input, num_outputs=self._action_size)
action = self._output_mean + self._output_stddev * action
outputs.update({
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax']
})
return outputs
def a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE,
config=None,
params=None):
"""A (state) regression function.
This function can return a stochastic or a deterministic tensor.
Args:
features: This is the first item returned from the input_fn and parsed by
tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
requirements of self.get_feature_specification.
scope: String specifying variable scope.
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
context_fn: Optional python function that takes in features and returns
new features of same shape. For merging information like in RL^2.
reuse: Whether or not to reuse variables under variable scope 'scope'.
config: Optional configuration object. Will receive what is passed to
Estimator in config parameter, or the default config. Allows updating
things in your model_fn based on configuration such as num_ps_replicas,
or model_dir.
params: An optional dict of hyper parameters that will be passed into
input_fn and model_fn. Keys are names of parameters, values are basic
python types. There are reserved keys for TPUEstimator, including
'batch_size'.
Returns:
outputs: A {key: Tensor} mapping. The key 'action' is required.
"""
del config, params
return meta_tfdata.multi_batch_apply(self._single_batch_a_func, 2, features,
scope, mode, context_fn, reuse)
def loss_fn(self, labels, inference_outputs, mode, params=None):
"""This implements outer loss and configurable inner losses."""
if params and params.get('is_outer_loss', False):
pass
if self._num_mixture_components > 1:
gm = mdn.get_mixture_distribution(
inference_outputs['dist_params'], self._num_mixture_components,
self._action_size,
self._output_mean if self._normalize_outputs else None)
return -tf.reduce_mean(gm.log_prob(labels.action))
else:
return self._outer_loss_multiplier * tf.losses.mean_squared_error(
labels=labels.action,
predictions=inference_outputs['inference_output'])
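# Illustrative sketch (not wired into the model) of the mixture-density readout used in
# _single_batch_a_func above: MDN parameters -> mixture distribution -> action. The
# argument names are assumptions; the mdn.* calls mirror the ones in this file.
def _mdn_action_sketch(fc_input, num_components, action_size):
  """Return the approximate mode of the predicted gaussian mixture."""
  dist_params = mdn.predict_mdn_params(
      fc_input, num_components, action_size, condition_sigmas=False)
  gm = mdn.get_mixture_distribution(dist_params, num_components, action_size, None)
  # A stochastic policy would use gm.sample() instead (output_mixture_sample=True).
  return mdn.gaussian_mixture_approximate_mode(gm)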
@gin.configurable
class VRGripperDomainAdaptiveModel(VRGripperRegressionModel):
"""Base model which uses a learned loss to do domain adaptive imitation.
The model conditions on video only (no actions or gripper pose).
"""
def __init__(self,
predict_con_gripper_pose = False,
learned_loss_conv1d_layers = (10, 10,
6),
**kwargs):
"""Initialize the model.
Args:
predict_con_gripper_pose: If True, predict the condition gripper pose
input from the image features. Otherwise, set to zeros.
learned_loss_conv1d_layers: A tuple describing the conv1d layers of the
learned loss. If None, the learned loss won't use conv1d layers.
**kwargs: Passed to parent.
"""
super(VRGripperDomainAdaptiveModel, self).__init__(**kwargs)
self._predict_con_gripper_pose = predict_con_gripper_pose
self._learned_loss_conv1d_layers = learned_loss_conv1d_layers
def _predict_gripper_pose(self, feature_points):
"""Predict the condition gripper pose from feature points."""
out = feature_points
out = tf.layers.dense(out, 40, activation=tf.nn.relu, use_bias=False)
out = contrib_layers.layer_norm(out)
out = tf.layers.dense(out, 14, activation=None)
return out
def single_batch_a_func(
self, features, scope,
mode,
context_fn, reuse,
config,
params):
"""Single step action predictor when there is a single batch dim."""
del config
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
if params and params.get('is_inner_loop', False):
if self._predict_con_gripper_pose:
gripper_pose = self._predict_gripper_pose(feature_points)
else:
gripper_pose = tf.zeros_like(features.gripper_pose)
else:
gripper_pose = features.gripper_pose
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
feature_points, aux_input=gripper_pose, num_outputs=self._action_size)
action = self._output_mean + self._output_stddev * action
return {
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax'],
}
def a_func(self,
features,
scope,
mode,
context_fn = None,
reuse=tf.AUTO_REUSE,
config = None,
params = None
):
"""Single step action predictor. See parent class."""
return meta_tfdata.multi_batch_apply(self.single_batch_a_func, 2, features,
scope, mode, context_fn, reuse, config,
params)
def model_train_fn(self,
features,
labels,
inference_outputs,
mode,
config = None,
params = None
):
"""Output learned loss if inner loop, or behavior clone if outer loop."""
if params and params.get('is_outer_loss', False):
# Outer loss case: use standard RegressionModel loss.
return self.loss_fn(labels, inference_outputs, mode, params)
# Inner loss case: compute learned loss function.
with tf.variable_scope(
'learned_loss', reuse=tf.AUTO_REUSE, use_resource=True):
predicted_action, _ = meta_tfdata.multi_batch_apply(
vision_layers.BuildImageFeaturesToPoseModel,
2,
inference_outputs['feature_points'],
num_outputs=self._action_size)
if self._learned_loss_conv1d_layers is None:
return tf.losses.mean_squared_error(predicted_action,
inference_outputs['action'])
ll_input = tf.concat([
predicted_action, inference_outputs['feature_points'],
inference_outputs['inference_output']
], -1)
net = ll_input
for num_filters in self._learned_loss_conv1d_layers[:-1]:
net = tf.layers.conv1d(
net, num_filters, 10, activation=tf.nn.relu, use_bias=False)
net = contrib_layers.layer_norm(net)
net = tf.layers.conv1d(net, self._learned_loss_conv1d_layers[-1],
1) # 1x1 convolution.
return tf.reduce_mean(tf.reduce_sum(tf.square(net), axis=(1, 2)))
| [
"[email protected]"
]
| |
7c0088fc02afdb9058cbb4fdf743efb97e73fad2 | f76f83dcdfdbfe254ab67e26b244475d2e810819 | /conttudoweb/inventory/migrations/0016_auto_20200723_1607.py | 3116c549689509a9211c9601d3096006c7d686c2 | []
| no_license | ConTTudOweb/ConTTudOwebProject | fda13ece406e1904d6efe4c3ceebd30e3d168eae | 18c3b8da1f65714eb01a420a0dbfb5305b9461f3 | refs/heads/master | 2022-12-14T22:05:00.243429 | 2021-03-15T23:32:41 | 2021-03-15T23:32:41 | 138,349,067 | 1 | 3 | null | 2022-12-08T07:49:21 | 2018-06-22T21:19:03 | Python | UTF-8 | Python | false | false | 436 | py | # Generated by Django 3.0.8 on 2020-07-23 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('inventory', '0015_auto_20200723_1600'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.CharField(max_length=120, unique=True, verbose_name='descrição'),
        ),
    ]
| [
"[email protected]"
]
| |
1b78135398abeca244e835d6de11727d963c8134 | 49ee49ee34fa518b0df934081f5ea44a0faa3451 | /study-crow-framework/crow/examples/example_test.py | d252df0b805e995dadd5e2d37ab2bed1e000c5f6 | [
"BSD-3-Clause",
"MIT",
"ISC"
]
| permissive | kingsamchen/Eureka | a9458fcc7d955910bf2cefad3a1561cec3559702 | e38774cab5cf757ed858547780a8582951f117b4 | refs/heads/master | 2023-09-01T11:32:35.575951 | 2023-08-27T15:21:42 | 2023-08-27T15:22:31 | 42,903,588 | 28 | 16 | MIT | 2023-09-09T07:33:29 | 2015-09-22T01:27:05 | C++ | UTF-8 | Python | false | false | 1,401 | py | import urllib
assert "Hello World!" == urllib.urlopen('http://localhost:18080').read()
assert "About Crow example." == urllib.urlopen('http://localhost:18080/about').read()
assert 404 == urllib.urlopen('http://localhost:18080/list').getcode()
assert "3 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/3').read()
assert "100 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/100').read()
assert 400 == urllib.urlopen('http://localhost:18080/hello/500').getcode()
assert "3" == urllib.urlopen('http://localhost:18080/add_json', data='{"a":1,"b":2}').read()
assert "3" == urllib.urlopen('http://localhost:18080/add/1/2').read()
# test persistent connection
import socket
import time
s = socket.socket()
s.connect(('localhost', 18080))
for i in xrange(10):
    s.send('''GET / HTTP/1.1
Host: localhost\r\n\r\n''');
    assert 'Hello World!' in s.recv(1024)
# test large
s = socket.socket()
s.connect(('localhost', 18080))
s.send('''GET /large HTTP/1.1
Host: localhost\r\nConnection: close\r\n\r\n''')
r = ''
while True:
    d = s.recv(1024*1024)
    if not d:
        break;
    r += d
    print len(r), len(d)
print len(r), r[:100]
assert len(r) > 512*1024
# test timeout
s = socket.socket()
s.connect(('localhost', 18080))
# invalid request, connection will be closed after timeout
s.send('''GET / HTTP/1.1
hHhHHefhwjkefhklwejfklwejf
''')
print s.recv(1024)
| [
"[email protected]"
]
| |
0a7c0934e651558320e4ccc999fab5b29f046a66 | 3e54f3ad08a8d3e4f17b77394491e3f625672fbe | /hybrid_AC_DC_networks/optimal_power_flows/optimal_power_flow_hybrid_AC_DC_networks.py | 56a26c844b511870eb9ff1ffdc9e590e8acb8383 | [
"MIT"
]
| permissive | shubhampachori12110095/EnergyManagementSourceCodes | ccb6f38c155e955624330a0f20b9ed2f4941b08a | 1ea824941fe87528622ec7aa8148024752a3947c | refs/heads/master | 2023-08-01T23:57:45.271895 | 2021-09-26T04:55:05 | 2021-09-26T04:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,158 | py | """
Optimal power flow models for hybrid AC/DC microgrids
@author: Tianyang Zhao
@email: [email protected]
Several modelling assumptions should be noted.
1) The energy losses on the bi-directional converters are modelled simply (as constant conversion efficiencies), as used in
[1]Concerted action on computer modeling and simulation
[2]Energy management and operation modelling of hybrid AC–DC microgrid
More detailed modelling methods exist for different types of converters; see the following references for details.
[1]Mathematical Efficiency Modeling of Static Power Converters
[2]Power Loss Modeling of Isolated AC/DC Converter
These variations in the mathematical modelling lead to significant differences in the mathematical properties of the resulting problem.
2) Even renewable energy sources are assigned an operational cost, which is linear in this case.
3) The power losses are ignored in the real-time operation.
@Reference:
[1]
"""
from pypower import runopf
from gurobipy import *
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, diag, concatenate, power
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, diags
from distribution_system_optimization.test_cases import case33
from distribution_system_optimization.data_format import case_converters
# The following cases, data formats are imported from the Pypower package.
from pypower import case6ww, case9, case30, case118, case300
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
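# Illustrative helper (not used below) spelling out the linear converter-loss convention
# from the module docstring: an AC->DC transfer p_a2d arrives on the DC side scaled by
# eff_a2d, and a DC->AC transfer p_d2a arrives on the AC side scaled by eff_d2a.
# BIC_network_formulation builds exactly this coupling from the EFF_A2D and EFF_D2A
# columns of the converter table; the function name here is an assumption.
def converter_net_injections(p_a2d, p_d2a, eff_a2d, eff_d2a):
    """Return (net AC-side injection, net DC-side injection) of one converter."""
    return eff_d2a * p_d2a - p_a2d, eff_a2d * p_a2d - p_d2a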
def main(Case_AC=None, Case_DC=None, Converters=None):
"""
:param Case_AC: AC case
:param Case_DC: DC case
:param Converters: Bi-directional converters
:return: Obtained solutions for hybrid AC DC networks
"""
# 1) Problem formulation
model_AC = AC_network_formulation(Case_AC)
model_DC = DC_network_formulation(Case_DC)
# 2) Solve the initial problems
sol_AC = AC_opf_solver(model_AC)
sol_DC = DC_opf_solver(model_DC)
# 3) Connect two systems via the BIC networks
model_converters = BIC_network_formulation(model_AC, model_DC, Converters)
# 4) Solve the merged functions
# 4.1) Solve the problem
return model_converters
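# Hypothetical driver for main() (illustrative only; the concrete AC case, DC case and
# converter table are not defined in this module):
#
#     model_converters = main(Case_AC=..., Case_DC=..., Converters=...)
#
# where Case_AC and Case_DC are pypower-style case dicts and Converters follows the
# case_converters data format imported above.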
def DC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
# Modify the branch resistance
Branch_R = branch[:, BR_X]
for i in range(nl):
if Branch_R[i] <= 0:
Branch_R[i] = max(Branch_R)
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Pij_u = Slmax
Iij_u = Slmax
# Vm_u = [max(turn_to_power(bus[:, VMAX], 2))] * nb
Vm_u = power(bus[:, VMAX], 2)
Pg_u = gen[:, PMAX] / baseMVA
# Pg_l = -Pg_u
lx = concatenate([Pij_l, Iij_l, Vm_l, Pg_l])
ux = concatenate([Pij_u, Iij_u, Vm_u, Pg_u])
# KCL equation
Aeq_p = hstack([Ct - Cf, -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg])
beq_p = bus[:, PD] / baseMVA
# KVL equation
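# Branch-flow (DistFlow) voltage drop for a DC line from bus i to bus j, written in the
# squared-voltage variable v = V^2 and squared-current variable l = I^2:
#     v_j = v_i - 2 * R * P_ij + R^2 * l_ij
# which the row [-2*R, R^2, (Cf.T - Ct.T)] below encodes with a zero right-hand side.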
Aeq_KVL = hstack([-2 * diags(Branch_R), diags(power(Branch_R, 2)), Cf.T - Ct.T, zeros((nl, ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_KVL])
neq = len(beq)
nx = 2 * nl + nb + ng
Q = zeros(nx)
c = zeros(nx)
c0 = zeros(nx)
for i in range(ng):
Q[i + 2 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA
c[i + 2 * nl + nb] = gencost[i, 5] * baseMVA
c0[i + 2 * nl + nb] = gencost[i, 6]
model = {"Q": Q,
"c": c,
"c0": c0,
"Aeq": Aeq,
"beq": beq,
"lx": lx,
"ux": ux,
"nx": nx,
"nb": nb,
"nl": nl,
"ng": ng,
"f": f,
"neq": neq}
return model
def AC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
Branch_R = branch[:, BR_R]
Branch_X = branch[:, BR_X]
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Qij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Qg_l = gen[:, QMIN] / baseMVA
Pij_u = Slmax
Qij_u = Slmax
Iij_u = Slmax
Vm_u = 2 * power(bus[:, VMAX], 2)
Pg_u = 2 * gen[:, PMAX] / baseMVA
Qg_u = gen[:, QMAX] / baseMVA
# Problem formulation
lx = concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l])
ux = concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u])
# KCL equation, active power
Aeq_p = hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng))])
beq_p = bus[:, PD] / baseMVA
# KCL equation, reactive power
Aeq_q = hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg])
beq_q = bus[:, QD] / baseMVA
# KVL equation
Aeq_KVL = hstack([-2 * diags(Branch_R), -2 * diags(Branch_X),
diags(power(Branch_R, 2)) + diags(power(Branch_X, 2)), Cf.T - Ct.T,
zeros((nl, 2 * ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_q, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_q, beq_KVL])
neq = len(beq)
nx = 3 * nl + nb + 2 * ng
Q = zeros(nx)
c = zeros(nx)
c0 = zeros(nx)
for i in range(ng):
Q[i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA
c[i + 3 * nl + nb] = gencost[i, 5] * baseMVA
c0[i + 3 * nl + nb] = gencost[i, 6]
for i in range(nl):
c[i + 3 * nl] = Branch_R[i]
model = {"Q": Q,
"c": c,
"c0": c0,
"Aeq": Aeq,
"beq": beq,
"lx": lx,
"ux": ux,
"nx": nx,
"nb": nb,
"nl": nl,
"ng": ng,
"f": f,
"neq": neq}
return model
def AC_opf_solver(case):
"""
Optimal power flow solver for AC networks
:param model:
:return: AC OPF solution
"""
nl = case["nl"]
nb = case["nb"]
ng = case["ng"]
f = case["f"]
nx = case["nx"]
lx = case["lx"]
ux = case["ux"]
Aeq = case["Aeq"]
beq = case["beq"]
neq = len(beq)
Q = case["Q"]
c = case["c"]
c0 = case["c0"]
model = Model("OPF")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
for i in range(nl):
model.addConstr(x[i] * x[i] + x[i + nl] * x[i + nl] <= x[i + 2 * nl] * x[f[i] + 3 * nl])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij = xx[0:nl]
Qij = xx[nl + 0:2 * nl]
Iij = xx[2 * nl:3 * nl]
Vi = xx[3 * nl:3 * nl + nb]
Pg = xx[3 * nl + nb:3 * nl + nb + ng]
Qg = xx[3 * nl + nb + ng:3 * nl + nb + 2 * ng]
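    # Residual of the conic relaxation: zero (up to solver tolerance) exactly
    # when the relaxation is tight.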
primal_residual = zeros(nl)
for i in range(nl):
primal_residual[i] = Pij[i] * Pij[i] + Qij[i] * Qij[i] - Iij[i] * Vi[int(f[i])]
sol = {"Pij": Pij,
"Qij": Qij,
"Iij": Iij,
"Vm": power(Vi, 0.5),
"Pg": Pg,
"Qg": Qg,
"obj": obj}
return sol, primal_residual
def DC_opf_solver(case):
"""
Optimal power flow solver for DC networks
    :param case: model dict produced by the DC network formulation
    :return: (DC OPF solution dict, primal residual of the conic relaxation)
"""
nl = case["nl"]
nb = case["nb"]
ng = case["ng"]
f = case["f"]
nx = case["nx"]
lx = case["lx"]
ux = case["ux"]
Aeq = case["Aeq"]
beq = case["beq"]
neq = len(beq)
Q = case["Q"]
c = case["c"]
c0 = case["c0"]
model = Model("OPF_DC")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
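    # Second-order cone constraints of the DC branch flow model:
    # Pij^2 <= Iij * V_from for every branch.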
for i in range(nl):
model.addConstr(x[i] * x[i] <= x[i + nl] * x[f[i] + 2 * nl])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij = xx[0:nl]
Iij = xx[nl:2 * nl]
Vi = xx[2 * nl:2 * nl + nb]
Pg = xx[2 * nl + nb:2 * nl + nb + ng]
primal_residual = zeros(nl)
for i in range(nl):
primal_residual[i] = Pij[i] * Pij[i] - Iij[i] * Vi[int(f[i])]
sol = {"Pij": Pij,
"Iij": Iij,
"Vm": power(Vi, 0.5),
"Pg": Pg,
"obj": obj}
return sol, primal_residual
def BIC_network_formulation(case_AC, case_DC, case_BIC):
"""
    Merge the AC and DC networks through the AC/DC converters (BIC)
    :param case_AC: model dict of the AC network
    :param case_DC: model dict of the DC network
    :param case_BIC: converter data (AC/DC bus ids, efficiencies, ratings)
    :return: combined AC/DC optimal power flow solution
"""
from distribution_system_optimization.data_format.case_converters import AC_ID, DC_ID, EFF_A2D, EFF_D2A, \
SMAX
nx_BIC = shape(case_BIC["con"])[0]
nx_AC = case_AC["nx"]
nx_DC = case_DC["nx"]
nx = nx_AC + nx_DC + nx_BIC * 2
lx = concatenate([case_AC["lx"], case_DC["lx"], zeros(2 * nx_BIC)])
ux = concatenate([case_AC["ux"], case_DC["ux"], case_BIC["con"][:, SMAX] / case_BIC["baseMVA"],
case_BIC["con"][:, SMAX] / case_BIC["baseMVA"]])
Q = concatenate([case_AC["Q"], case_DC["Q"], zeros(nx_BIC * 2)])
c = concatenate([case_AC["c"], case_DC["c"], zeros(nx_BIC * 2)])
c0 = concatenate([case_AC["c0"], case_DC["c0"], zeros(nx_BIC * 2)])
# Update the equality constraints
neq = case_AC["neq"] + case_DC["neq"]
Aeq = zeros((neq, nx))
Aeq[0:case_AC["neq"], 0:case_AC["nx"]] = case_AC["Aeq"]
Aeq[case_AC["neq"]:neq, case_AC["nx"]:case_AC["nx"] + case_DC["nx"]] = case_DC["Aeq"]
# Update the KCL equations
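    # The last 2*nx_BIC variables are the converter transfers: first the AC->DC
    # flows, then the DC->AC flows. A transfer is drawn at full value from the
    # sending network and injected into the receiving network scaled by the
    # converter efficiency (EFF_A2D / EFF_D2A).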
for i in range(nx_BIC):
# Update the AC network information
Aeq[int(case_BIC["con"][i][AC_ID]), case_AC["nx"] + case_DC["nx"] + i] = -1
Aeq[int(case_BIC["con"][i][AC_ID]), case_AC["nx"] + case_DC["nx"] + nx_BIC + i] = case_BIC["con"][
i, EFF_D2A]
# Update the DC network information
Aeq[case_AC["nx"] + int(case_BIC["con"][i][DC_ID]), case_AC["nx"] + case_DC["nx"] + nx_BIC + i] = -1
Aeq[case_AC["nx"] + int(case_BIC["con"][i][DC_ID]), case_AC["nx"] + case_DC["nx"] + i] = \
case_BIC["con"][i, EFF_A2D]
beq = concatenate([case_AC["beq"], case_DC["beq"]])
model = Model("OPF_AC_DC")
# Define the decision variables
x = {}
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
for i in range(case_AC["nl"]):
model.addConstr(x[i] * x[i] + x[i + case_AC["nl"]] * x[i + case_AC["nl"]] <= x[i + 2 * case_AC["nl"]] * x[
case_AC["f"][i] + 3 * case_AC["nl"]])
for i in range(case_DC["nl"]):
model.addConstr(
x[case_AC["nx"] + i] * x[case_AC["nx"] + i] <= x[case_AC["nx"] + i + case_DC["nl"]] * x[
case_AC["nx"] + case_DC["f"][i] + 2 * case_DC["nl"]])
obj = 0
for i in range(nx):
obj += Q[i] * x[i] * x[i] + c[i] * x[i] + c0[i]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij_AC = xx[0:case_AC["nl"]]
Qij_AC = xx[case_AC["nl"]:2 * case_AC["nl"]]
Iij_AC = xx[2 * case_AC["nl"]:3 * case_AC["nl"]]
Vi_AC = xx[3 * case_AC["nl"]:3 * case_AC["nl"] + case_AC["nb"]]
Pg_AC = xx[3 * case_AC["nl"] + case_AC["nb"]:3 * case_AC["nl"] + case_AC["nb"] + case_AC["ng"]]
Qg_AC = xx[3 * case_AC["nl"] + case_AC["nb"] + case_AC["ng"]:3 * case_AC["nl"] + case_AC["nb"] + 2 * case_AC["ng"]]
primal_residual_AC = zeros(case_AC["nl"])
for i in range(case_AC["nl"]):
primal_residual_AC[i] = Pij_AC[i] * Pij_AC[i] + Qij_AC[i] * Qij_AC[i] - Iij_AC[i] * Vi_AC[int(case_AC["f"][i])]
Pij_DC = xx[case_AC["nx"]:case_AC["nx"] + case_DC["nl"]]
Iij_DC = xx[case_AC["nx"] + case_DC["nl"]:case_AC["nx"] + 2 * case_DC["nl"]]
Vi_DC = xx[case_AC["nx"] + 2 * case_DC["nl"]:case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"]]
Pg_DC = xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"]:case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"]]
primal_residual_DC = zeros(case_DC["nl"])
for i in range(case_DC["nl"]):
primal_residual_DC[i] = Pij_DC[i] * Pij_DC[i] - Iij_DC[i] * Vi_DC[int(case_DC["f"][i])]
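    # Converter residual: product of the AC->DC and DC->AC transfers on each
    # converter; a nonzero value means power circulates in both directions at
    # the same time.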
primal_residual_BIC = zeros(nx_BIC)
for i in range(nx_BIC):
primal_residual_BIC[i] = xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"] + i] * xx[case_AC["nx"] + 2 * case_DC["nl"] + case_DC["nb"] +
case_DC["ng"] + i + nx_BIC]
sol = {"Pij_AC": Pij_AC,
"Qij_AC": Qij_AC,
"Iij_AC": Iij_AC,
"Vm_AC": power(Vi_AC, 0.5),
"Pg_AC": Pg_AC,
"Qg_AC": Qg_AC,
"Pij_DC": Pij_DC,
"Iij_DC": Iij_DC,
"Vm_DC": power(Vi_DC, 0.5),
"Pg_DC": Pg_DC,
"residual_AC": primal_residual_AC,
"residual_DC": primal_residual_DC,
"residual_BIC": primal_residual_BIC,
"obj": obj}
return sol
if __name__ == '__main__':
    # A test hybrid AC/DC network connected through BIC converters
caseAC = case33.case33()
caseDC = case118.case118()
converters = case_converters.con()
sol = main(Case_AC=caseAC, Case_DC=caseDC, Converters=converters)
| [
"[email protected]"
]
| |
152e6de373d3950907e1041d754d5e444fc78569 | c71e5115b895065d2abe4120799ffc28fa729086 | /procon-archive/atcoder.jp/abc129/abc129_c/Main.py | 7e58a42e6fbe088cfc45aa4987d551c677b95895 | []
| no_license | ken0105/competitive-programming | eb82f92a7b7ad0db601ea341c1441de6c6165064 | f918f85a0ea6dfbe9cac3ef835f80503bb16a75d | refs/heads/master | 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from bisect import bisect, bisect_right, bisect_left
if __name__ == "__main__":
n,m = map(int,input().split())
a = set()
for i in range(m):
a.add(int(input()))
dp = [0] * (n + 1)
dp[0] = 1
for i in range(1,n+1):
if i not in a and i >= 2:
dp[i] = (dp[i-1] + dp[i-2])
elif i not in a and i == 1:
dp[i] = dp[i-1]
print(dp[n] % 1000000007) | [
"[email protected]"
]
| |
8fb3f79b350977c88931c3266b2db486922dcec9 | ffad717edc7ab2c25d5397d46e3fcd3975ec845f | /Python/pyesri/ANSWERS/countwords.py | 3cb94d4482bdf35763fd40b40028fc5136cad2d1 | []
| no_license | shaunakv1/esri-developer-conference-2015-training | 2f74caea97aa6333aa38fb29183e12a802bd8f90 | 68b0a19aac0f9755202ef4354ad629ebd8fde6ba | refs/heads/master | 2021-01-01T20:35:48.543254 | 2015-03-09T22:13:14 | 2015-03-09T22:13:14 | 31,855,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/python
import sys
if len(sys.argv) < 3:
print "Syntax: countwords.py PATTERN FILE ..."
sys.exit()
pattern = sys.argv[1]
for fname in sys.argv[2:]:
count = 0
with open(fname) as f:
for line in f:
if pattern in line:
count += 1
print '''"{0}" occurred on {1} lines in {2}'''.format(pattern,count,fname)
| [
"[email protected]"
]
| |
2c14b342ece31335f536bac793332b879a2c8b94 | 7f54637e347e5773dfbfded7b46b58b50544cfe5 | /8-1/chainxy/settings.py | 0f222740778cd9f63c7bbb6304924cd66e17b44f | []
| no_license | simba999/all-scrapy | 5cc26fd92b1d03366b74d4fff58c4a0641c85609 | d48aeb3c00fa2474153fbc8d131cf58402976e1d | refs/heads/master | 2021-01-25T14:24:04.715550 | 2018-03-03T13:43:13 | 2018-03-03T13:43:13 | 123,695,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,587 | py | # -*- coding: utf-8 -*-
# Scrapy settings for chainxy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'chainxy'
SPIDER_MODULES = ['chainxy.spiders']
NEWSPIDER_MODULE = 'chainxy.spiders'
# Feed export
FEED_FORMAT = 'csv' # exports to csv
FEED_EXPORT_FIELDS = ['store_number', 'address'] # which fields should be exported
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'chainxy (+http://www.yourdomain.com)'
USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
DOWNLOADER_MIDDLEWARES = {'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,}
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'chainxy.middlewares.ChainxySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'chainxy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'chainxy.pipelines.ChainxyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
]
| |
58e695680127bb42f2f78903fc84e26e9f79b012 | 7822e658e88f3f948732e6e3e588ca4b2eb5662a | /guias/2012-2/octubre-17/torneos.py | 3cc64246b4e91ed046f843aea8d045bff0ea5db2 | []
| no_license | carlos2020Lp/progra-utfsm | 632b910e96c17b9f9bb3d28329e70de8aff64570 | a0231d62837c54d4eb8bbf00bb1b84484efc1af2 | refs/heads/master | 2021-05-28T06:00:35.711630 | 2015-02-05T02:19:18 | 2015-02-05T02:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | def contar_partidos(partidos):
return len(partidos)
def obtener_equipos(partidos):
equipos = set()
for local, visita in partidos:
equipos.add(local)
equipos.add(visita)
equipos = list(equipos)
equipos.sort()
return equipos
def obtener_fechas(partidos):
fechas = set()
for p in partidos:
fecha, _ = partidos[p]
fechas.add(fecha)
fechas = list(fechas)
fechas.sort()
return fechas
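# The functions below expect `partidos` to be a dict that maps (local, visita)
# pairs to (fecha, resultado) tuples, where `resultado` is either None (match
# not played yet) or a (goles_local, goles_visita) pair.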
def calcular_puntos(partidos, equipo):
puntos = 0
for p in partidos:
_, resultado = partidos[p]
if resultado == None:
continue
local, visita = p
gl, gv = resultado
if equipo == local:
if gl > gv:
puntos += 3
elif gl == gv:
puntos += 1
elif equipo == visita:
if gl < gv:
puntos += 3
elif gl == gv:
puntos += 1
return puntos
def calcular_diferencia(partidos, equipo):
diferencia = 0
for p in partidos:
_, resultado = partidos[p]
if resultado == None:
continue
gl, gv = resultado
local, visita = p
if equipo == local:
diferencia += (gl - gv)
elif equipo == visita:
diferencia += (gv - gl)
return diferencia
def ordenar_equipos(partidos):
equipos = obtener_equipos(partidos)
estadisticas = []
for equipo in equipos:
pts = calcular_puntos(partidos, equipo)
dif = calcular_diferencia(partidos, equipo)
estadisticas.append((pts, dif, equipo))
estadisticas.sort()
estadisticas.reverse()
equipos_ordenados = []
for _, _, equipo in estadisticas:
equipos_ordenados.append(equipo)
return equipos_ordenados
| [
"[email protected]"
]
| |
958bffbcef5c0c35574ec6229d4eb3360c9cde5e | 9d9fcf401bb47ccaaa6c3fd3fe7a8be255762855 | /libs/numpy/sort/argsort.py | 2725c26fb628d43f78413d5fa7ac417f25fcd07d | []
| no_license | hanhiver/PythonBasic | f05ef9fe713f69610860c63e5223317decee09ad | 8e012855cce61fb53437758021416e5f6deb02ea | refs/heads/master | 2022-10-11T22:57:47.931313 | 2020-12-30T12:32:44 | 2020-12-30T12:32:44 | 148,477,052 | 0 | 3 | null | 2022-10-01T05:35:03 | 2018-09-12T12:29:33 | Python | UTF-8 | Python | false | false | 187 | py | import numpy as np
a = np.random.randint(0, 10, (4, 5))
print(a, '\n')
index = np.argsort(a, axis=0)
print(index, '\n')
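# index[..., 3] is the row order that sorts column 3; indexing `a` with it
# reorders the rows so that column 3 ends up in ascending order.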
index_3 = index[..., 3]
print(index_3, '\n')
print(a[index_3]) | [
"[email protected]"
]
| |
385dc29e8a96a82daa9709d0c22d2c368662202c | be0d83dde6b499b60f36c14c961a125581f36a57 | /preprocess_files/mv_img.py | 592114d7036909b48ede59be9b6dcca5df06b54f | []
| no_license | zhengziqiang/gan_learning | 4acaf18f452fed0e2eeb0ddb45d861e9d10af835 | d9ffb1c18e592715b62df684e23a362f8d07ac41 | refs/heads/master | 2021-01-01T13:35:28.696378 | 2017-10-29T13:42:51 | 2017-10-29T13:42:51 | 97,583,619 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import os
import glob
# d={}
# for files in glob.glob('/home/zzq/research/windows_file/IIIT-CFW1.0/tmp/*.jpg'):
# filepath, filename = os.path.split(files)
# # print filename
# l=filename.split('.')
# # print l[0]
# my_namee=filter(str.isalpha, l[0])
# print my_namee
# if d.has_key(my_namee):
# d[my_namee]+=1
# else:
# d[my_namee]=1
# print d
dest='/home/zzq/research/windows_file/IIIT-CFW1.0/dest/'
name={}
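# Count how many photos each person has: the key is the alphabetic part of the
# file name (digits stripped via filter(str.isalpha, ...)), the value the count.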
for files in glob.glob('/home/zzq/research/windows_file/IIIT-CFW1.0/realFaces/*.jpg'):
filepath, filename = os.path.split(files)
l=filename.split('.')
my_name=filter(str.isalpha,l[0])
if name.has_key(my_name):
name[my_name]+=1
else:
name[my_name]=1
| [
"[email protected]"
]
| |
9813d2f1469dc08e215edac52165f3615023264d | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/AlibabaOpendspAdgroupsAddRequest.py | ecc347df1177f0300f8f99e6b18777f4d00cdb29 | []
| no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | '''
Created by auto_sdk on 2015-01-20 12:44:32
'''
from top.api.base import RestApi
class AlibabaOpendspAdgroupsAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'alibaba.opendsp.adgroups.add'
| [
"[email protected]"
]
| |
cf634701ce51fc3cb9c14499ec878f065f7baad4 | 427cb811a465677542172b59f5e5f102e3cafb1a | /python/classes/subClass.py | 2244d735db508c992121644c9b9e179b8a63ef61 | []
| no_license | IzaakWN/CodeSnippets | 1ecc8cc97f18f77a2fbe980f322242c04dacfb89 | 07ad94d9126ea72c1a8ee5b7b2af176c064c8854 | refs/heads/master | 2023-07-26T21:57:10.660979 | 2023-07-20T20:35:59 | 2023-07-20T20:35:59 | 116,404,943 | 18 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | # http://www.jesshamrick.com/2011/05/18/an-introduction-to-classes-and-inheritance-in-python/
# https://stackoverflow.com/questions/2843165/python-how-to-inherite-and-override
# http://blog.thedigitalcatonline.com/blog/2014/05/19/method-overriding-in-python/
# https://docs.python.org/2.7/library/functions.html#super
class Animal(object):
def __init__(self,name,age):
self.name = name
self.age = age
def makeNoise(self):
print ">>> %s makes a noise"%(self.name)
def printName(self):
print ">>> Animal name = \"%s\""%(self.name)
def printClassification(self):
print ">>> Animal"
class Dog(Animal):
def __init__(self,name,age):
Animal.__init__(self,name,age)
# or super(Dog,self).__init__(name,age)]
def makeNoise(self):
print ">>> %s says \"%s\""%(self.name,"Woof!")
def printName(self):
print ">>> Dog name = \"%s\""%(self.name)
def printClassification(self):
super(Dog,self).printClassification()
print ">>> Dog"
animal1 = Animal("Carrol",2)
animal2 = Dog("Yeller",4)
print "\n>>> animal 1"
animal1.makeNoise()
animal1.printName()
print ">>>\n>>> animal 2"
animal2.makeNoise()
animal2.printName()
animal2.printClassification()
print
| [
"[email protected]"
]
| |
b5f7b40cdab61e773d1bec1c144966fc8c019ad5 | b9878c92b857f73ff0452fc51c822cfc9fa4dc1c | /watson_machine_learning_client/libs/repo/swagger_client/models/connection_object_target_experiments.py | f8548c105d870dc07cfbde41d0896b443cf3f175 | []
| no_license | DavidCastilloAlvarado/WMLC_mod | 35f5d84990c59b623bfdd27369fe7461c500e0a5 | f2673b9c77bd93c0e017831ee4994f6d9789d9a1 | refs/heads/master | 2022-12-08T02:54:31.000267 | 2020-09-02T15:49:21 | 2020-09-02T15:49:21 | 292,322,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | # coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ConnectionObjectTargetExperiments(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, type=None, connection=None, target=None):
"""
ConnectionObjectTargetExperiments - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'type': 'str',
'connection': 'dict(str, str)',
'target': 'object'
}
self.attribute_map = {
'type': 'type',
'connection': 'connection',
'target': 'target'
}
self._type = type
self._connection = connection
self._target = target
@property
def type(self):
"""
Gets the type of this ConnectionObjectTargetExperiments.
:return: The type of this ConnectionObjectTargetExperiments.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ConnectionObjectTargetExperiments.
:param type: The type of this ConnectionObjectTargetExperiments.
:type: str
"""
self._type = type
@property
def connection(self):
"""
Gets the connection of this ConnectionObjectTargetExperiments.
:return: The connection of this ConnectionObjectTargetExperiments.
:rtype: dict(str, str)
"""
return self._connection
@connection.setter
def connection(self, connection):
"""
Sets the connection of this ConnectionObjectTargetExperiments.
:param connection: The connection of this ConnectionObjectTargetExperiments.
:type: dict(str, str)
"""
self._connection = connection
@property
def target(self):
"""
Gets the target of this ConnectionObjectTargetExperiments.
:return: The target of this ConnectionObjectTargetExperiments.
:rtype: object
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this ConnectionObjectTargetExperiments.
:param target: The target of this ConnectionObjectTargetExperiments.
:type: object
"""
self._target = target
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
68740806ca9fdcb8c924b5a4b88a4c98f0efd8d7 | 3b831eedb7afede666088b6e018c829219938a93 | /Grouping_Values.py | d73419177b17ac18330e2f7223561e75e54c044e | []
| no_license | joydas65/GeeksforGeeks | f03ed1aaea88d894f4d8ac0d70f574c4cd78a64b | e58c42cb3c9fe3a87e6683d8e3fda442dc83b45b | refs/heads/master | 2023-01-12T02:19:54.967779 | 2023-01-10T17:28:41 | 2023-01-10T17:28:41 | 161,937,667 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | class Solution:
def isPossible(self, N, arr, K):
# code here
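        # Count the frequency of every value; the grouping is reported as
        # impossible as soon as some value occurs more than 2*K times.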
d = dict()
for i in arr:
if i in d:
d[i] += 1
else:
d[i] = 1
for i in d:
if d[i] > K*2:
return 0
return 1
| [
"[email protected]"
]
| |
773663c4df0ccd2fbf185f8bbedf2977848846c9 | 3c8b1a4d9e7d53fd643e02dabae50298a8122763 | /tests/__init__.py | 6ae0126758f6be63f55461a5786077a39670ba77 | [
"MIT"
]
| permissive | greyli/fanxiangce | f0866ed5dfd32a2cd795db92dec9e8785833d480 | c6eb8410867c7a743d1ede920b0858158fec961c | refs/heads/master | 2021-09-18T10:35:02.317823 | 2018-07-13T02:32:25 | 2018-07-13T02:32:25 | 67,604,143 | 79 | 40 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | # -*-coding: utf-8-*- | [
"[email protected]"
]
| |
752c131107a11c4cca9973aa5a08f2fc22b37083 | 25a565679443dc00be245c00cd68dde43601df50 | /workrobot/libs/region/class_regionmatcher.py | 4dff7eae558942da43aac12cab870177c1aa13fc | []
| no_license | plutoese/workrobot | 2d4e929a05be5aea1d6679769ac8c30aa42a1595 | 097571be9d61a120dd676464941cb9d0618963f6 | refs/heads/master | 2020-04-12T06:45:18.737553 | 2017-04-18T17:40:24 | 2017-04-18T17:40:24 | 63,757,855 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,751 | py | # coding=UTF-8
"""
=========================================
区域匹配类
=========================================
:Author: glen
:Date: 2016.10.26
:Tags: region
:abstract: 对区域进行匹配
**类**
==================
RegionMatcher
区域匹配类
**使用方法**
==================
**示范代码**
==================
"""
import re
from libs.imexport.class_mongodb import MongoDB,MonDatabase,MonCollection
from libs.imexport.class_Excel import Excel
import regex
import pandas as pd
class RegionMatcher:
def __init__(self, region_query=None):
        # Set up the query result (MongoDB collection)
if region_query is None:
mongo = MongoDB()
mdb = MonDatabase(mongodb=mongo, database_name='region')
collection = MonCollection(database=mdb, collection_name='admincode')
self.collection = collection.collection
else:
self.collection = region_query
        self.collection = None  # note: this unconditionally overrides the collection chosen above
def match(self,regions=None,year=None):
pass
class RegionMatchingAlgorithm:
def __init__(self,to_be_matched=None):
self._to_be_matched = to_be_matched
class RegionMatchingOrderAlgorithm(RegionMatchingAlgorithm):
""" 顺序匹配算法类
:param pandas.DataFrame to_be_matched: 待匹配的区域数据框
:param pandas.DataFrame to_be_compared: 标准的区域数据框
:return: 无返回值
"""
def __init__(self,to_be_matched=None,to_be_compared=None):
RegionMatchingAlgorithm.__init__(self,to_be_matched=to_be_matched)
self._to_be_compared = to_be_compared
        # The matching result is stored in the _result attribute
self._result = None
def correct(self,correction='auto'):
        if isinstance(correction, dict):
pass
else:
if re.match('^auto$',correction):
correction = self.auto_correction()
else:
correction = pd.read_excel(correction)
is_index_rid = False
if correction.index.name == 'rid':
is_index_rid = True
if 'rid' in correction.columns:
correction = correction.set_index('rid')
is_index_rid = True
if is_index_rid:
for ind in correction.index:
self._result.loc[ind,'region'] = correction.loc[ind,'matched']
else:
correction_dict = dict([(correction.loc[ind,'region'],correction.loc[ind,'matched']) for ind in correction.index])
for ind in RegionMatchingOrderAlgorithm.not_matched(self._result).index:
if self._result.loc[ind,'region'] in correction_dict:
self._result.loc[ind,'region'] = correction_dict[self._result.loc[ind,'region']]
@property
def simu_auto_corrected_region_list(self):
correction = self.auto_correction()
if correction.size > 0:
corr_result = pd.merge(self._result,self._to_be_compared[['cid','region']],how='left',on='cid')
corr_result = corr_result.rename(columns={'region_x':'region','region_y':'compared'})
corr_result['supplement'] = None
for ind in correction.index:
corr_result.loc[ind,'compared'] = correction.loc[ind,'matched']
corr_result.loc[ind,'acode'] = correction.loc[ind,'acode']
corr_result.loc[ind,'cid'] = correction.loc[ind,'cid']
corr_result.loc[ind,'supplement'] = self.output_of_region_set_mapping.loc[ind,'matching_regions']
del corr_result['_id']
return corr_result
@property
def simu_auto_corrected_region_list_short_version(self):
select_index = set()
for num in sorted(self.region_set_dict):
select_index.update([max(0,num-1),num,min(algo._result.shape[0]-1,num+1)])
result = self.simu_auto_corrected_region_list.loc[sorted(list(select_index)),]
return result
def find_anchor(self,type='merge'):
""" 寻找锚,即发现区域列表中确定匹配的区域(点)
:param str type: 定锚算法类型:merge(用pandas.DataFrame的merge定锚)
:return: 修改_result,无返回值
"""
if re.match('^merge$',type) is not None:
self._result = self._merge_matching()
@property
def region_set_mapping(self):
""" 定锚之后,生成缺失区域的参考区域选择映射
:return: 返回映射
"""
not_matched = RegionMatchingOrderAlgorithm.not_matched(self._result)
all_matched = RegionMatchingOrderAlgorithm.all_matched(self._result)
refer_regions_map = []
for i in not_matched.index:
region = not_matched.loc[i]['region']
            # Locate the anchored rows just above and below the unmatched row
for m in range(i,-1,-1):
if m in all_matched.index:
search_start = int(all_matched.loc[m]['cid'])
break
for m in range(i,self._result.shape[0]):
if m in all_matched.index:
search_end = int(all_matched.loc[m]['cid'])
break
            # Candidate regions lying between the two anchors
refer_regions = [self._to_be_compared.loc[n] for n in range(search_start+1,search_end)]
            # Build the mapping: a list whose elements are (region name, position, candidate matching regions)
refer_regions_map.append((region,i,refer_regions))
return refer_regions_map
@property
def output_of_region_set_mapping(self):
""" 返回区域选择映射
:return: 返回区域选择映射
"""
result = []
for record in self.region_set_mapping:
result.append([record[0],record[1],','.join([item['region'] for item in record[2]])])
result = pd.DataFrame(result,columns=['region','rid','matching_regions'])
result = result.set_index('rid')
return result
def auto_correction(self,error='auto'):
""" 返回自动纠错匹配结果
:param error: 允许错误数量
:return: 返回自动纠错匹配结果
"""
correction = []
map = self.region_set_mapping
for record in map:
region = record[0]
index = record[1]
refer_regions = record[2]
for n in range(len(refer_regions)):
if self.fuzzy_region_matching(region,refer_regions[n]['region'],error):
correction.append([region,index,refer_regions[n]['region'],refer_regions[n]['acode'],refer_regions[n]['cid']])
correction = pd.DataFrame(correction,columns=['region','rid','matched','acode','cid'])
correction = correction.set_index('rid')
return correction
def exactly_matching_from_region_set(self):
""" 从备选区域集中选择区域,用精确匹配
:return: 无返回值
"""
map = self.region_set_mapping
for record in map:
region = record[0]
index = record[1]
refer_regions = record[2]
for n in range(len(refer_regions)):
if re.match(region,refer_regions[n]['region']) is not None:
self._result.loc[index,'acode'] = refer_regions[n]['acode']
self._result.loc[index,'cid'] = refer_regions[n]['cid']
self._result.loc[index,'_id'] = refer_regions[n]['_id']
break
@staticmethod
def fuzzy_region_matching(region,compared,error='auto'):
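        # Fuzzy comparison via the third-party `regex` module: the pattern
        # `(?:region){e<=N}` matches `compared` when it is within N edits
        # (insertions, deletions or substitutions) of `region`. With
        # error='auto', N is 40% of the region name length, but at least 1.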
if re.match('^auto$',error) is not None:
error = max(1,int(len(region)*0.4))
return regex.fullmatch('(?:%s){e<=%s}' % (region, str(error)),compared) is not None
def _merge_matching(self):
""" 定锚,通过merge进行匹配
完成时self._result对象为pandas.DataFrame
region mid acode cid
0 北京市 0 110000 0
1 市辖区 1
2 东城区 2 110101 2
3 西城区 3 110102 3
        :return: no return value
"""
        # Return the initially anchored object: a pandas.DataFrame
merge_result = pd.merge(self._to_be_matched, self._to_be_compared, how='left', on='region')
merge_result = merge_result.drop_duplicates(subset='rid',keep=False)
#merge_result = pd.merge(self._to_be_matched,merge_result,how='left',on='rid')
#del merge_result['region_y']
return merge_result.rename(columns={'region_x':'region'})
@property
def accuracy(self):
accuracy = 100*(RegionMatchingOrderAlgorithm.all_matched(self._result).shape[0]/(self._result.shape[0]))
return accuracy
@staticmethod
def not_matched(pdata=None):
return pdata[pdata.isnull().any(axis=1)]
@staticmethod
def all_matched(pdata=None):
return pdata[pdata.notnull().all(axis=1)]
@property
def region_set_dict(self):
ref_regions_dict = dict()
for record in self.region_set_mapping:
to_be_selected = []
for item in record[2]:
to_be_selected.append(item['region'])
ref_regions_dict[record[1]] = to_be_selected
return ref_regions_dict
@property
def matched_region(self):
return self._result
if __name__ == '__main__':
pop_year = '2010'
pop_region_file_2010 = r'E:\data\popcensus\origin\var_temp.xls'
raw_region_2010 = Excel(pop_region_file_2010).read()
to_be_matched = [re.sub('\s+','',item[0]) for item in raw_region_2010 if re.match('^\s*$',item[0]) is None]
pd_to_be_matched = pd.DataFrame(to_be_matched,columns=['region'])
pd_to_be_matched['rid'] = range(pd_to_be_matched.shape[0])
collection = MonCollection(database=MonDatabase(mongodb=MongoDB(), database_name='region'), collection_name='admincode')
found = collection.collection.find(filter={'year':'2010'},
projection={'acode':True,'region':True,'_id':True},
sort=[('acode',1)])
pd_to_be_compared = pd.DataFrame(list(found))
pd_to_be_compared['cid'] = range(pd_to_be_compared.shape[0])
#pd_to_be_compared['_id'] = pd_to_be_compared['_id'].apply(str)
print(pd_to_be_matched,pd_to_be_compared)
algo = RegionMatchingOrderAlgorithm(pd_to_be_matched,pd_to_be_compared)
    # First, find reliable matches to serve as anchors
algo.find_anchor()
    # Next, run strict in-order matching within each anchored interval
algo.exactly_matching_from_region_set()
print(algo.matched_region)
    # Print the match rate
print('Accuracy Rate: {:.2f}%.'.format(algo.accuracy))
'''
    # Correct the errors
#algo.correct(correction=r'E:\data\popcensus\origin\correction.xlsx')
algo.auto_correction().to_excel(r'E:\data\popcensus\origin\correction.xlsx')
#algo.matched_region.to_excel(r'E:\data\popcensus\origin\pdoutput_before.xlsx')
algo.correct()
    # Re-run the matching
algo.exactly_matching_from_region_set()
print('Accuracy Rate: {:.2f}%.'.format(algo.accuracy))
    # Export the completed matching result
algo.matched_region.to_excel(r'E:\data\popcensus\origin\pdoutput.xlsx')
algo.output_of_region_set_mapping.to_excel(r'E:\data\popcensus\origin\reference_regions.xlsx')
print(algo.auto_correction())
algo.auto_correction().to_excel(r'E:\data\popcensus\origin\correction.xlsx')
print(algo.auto_correction().size)
algo.simu_auto_corrected_region_list.to_excel(r'E:\data\popcensus\origin\sim_output.xlsx')
algo.simu_auto_corrected_region_list_short_version.to_excel(r'E:\data\popcensus\origin\sim_output_short.xlsx')
result.to_excel(r'E:\data\popcensus\origin\pdoutput.xlsx')
cor_file = r'E:\data\popcensus\origin\correction.xlsx'
pdata = pd.read_excel(cor_file)
print(pdata)
'''
| [
"[email protected]"
]
| |
bfe6eb6e9734dbfe24074e1964400cdb06a23cc3 | fce1b262820539e8574e5476692096f599ca2b27 | /luffycity_s8/luffy/views/article.py | fecd0d8cf009734ca9798d3523d3afb6d261806e | []
| no_license | iyouyue/green_hand | 9386082a0589ee6e1805aafe189ee38e823c8202 | 7b80e8cc0622e4d8e9d07dde37c72ac7d6e3261c | refs/heads/master | 2020-03-26T14:39:02.224727 | 2018-08-16T14:27:57 | 2018-08-16T14:27:57 | 144,997,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from django.core.exceptions import ObjectDoesNotExist
from luffy import models
from luffy.response.base import BaseResponse
from luffy.serializers.article import ArticleSerializer, ArticleDetailSerializer
from luffy.pagination.page import LuffyPageNumberPagination
class MyException(Exception):
def __init__(self, msg):
self.msg = msg
class ArticleView(GenericViewSet):
renderer_classes = [JSONRenderer,]
def list(self, request, *args, **kwargs):
ret = BaseResponse()
try:
# 1. 获取数据
article_list = models.Article.objects.all().only('id', 'title','brief').order_by('-id')
# 2. 对数据进行分页
page = LuffyPageNumberPagination()
page_article_list = page.paginate_queryset(article_list, request, self)
# 3. 对数据序列化
ser = ArticleSerializer(instance=page_article_list, many=True)
ret.data = ser.data
except Exception as e:
ret.code = 1001
ret.error = '获取数据失败'
return Response(ret.dict)
def retrieve(self, request, pk, *args, **kwargs):
ret = BaseResponse()
try:
obj = models.Article.objects.get(id=pk)
ser = ArticleDetailSerializer(instance=obj, many=False)
ret.data = ser.data
except ObjectDoesNotExist as e:
ret.code = 1001
            ret.error = '查询数据不存在'  # 'The requested record does not exist'
except Exception as e:
ret.code = 1002
ret.error = "查询失败"
return Response(ret.dict)
| [
"[email protected]"
]
| |
16632e1cfd929360e81b6b66540741a40107d618 | 113d9082d153adbccd637da76318b984f249baf5 | /setup.py | b2cce85ef433c74f9b005df1a6e7c62d9261ca91 | [
"BSD-3-Clause"
]
| permissive | jorcast18462/django-applepodcast | bebb6f85d4c3ed98c96e6628443ece613898ca32 | 50732acfbe1ca258e5afb44c117a6ac5fa0c1219 | refs/heads/master | 2023-03-21T13:05:08.576831 | 2018-10-06T22:19:12 | 2018-10-06T22:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | from __future__ import unicode_literals
import os
from setuptools import find_packages, setup
setup(
name='django-applepodcast',
version='0.3.7',
description='A Django podcast app optimized for Apple Podcasts',
long_description=open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst')).read(),
author='Richard Cornish',
author_email='[email protected]',
url='https://github.com/richardcornish/django-applepodcast',
license='BSD',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=('tests',)),
install_requires=[
'bleach',
'mutagen',
'pillow',
],
test_suite='podcast.tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| [
"[email protected]"
]
| |
a9340662bebfa1cdd1adef79408712eb2e5883fd | 7188e4eca6bb6ba03453e5c1d9e3134e9ef1b588 | /apps/clndr/apps.py | 29d6fb8f53be320b7e1c8a59f9267f426baf18ea | []
| no_license | mitshel/ghc_yapokaju | c85eb2c3cbfd9802f6fac16a6d6192ae85ad2511 | d70b53235223dc935792aac3838678cb1b4d2b2e | refs/heads/master | 2020-05-15T21:50:15.646729 | 2019-04-21T08:48:31 | 2019-04-21T08:48:31 | 182,509,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.apps import AppConfig
class ClndrConfig(AppConfig):
name = 'apps.clndr'
verbose_name = 'The calendar'
def ready(self):
from . import signals
| [
"[email protected]"
]
| |
23d6a04e73cb64a8b99b1049956a491e698cfc84 | 86dc81e21f5b9e784dd087666d4d980c34781536 | /udp_bro_send.py | 596343dd578225cf7d1f4e55544f7bb7e2be5825 | []
| no_license | sheltie03/udp_python | 37b4e1f3377979c26e247a020efb958b3dfc28e5 | cb0551fc4026a3baff968e81b758ea4d7d7e5fd6 | refs/heads/master | 2021-07-09T15:37:46.684924 | 2017-10-02T08:06:25 | 2017-10-02T08:06:25 | 105,496,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
import socket
import time
def main():
host = ''
port = 4000
# local_addr = '192.168.10.255'
local_addr = '255.255.255.255'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind((host, port))
while True:
msg = 'Hello Server'.encode('utf-8')
print(msg)
        sock.sendto(msg, (local_addr, port))
        time.sleep(1)  # throttle the broadcast loop (the `time` import was otherwise unused)
    return
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a0f8f7b8b1b8309bccf987e46c698b39e152970c | 9c56151ff0c981f4d24aaaefd8896893225be8c2 | /fotochest/apps/administrator/__init__.py | 01dce1499601edfe311a5b67dd72fabd730a6561 | [
"MIT"
]
| permissive | ginking/fotochest | 9da4c34abb7df758e29f5f3284c93e3cd6933bcc | 0f9e6e72c7b587dec91cd5a0c3b081e28d056c62 | refs/heads/master | 2021-01-18T02:45:14.377309 | 2015-04-16T02:58:47 | 2015-04-16T02:58:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | default_app_config = 'fotochest.apps.administrator.apps.AdminConfig' | [
"[email protected]"
]
| |
a8664286f8358d03bcf8e11702b53d8ee5865ef0 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2018/8/axes3d.py | efbaedfef284ed36e1b8f3d87fbaaf0aa8ba1c71 | []
| no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 103,008 | py | """
axes3d.py, original mplot3d version by John Porter
Created: 23 Sep 2005
Parts fixed by Reinier Heeres <[email protected]>
Minor additions by Ben Axelrod <[email protected]>
Significant updates and revisions by Ben Root <[email protected]>
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from functools import reduce
from collections import defaultdict
import math
import warnings
import numpy as np
from matplotlib import artist
import matplotlib.axes as maxes
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.docstring as docstring
import matplotlib.scale as mscale
import matplotlib.transforms as mtransforms
from matplotlib.axes import Axes, rcParams
from matplotlib.colors import Normalize, LightSource
from matplotlib.transforms import Bbox
from matplotlib.tri.triangulation import Triangulation
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(
self, fig, rect=None, *args,
azim=-60, elev=30, zscale=None, sharez=None, proj_type='persp',
**kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
*proj_type* 'persp' or 'ortho' (default 'persp')
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = azim
self.initial_elev = elev
self.set_proj_type(proj_type)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
# inihibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
super().__init__(fig, rect, frameon=True, *args, **kwargs)
# Disable drawing of axes by base class
super().set_axis_off()
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None:
self.set_zscale(zscale)
if self.zaxis is not None:
self._zcid = self.zaxis.callbacks.connect(
'units finalize', lambda: self._on_units_changed(scalez=True))
else:
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.patch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
self.stale = True
def set_axis_on(self):
self._axis3don = True
self.stale = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
super()._process_unit_info(xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
super().set_xlim(-xdwl, xdw, auto=None)
super().set_ylim(-ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis] + super().get_children()
def _get_axis_list(self):
return super()._get_axis_list() + (self.zaxis, )
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
return [(minx, miny, minz),
(maxx, miny, minz),
(maxx, maxy, minz),
(minx, maxy, minz),
(minx, miny, maxz),
(maxx, miny, maxz),
(maxx, maxy, maxz),
(minx, maxy, maxz)]
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
@artist.allow_rasterization
def draw(self, renderer):
# draw the background patch
self.patch.draw(renderer)
self._frameon = False
# first, set the aspect
# this is duplicated from `axes._base._AxesBase.draw`
# but must be called before any of the artist are drawn as
# it adjusts the view limits and the size of the bounding box
# of the axes
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and patches and zorder them.
# Make sure they are drawn above the grids.
zorder_offset = max(axis.get_zorder()
for axis in self._get_axis_list()) + 1
for i, col in enumerate(
sorted(self.collections,
key=lambda col: col.do_3d_projection(renderer),
reverse=True)):
col.zorder = zorder_offset + i
for i, patch in enumerate(
sorted(self.patches,
key=lambda patch: patch.do_3d_projection(renderer),
reverse=True)):
patch.zorder = zorder_offset + i
if self._axis3don:
# Draw panes first
for axis in self._get_axis_list():
axis.draw_pane(renderer)
# Then axes
for axis in self._get_axis_list():
axis.draw(renderer)
# Then rest
super().draw(renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def _on_units_changed(self, scalex=False, scaley=False, scalez=False):
"""
Callback for processing changes to axis units.
Currently forces updates of data limits and view limits.
"""
self.relim()
self.autoscale_view(scalex=scalex, scaley=scaley, scalez=scalez)
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return super().get_autoscale_on() and self.get_autoscalez_on()
def get_autoscalez_on(self):
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
super().set_autoscale_on(b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b):
"""
Set whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
self._autoscaleZon = b
def set_zmargin(self, m):
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
        if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
self.stale = True
def margins(self, *margins, x=None, y=None, z=None, tight=True):
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single
positional argument specifies xmargin, ymargin and zmargin.
Passing both positional and keyword arguments for xmargin,
ymargin, and/or zmargin is invalid.
The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
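
        A short usage sketch (hypothetical values)::

            ax.margins(0.05)            # the same margin for x, y and z
            ax.margins(x=0.1, z=0.2)    # per-axis margins, y left unchanged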
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if margins and x is not None and y is not None and z is not None:
raise TypeError('Cannot pass both positional and keyword '
'arguments for x, y, and/or z.')
elif len(margins) == 1:
x = y = z = margins[0]
elif len(margins) == 3:
x, y, z = margins
elif margins:
raise TypeError('Must pass a single positional argument for all '
'margins, or one for each margin (x, y, z).')
if x is None and y is None and z is None:
if tight is not True:
warnings.warn('ignoring tight=%r in get mode' % (tight,))
return self._xmargin, self._ymargin, self._zmargin
if x is not None:
self.set_xmargin(x)
if y is not None:
self.set_ymargin(y)
if z is not None:
self.set_zmargin(z)
self.autoscale_view(
tight=tight, scalex=(x is not None), scaley=(y is not None),
scalez=(z is not None)
)
def autoscale(self, enable=True, axis='both', tight=None):
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
three axes. Therefore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
if axis in ['x', 'both']:
self._autoscaleXon = scalex = bool(enable)
else:
scalex = False
if axis in ['y', 'both']:
self._autoscaleYon = scaley = bool(enable)
else:
scaley = False
if axis in ['z', 'both']:
self._autoscaleZon = scalez = bool(enable)
else:
scalez = False
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = map(np.asarray, (X, Y, Z))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
# This updates the bounding boxes as to keep a record as
# to what the minimum sized rectangular volume holds the
# data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True):
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (
len(self.images) > 0
and len(self.lines) == len(self.patches) == 0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
self._shared_x_axes.clean()
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
self._shared_y_axes.clean()
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
self._shared_z_axes.clean()
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and np.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False,
*, xmin=None, xmax=None):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if right is None and np.iterable(left):
left, right = left
if xmin is not None:
cbook.warn_deprecated('3.0', name='`xmin`',
alternative='`left`', obj_type='argument')
if left is not None:
raise TypeError('Cannot pass both `xmin` and `left`')
left = xmin
if xmax is not None:
cbook.warn_deprecated('3.0', name='`xmax`',
alternative='`right`', obj_type='argument')
if right is not None:
raise TypeError('Cannot pass both `xmax` and `right`')
right = xmax
self._process_unit_info(xdata=(left, right))
left = self._validate_converted_limits(left, self.convert_xunits)
right = self._validate_converted_limits(right, self.convert_xunits)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False,
*, ymin=None, ymax=None):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if top is None and np.iterable(bottom):
bottom, top = bottom
if ymin is not None:
cbook.warn_deprecated('3.0', name='`ymin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `ymin` and `bottom`')
bottom = ymin
if ymax is not None:
cbook.warn_deprecated('3.0', name='`ymax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `ymax` and `top`')
top = ymax
self._process_unit_info(ydata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_yunits)
top = self._validate_converted_limits(top, self.convert_yunits)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False,
*, zmin=None, zmax=None):
"""
Set 3D z limits.
        See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if top is None and np.iterable(bottom):
bottom, top = bottom
if zmin is not None:
cbook.warn_deprecated('3.0', name='`zmin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `zmin` and `bottom`')
bottom = zmin
if zmax is not None:
cbook.warn_deprecated('3.0', name='`zmax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `zmax` and `top`')
top = zmax
self._process_unit_info(zdata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_zunits)
top = self._validate_converted_limits(top, self.convert_zunits)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
            # Call all of the other z-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_zlim = set_zlim3d
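    # Illustrative usage sketch (not part of the original source): each limit
    # setter accepts either two scalars or a single (min, max) pair. It assumes
    # an existing 3D axes `ax`, e.g. created with
    # `ax = plt.figure().add_subplot(111, projection='3d')`.
    #
    #     ax.set_xlim3d(-1, 1)
    #     ax.set_ylim3d((-2, 2))
    #     ax.set_zlim3d(bottom=0, top=5)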
def get_xlim3d(self):
return tuple(self.xy_viewLim.intervalx)
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
if get_xlim.__doc__ is not None:
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return tuple(self.xy_viewLim.intervaly)
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
if get_ylim.__doc__ is not None:
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return tuple(self.zz_viewLim.intervalx)
get_zlim = get_zlim3d
def get_zscale(self):
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs):
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
if maxes.Axes.set_xscale.__doc__ is not None:
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs):
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
self.stale = True
if maxes.Axes.set_yscale.__doc__ is not None:
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs):
"""
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
            Currently, Axes3D objects only support linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
self.stale = True
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs):
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False):
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None):
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self):
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
This can be used to rotate the axes programmatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
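    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `fig`
    # its figure; both angles are given in degrees:
    #
    #     ax.view_init(elev=30, azim=-60)
    #     fig.canvas.draw_idle()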
def set_proj_type(self, proj_type):
"""
Set the projection type.
Parameters
----------
proj_type : str
Type of projection, accepts 'persp' and 'ortho'.
"""
if proj_type == 'persp':
self._projection = proj3d.persp_transformation
elif proj_type == 'ortho':
self._projection = proj3d.ortho_transformation
else:
raise ValueError("unrecognized projection: %s" % proj_type)
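    # Usage sketch (illustrative only): switch to an orthographic projection,
    # assuming an existing Axes3D instance `ax`:
    #
    #     ax.set_proj_type('ortho')   # or 'persp' (the default)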
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
projM = self._projection(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(projM, M0)
return M
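    # The matrix returned above is the product projM @ viewM @ worldM: data
    # coordinates are first rescaled to the unit cube, then transformed into
    # the eye frame, then projected. A hedged sketch of projecting one data
    # point with it (assuming `ax` is an Axes3D):
    #
    #     from mpl_toolkits.mplot3d import proj3d
    #     x2d, y2d, _ = proj3d.proj_transform(1.0, 1.0, 1.0, ax.get_proj())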
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn(
"Axes3D.figure.canvas is 'None', mouse rotation disabled. "
"Set canvas then call Axes3D.mouse_init().")
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
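    # Usage sketch (illustrative only): rebind the interactive controls so the
    # left button rotates and either the middle or right button zooms,
    # assuming an existing Axes3D instance `ax`:
    #
    #     ax.mouse_init(rotate_btn=1, zoom_btn=[2, 3])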
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
super().cla()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
try:
self.set_zlim(0, 1)
except TypeError:
pass
self._autoscaleZon = True
self._zmargin = 0
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
        Return *z* formatted as a string. This function will use the
        :attr:`fmt_zdata` attribute if it is callable, otherwise it falls
        back on the major formatter of the z-axis.
"""
        try:
            return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
Given the 2D view coordinates attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
        if self.button_pressed in self._rotate_btn:
            # ignore xd and yd and display angles instead
            return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
        # nearest edge
p0, p1 = min(self.tunit_edges(),
key=lambda edge: proj3d.line2d_seg_dist(
edge[0], edge[1], (xd, yd)))
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.stale = True
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
'''
        if labelpad is not None:
            self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self):
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
Parameters
----------
b : bool
"""
self._frameon = bool(b)
self.stale = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs):
b = True
self._draw_grid = cbook._string_to_bool(b)
self.stale = True
def ticklabel_format(
self, *, style='', scilimits=None, useOffset=None, axis='both'):
"""
Convenience method for manipulating the ScalarFormatter
        used by default for linear axes in Axes3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = style.lower()
axis = axis.lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style == 'plain':
sb = False
elif style == '':
sb = None
else:
            raise ValueError("%s is not a valid style value" % style)
try:
if sb is not None:
                if axis in ['both', 'x']:
                    self.xaxis.major.formatter.set_scientific(sb)
                if axis in ['both', 'y']:
                    self.yaxis.major.formatter.set_scientific(sb)
                if axis in ['both', 'z']:
                    self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
documentation. Note that this is for Axes3D objects,
therefore, setting *axis* to 'both' will result in the
        parameters being set for all three axes. In addition, *axis*
        can take a value of 'z' to apply parameters to the
        z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs):
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
        mean that the settings are applied to all three axes. In addition,
        the *axis* parameter accepts a value of 'z', which
        applies the settings to the z-axis only.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
accept settings as if it was like the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
super().tick_params(axis, **kwargs)
        if axis in ['z', 'both']:
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
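    # Usage sketch (illustrative only): shrink the z tick labels and pad them
    # away from the axis, assuming an existing Axes3D instance `ax` and the
    # 'z' value documented above:
    #
    #     ax.tick_params(axis='z', labelsize=8, pad=6)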
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
        if upper is None and np.iterable(lower):
            lower, upper = lower
        old_lower, old_upper = self.get_zbound()
        if lower is None:
            lower = old_lower
        if upper is None:
            upper = old_upper
        if self.zaxis_inverted():
            if lower < upper:
                self.set_zlim(upper, lower, auto=None)
            else:
                self.set_zlim(lower, upper, auto=None)
        else:
            if lower < upper:
                self.set_zlim(lower, upper, auto=None)
            else:
                self.set_zlim(upper, lower, auto=None)
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = super().text(x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, zdir='z', **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* x, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
had_data = self.has_data()
# `zs` can be passed positionally or as keyword; checking whether
# args[0] is a string matches the behavior of 2D `plot` (via
# `_process_plot_var_args`).
if args and not isinstance(args[0], str):
zs = args[0]
args = args[1:]
if 'zs' in kwargs:
                raise TypeError("plot() got multiple values for argument 'zs'")
else:
zs = kwargs.pop('zs', 0)
# Match length
zs = np.broadcast_to(zs, len(xs))
lines = super().plot(xs, ys, *args, **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
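    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `np` is
    # numpy: a 3D curve, and the same 2D data lifted into the z=0 plane.
    #
    #     theta = np.linspace(0, 4 * np.pi, 200)
    #     ax.plot(np.cos(theta), np.sin(theta), theta, label='helix')
    #     ax.plot(np.cos(theta), np.sin(theta), zs=0, zdir='z')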
def plot_surface(self, X, Y, Z, *args, norm=None, vmin=None,
vmax=None, lightsource=None, **kwargs):
"""
Create a surface plot.
By default it will be colored in shades of a solid color, but it also
supports color mapping by supplying the *cmap* argument.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 10.
'classic' mode uses a default of ``rstride = cstride = 10`` instead
of the new default of ``rcount = ccount = 50``.
color : color-like
Color of the surface patches.
cmap : Colormap
Colormap of the surface patches.
facecolors : array-like of colors.
Colors of each individual patch.
norm : Normalize
Normalization for the colormap.
vmin, vmax : float
Bounds for the normalization.
shade : bool
Whether to shade the face colors.
**kwargs :
Other arguments are forwarded to `.Poly3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
compute_strides = has_count
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
compute_strides = not has_stride
if compute_strides:
rstride = int(max(np.ceil(rows / rcount), 1))
cstride = int(max(np.ceil(cols / ccount), 1))
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = kwargs.pop('color', None)
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
fcolors = None
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
# evenly spaced, and including both endpoints
row_inds = list(range(0, rows-1, rstride)) + [rows-1]
col_inds = list(range(0, cols-1, cstride)) + [cols-1]
colset = [] # the sampled facecolor
polys = []
for rs, rs_next in zip(row_inds[:-1], row_inds[1:]):
for cs, cs_next in zip(col_inds[:-1], col_inds[1:]):
ps = [
# +1 ensures we share edges between polygons
cbook._array_perimeter(a[rs:rs_next+1, cs:cs_next+1])
for a in (X, Y, Z)
]
# ps = np.stack(ps, axis=-1)
ps = np.array(ps).T
polys.append(ps)
if fcolors is not None:
colset.append(fcolors[rs][cs])
def get_normals(polygons):
"""
            Takes a list of polygons and returns an array of their normals.
"""
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
# pick three points around the polygon at which to find the normal
# doesn't vectorize because polygons is jagged
i1, i2, i3 = 0, len(ps)//3, 2*len(ps)//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2)
# note that the striding causes some polygons to have more coordinates
# than others
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, get_normals(polys))
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
# doesn't vectorize because polys is jagged
avg_z = np.array([ps[:,2].mean() for ps in polys])
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, get_normals(polys))
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
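    # Usage sketch (illustrative only), assuming `ax` is an Axes3D, `fig` its
    # figure and `np` is numpy:
    #
    #     X, Y = np.meshgrid(np.linspace(-2, 2, 80), np.linspace(-2, 2, 80))
    #     Z = np.exp(-(X**2 + Y**2))
    #     surf = ax.plot_surface(X, Y, Z, cmap='viridis', rcount=40, ccount=40)
    #     fig.colorbar(surf)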
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
This normal of course might not make sense for polygons with
more than three points not lying in a plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = mcolors.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
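    # Shading math recap (derived from the code above): with unit normal n,
    #     shade = dot(n, [-1, -1, 0.5])
    #     brightness = 0.5 + 0.5 * Normalize(min(shade), max(shade))(shade)
    # so the RGB channels are scaled into [0.5, 1.0] while alpha is preserved;
    # faces pointing toward the fixed "light" direction come out brightest.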
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
"""
Plot a 3D wireframe.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Setting a count to zero causes the data to be
not sampled in the corresponding direction, producing a 3D line
plot rather than a wireframe plot. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 1. Setting a
stride to zero causes the data to be not sampled in the
corresponding direction, producing a 3D line plot rather than a
wireframe plot.
'classic' mode uses a default of ``rstride = cstride = 1`` instead
of the new default of ``rcount = ccount = 50``.
**kwargs :
Other arguments are forwarded to `.Line3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 1)
cstride = kwargs.pop('cstride', 1)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
if has_count:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
if not has_stride:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(range(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1):
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(range(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1):
cii += [cols-1]
else:
cii = []
if rstride == 0 and cstride == 0:
            raise ValueError("Either rstride or cstride must be nonzero")
# If the inputs were empty, then just
# reset everything.
if Z.size == 0:
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = ([list(zip(xl, yl, zl))
for xl, yl, zl in zip(xlines, ylines, zlines)]
+ [list(zip(xl, yl, zl))
for xl, yl, zl in zip(txlines, tylines, tzlines)])
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
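    # Usage sketch (illustrative only), assuming `ax` is an Axes3D; the helper
    # `get_test_data` defined at the bottom of this module supplies a sample
    # grid:
    #
    #     X, Y, Z = get_test_data(0.1)
    #     ax.plot_wireframe(X, Y, Z, rcount=30, ccount=30)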
def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
lightsource=None, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
        :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: gallery/mplot3d/trisurf3d.py
.. plot:: gallery/mplot3d/trisurf3d_2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles]
yt = tri.y[triangles]
zt = z[triangles]
verts = np.stack((xt, yt, zt), axis=-1)
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
# average over the three points of each triangle
avg_z = verts[:, :, 2].mean(axis=1)
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
v1 = verts[:, 0, :] - verts[:, 1, :]
v2 = verts[:, 1, :] - verts[:, 2, :]
normals = np.cross(v1, v2)
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
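    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `np` is
    # numpy; with plain 1D x, y, z arrays a Delaunay triangulation is built
    # automatically:
    #
    #     theta, r = np.meshgrid(np.linspace(0, 2 * np.pi, 40, endpoint=False),
    #                            np.linspace(0.1, 1.0, 8))
    #     x = (r * np.cos(theta)).ravel()
    #     y = (r * np.sin(theta)).ravel()
    #     ax.plot_trisurf(x, y, np.sin(x * y), cmap='viridis')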
def _3d_extend_contour(self, cset, stride=5):
'''
        Extend a contour in 3D by creating bands of quadrilaterals ('walls')
        around each contour line, spanning half a level below to half a level
        above it.
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
paths = linec.get_paths()
if not paths:
continue
topverts = art3d.paths_to_3d_segments(paths, z - dz)
botverts = art3d.paths_to_3d_segments(paths, z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = np.round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(np.round(nsteps)) - 1):
i1 = int(np.round(i * stepsize))
i2 = int(np.round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None):
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections):
            if offset is not None:
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contour(jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
def tricontour(self, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontour(tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, zdir='z', offset=None, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contourf(jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
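    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and the
    # sample grid from `get_test_data` below: filled contours drawn in 3D plus
    # a projection onto a plane normal to x via *zdir* and *offset*.
    #
    #     X, Y, Z = get_test_data(0.05)
    #     ax.contourf(X, Y, Z, zdir='z')
    #     ax.contourf(X, Y, Z, zdir='x', offset=-40)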
def tricontourf(self, *args, zdir='z', offset=None, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontourf(tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineCollection
- PatchCollection
'''
zvals = np.atleast_1d(zs)
        if len(zvals) > 0:
            zsortval = min(zvals)
        else:
            zsortval = 0  # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
super().add_collection(col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
*args, **kwargs):
'''
Create a scatter plot.
============ ========================================================
Argument Description
============ ========================================================
*xs*, *ys* Positions of data points.
*zs* Either an array of the same length as *xs* and
*ys* or a single value to place all points in
the same plane. Default is 0.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
*s* Size in points^2. It is a scalar or an array of the
same length as *x* and *y*.
*c* A color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however, including the
case of a single row to specify the same color for
all points.
*depthshade*
Whether or not to shade the scatter markers to give
the appearance of depth. Default is *True*.
============ ========================================================
Keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.scatter`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
xs, ys, zs = np.broadcast_arrays(
*[np.ravel(np.ma.filled(t, np.nan)) for t in [xs, ys, zs]])
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
patches = super().scatter(xs, ys, s=s, c=c, *args, **kwargs)
is_2d = not np.iterable(zs)
zs = np.broadcast_to(zs, len(xs))
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
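    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `np` is
    # numpy; marker colors are mapped from the z values:
    #
    #     rng = np.random.RandomState(0)
    #     xs, ys, zs = rng.rand(3, 100)
    #     ax.scatter(xs, ys, zs, c=zs, cmap='plasma', depthshade=True)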
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = super().bar(left, height, *args, **kwargs)
zs = np.broadcast_to(zs, len(left))
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
        if len(verts) > 0:
            # the following has to be skipped if verts is empty
            # NOTE: Bugs could still occur if len(verts) > 0,
            # but the "2nd dimension" is empty.
            xs, ys = list(zip(*verts))
        else:
            xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color=None,
zsort='average', shade=True, *args, **kwargs):
"""Generate a 3D barplot.
        This method creates a three-dimensional bar plot where the width,
depth, height, and color of the bars can all be uniquely set.
Parameters
----------
x, y, z : array-like
The coordinates of the anchor point of the bars.
dx, dy, dz : scalar or array-like
The width, depth, and height of the bars, respectively.
color : sequence of valid color specifications, optional
The color of the bars can be specified globally or
individually. This parameter can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
zsort : str, optional
The z-axis sorting scheme passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
shade : bool, optional (default = True)
When true, this shades the dark sides of the bars (relative
to the plot's source of light).
Any additional keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
collection : Poly3DCollection
A collection of three dimensional polygons representing
the bars.
"""
had_data = self.has_data()
x, y, z, dx, dy, dz = np.broadcast_arrays(
np.atleast_1d(x), y, z, dx, dy, dz)
minx = np.min(x)
maxx = np.max(x + dx)
miny = np.min(y)
maxy = np.max(y + dy)
minz = np.min(z)
maxz = np.max(z + dz)
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
color = [self._get_patches_for_fill.get_next_color()]
if len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(mcolors.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
if shade:
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
else:
sfacecolors = facecolors
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
return col
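    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `np` is
    # numpy: a small grid of bars anchored at z = 0.
    #
    #     _xx, _yy = np.meshgrid(np.arange(4), np.arange(3))
    #     x, y = _xx.ravel(), _yy.ravel()
    #     top = x + y
    #     ax.bar3d(x, y, np.zeros_like(top), 0.8, 0.8, top, shade=True)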
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = super().set_title(label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args,
length=1, arrow_length_ratio=.3, pivot='tail', normalize=False,
**kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations (default is
tail of arrow; see *pivot* kwarg)
*U*, *V*, *W*:
The x, y and z components of the arrow vectors
        The arguments can be array-like or scalars, so long as they
        can be broadcast together. The arguments can also be
        masked arrays. If an element in any of the arguments is masked, then
that corresponding quiver element will not be plotted.
Keyword arguments:
            *length*: [1.0 | float]
                The length of each quiver; defaults to 1.0. The unit is
                the same as that of the axes.
            *arrow_length_ratio*: [0.3 | float]
                The ratio of the arrow head to the quiver shaft;
                defaults to 0.3.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow
rotates about this point, hence the name *pivot*.
Default is 'tail'
*normalize*: bool
When True, all of the arrows will be the same length. This
defaults to False, where the arrows will be different lengths
depending on the values of u,v,w.
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(uvw, angle=15):
"""
            Return the two arrowhead direction vectors, obtained by rotating
            the (unit) vector *uvw* by +/- *angle* degrees about an axis
            perpendicular to it.
"""
# get unit direction vector perpendicular to (u,v,w)
norm = np.linalg.norm(uvw[:2])
if norm > 0:
x = uvw[1] / norm
y = -uvw[0] / norm
else:
x, y = 0, 1
# compute the two arrowhead direction unit vectors
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrices
Rpos = np.array([[c+(x**2)*(1-c), x*y*(1-c), y*s],
[y*x*(1-c), c+(y**2)*(1-c), -x*s],
[-y*s, x*s, c]])
# opposite rotation negates all the sin terms
Rneg = Rpos.copy()
Rneg[[0,1,2,2],[2,2,0,1]] = -Rneg[[0,1,2,2],[2,2,0,1]]
# multiply them to get the rotated vector
return Rpos.dot(uvw), Rneg.dot(uvw)
had_data = self.has_data()
# handle args
argi = 6
if len(args) < argi:
raise ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all in same shape
assert len({k.shape for k in input_args}) == 1
shaft_dt = np.linspace(0, length, num=2)
arrow_dt = shaft_dt * arrow_length_ratio
if pivot == 'tail':
shaft_dt -= length
elif pivot == 'middle':
shaft_dt -= length/2.
elif pivot != 'tip':
raise ValueError('Invalid pivot argument: ' + str(pivot))
XYZ = np.column_stack(input_args[:3])
UVW = np.column_stack(input_args[3:argi]).astype(float)
# Normalize rows of UVW
norm = np.linalg.norm(UVW, axis=1)
# If any row of UVW is all zeros, don't make a quiver for it
mask = norm > 0
XYZ = XYZ[mask]
if normalize:
UVW = UVW[mask] / norm[mask].reshape((-1, 1))
else:
UVW = UVW[mask]
if len(XYZ) > 0:
# compute the shaft lines all at once with an outer product
shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
# compute head direction vectors, n heads by 2 sides by 3 dimensions
head_dirs = np.array([calc_arrow(d) for d in UVW])
# compute all head lines at once, starting from where the shaft ends
heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
# stack left and right head lines together
heads.shape = (len(arrow_dt), -1, 3)
# transpose to get a list of lines
heads = heads.swapaxes(0, 1)
lines = [*shafts, *heads]
else:
lines = []
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
return linec
quiver3D = quiver
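    # Usage sketch (illustrative only), assuming `ax` is an Axes3D and `np` is
    # numpy: a coarse vector field with normalized arrow lengths.
    #
    #     x, y, z = np.meshgrid(np.arange(-1, 1, 0.5),
    #                           np.arange(-1, 1, 0.5),
    #                           np.arange(-1, 1, 0.5))
    #     u, v, w = np.sin(np.pi * x), np.cos(np.pi * y), np.ones_like(z)
    #     ax.quiver(x, y, z, u, v, w, length=0.2, normalize=True)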
def voxels(self, *args, facecolors=None, edgecolors=None, **kwargs):
"""
ax.voxels([x, y, z,] /, filled, **kwargs)
Plot a set of filled voxels
All voxels are plotted as 1x1x1 cubes on the axis, with filled[0,0,0]
placed with its lower corner at the origin. Occluded faces are not
plotted.
Call signatures::
voxels(filled, facecolors=fc, edgecolors=ec, **kwargs)
voxels(x, y, z, filled, facecolors=fc, edgecolors=ec, **kwargs)
.. versionadded:: 2.1
Parameters
----------
filled : 3D np.array of bool
A 3d array of values, with truthy values indicating which voxels
to fill
x, y, z : 3D np.array, optional
The coordinates of the corners of the voxels. This should broadcast
to a shape one larger in every dimension than the shape of `filled`.
These can be used to plot non-cubic voxels.
If not specified, defaults to increasing integers along each axis,
like those returned by :func:`~numpy.indices`.
As indicated by the ``/`` in the function signature, these arguments
can only be passed positionally.
facecolors, edgecolors : array_like, optional
The color to draw the faces and edges of the voxels. Can only be
passed as keyword arguments.
This parameter can be:
- A single color value, to color all voxels the same color. This
can be either a string, or a 1D rgb/rgba array
- ``None``, the default, to use a single color for the faces, and
the style default for the edges.
- A 3D ndarray of color names, with each item the color for the
corresponding voxel. The size must match the voxels.
- A 4D ndarray of rgb/rgba data, with the components along the
last axis.
**kwargs
Additional keyword arguments to pass onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
faces : dict
A dictionary indexed by coordinate, where ``faces[i,j,k]`` is a
`Poly3DCollection` of the faces drawn for the voxel
``filled[i,j,k]``. If no faces were drawn for a given voxel, either
because it was not asked to be drawn, or it is fully occluded, then
``(i,j,k) not in faces``.
Examples
--------
.. plot:: gallery/mplot3d/voxels.py
.. plot:: gallery/mplot3d/voxels_rgb.py
.. plot:: gallery/mplot3d/voxels_torus.py
.. plot:: gallery/mplot3d/voxels_numpy_logo.py
"""
# work out which signature we should be using, and use it to parse
# the arguments. Name must be voxels for the correct error message
if len(args) >= 3:
# underscores indicate position only
def voxels(__x, __y, __z, filled, **kwargs):
return (__x, __y, __z), filled, kwargs
else:
def voxels(filled, **kwargs):
return None, filled, kwargs
xyz, filled, kwargs = voxels(*args, **kwargs)
# check dimensions
if filled.ndim != 3:
raise ValueError("Argument filled must be 3-dimensional")
size = np.array(filled.shape, dtype=np.intp)
# check xyz coordinates, which are one larger than the filled shape
coord_shape = tuple(size + 1)
if xyz is None:
x, y, z = np.indices(coord_shape)
else:
x, y, z = (np.broadcast_to(c, coord_shape) for c in xyz)
def _broadcast_color_arg(color, name):
if np.ndim(color) in (0, 1):
# single color, like "red" or [1, 0, 0]
return np.broadcast_to(color, filled.shape + np.shape(color))
elif np.ndim(color) in (3, 4):
# 3D array of strings, or 4D array with last axis rgb
if np.shape(color)[:3] != filled.shape:
raise ValueError(
"When multidimensional, {} must match the shape of "
"filled".format(name))
return color
else:
raise ValueError("Invalid {} argument".format(name))
# broadcast and default on facecolors
if facecolors is None:
facecolors = self._get_patches_for_fill.get_next_color()
facecolors = _broadcast_color_arg(facecolors, 'facecolors')
# broadcast but no default on edgecolors
edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors')
# always scale to the full array, even if the data is only in the center
self.auto_scale_xyz(x, y, z)
# points lying on corners of a square
square = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 1, 0],
[1, 0, 0]
], dtype=np.intp)
voxel_faces = defaultdict(list)
def permutation_matrices(n):
""" Generator of cyclic permutation matices """
mat = np.eye(n, dtype=np.intp)
for i in range(n):
yield mat
mat = np.roll(mat, 1, axis=0)
# iterate over each of the YZ, ZX, and XY orientations, finding faces to
# render
for permute in permutation_matrices(3):
# find the set of ranges to iterate over
pc, qc, rc = permute.T.dot(size)
pinds = np.arange(pc)
qinds = np.arange(qc)
rinds = np.arange(rc)
square_rot = square.dot(permute.T)
# iterate within the current plane
for p in pinds:
for q in qinds:
# iterate perpendicularly to the current plane, handling
# boundaries. We only draw faces between a voxel and an
# empty space, to avoid drawing internal faces.
# draw lower faces
p0 = permute.dot([p, q, 0])
i0 = tuple(p0)
if filled[i0]:
voxel_faces[i0].append(p0 + square_rot)
# draw middle faces
for r1, r2 in zip(rinds[:-1], rinds[1:]):
p1 = permute.dot([p, q, r1])
p2 = permute.dot([p, q, r2])
i1 = tuple(p1)
i2 = tuple(p2)
if filled[i1] and not filled[i2]:
voxel_faces[i1].append(p2 + square_rot)
elif not filled[i1] and filled[i2]:
voxel_faces[i2].append(p2 + square_rot)
# draw upper faces
pk = permute.dot([p, q, rc-1])
pk2 = permute.dot([p, q, rc])
ik = tuple(pk)
if filled[ik]:
voxel_faces[ik].append(pk2 + square_rot)
# iterate over the faces, and generate a Poly3DCollection for each voxel
polygons = {}
for coord, faces_inds in voxel_faces.items():
# convert indices into 3D positions
if xyz is None:
faces = faces_inds
else:
faces = []
for face_inds in faces_inds:
ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2]
face = np.empty(face_inds.shape)
face[:, 0] = x[ind]
face[:, 1] = y[ind]
face[:, 2] = z[ind]
faces.append(face)
poly = art3d.Poly3DCollection(faces,
facecolors=facecolors[coord],
edgecolors=edgecolors[coord],
**kwargs
)
self.add_collection3d(poly)
polygons[coord] = poly
return polygons
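# A minimal usage sketch for voxels, assuming the standard pyplot setup; the
# occupancy array and colors below are illustrative only.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   filled = np.zeros((3, 3, 3), dtype=bool)
#   filled[0, 0, :] = True          # fill one row of voxels
#   ax.voxels(filled, facecolors='blue', edgecolors='k')
#   plt.show()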
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
(2 * np.pi * 0.5 * 1.5))
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
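# A minimal usage sketch feeding get_test_data into a wireframe plot; the
# stride values are illustrative only.
#
#   from mpl_toolkits.mplot3d import axes3d
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   X, Y, Z = axes3d.get_test_data(0.1)
#   ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)
#   plt.show()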
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
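# Because Axes3D is registered with the projection registry above, user code
# can request a 3D axes by name rather than instantiating the class directly:
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')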
| [
"[email protected]"
]
| |
c261a3aa2393582101930b0d509c572623981a2b | 29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68 | /lightly/openapi_generated/swagger_client/models/docker_run_scheduled_priority.py | 8f59946a24631b8670f78eced6e272cd1b4e2588 | [
"MIT"
]
| permissive | lightly-ai/lightly | 5b655fe283b7cc2ddf1d7f5bd098603fc1cce627 | 5650ee8d4057139acf8aa10c884d5d5cdc2ccb17 | refs/heads/master | 2023-08-17T11:08:00.135920 | 2023-08-16T12:43:02 | 2023-08-16T12:43:02 | 303,705,119 | 2,473 | 229 | MIT | 2023-09-14T14:47:16 | 2020-10-13T13:02:56 | Python | UTF-8 | Python | false | false | 1,014 | py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class DockerRunScheduledPriority(str, Enum):
"""
DockerRunScheduledPriority
"""
"""
allowed enum values
"""
LOW = 'LOW'
MID = 'MID'
HIGH = 'HIGH'
CRITICAL = 'CRITICAL'
@classmethod
def from_json(cls, json_str: str) -> 'DockerRunScheduledPriority':
"""Create an instance of DockerRunScheduledPriority from a JSON string"""
return DockerRunScheduledPriority(json.loads(json_str))
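# A minimal usage sketch: from_json expects a JSON-encoded string, so the
# quotes are part of the payload.
#
#   priority = DockerRunScheduledPriority.from_json('"HIGH"')
#   assert priority is DockerRunScheduledPriority.HIGH
#   assert priority.value == 'HIGH'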
| [
"[email protected]"
]
| |
2613f41ca4dc3a52d8a9eba8b22d5db1b4f73c1e | 04d9ee05feb6dddf19b9f7653f4dd9e9ce3ee95c | /rbtools/commands/install.py | 03724c98f41a1da2e2262344e0806a96951d6e81 | [
"MIT"
]
| permissive | pbwkoswara/rbtools | 2fa44ade1c60b4f076198bb8206a5d624dd40cd2 | 8ea5ff8843d2a3d44056ad4358d75c81a066cf28 | refs/heads/master | 2021-07-17T22:22:20.906220 | 2017-10-20T22:11:03 | 2017-10-25T17:05:21 | 108,022,324 | 0 | 0 | null | 2017-10-23T18:26:30 | 2017-10-23T18:26:30 | null | UTF-8 | Python | false | false | 7,398 | py | from __future__ import division, print_function, unicode_literals
import hashlib
import logging
import os
import shutil
import tempfile
import zipfile
import tqdm
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlopen
from rbtools.commands import Command, CommandError
from rbtools.utils.appdirs import user_data_dir
from rbtools.utils.checks import check_install
from rbtools.utils.process import execute
class Install(Command):
"""Install a dependency.
This allows RBTools to install external dependencies that may be needed for
some features.
"""
name = 'install'
author = 'The Review Board Project'
description = 'Install an optional dependency.'
args = '<package>'
option_list = []
package_urls = {
'tfs': 'http://downloads.beanbaginc.com/rb-tfs/rb-tfs.zip'
}
def main(self, package):
"""Run the command.
Args:
package (unicode):
The name of the package to install.
Raises:
rbtools.commands.CommandError:
An error occurred during installation.
"""
try:
url = self.package_urls[package]
except KeyError:
err = 'Package "%s" not found. Available packages are:\n' % package
err += '\n'.join(
' %s' % package_name
for package_name in self.package_urls.keys()
)
raise CommandError(err)
label = 'Downloading %s' % package
zip_filename = self.download_file(url, label=label)
try:
self.check_download(url, zip_filename)
self.unzip(
zip_filename,
os.path.join(user_data_dir('rbtools'), 'packages', package))
finally:
os.unlink(zip_filename)
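# From the command line this corresponds to an invocation along the lines of
# (package name taken from package_urls above):
#
#   rbt install tfs
#
# which downloads the package .zip, verifies it, and unpacks it under the
# per-user data directory returned by user_data_dir('rbtools').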
def check_download(self, url, zip_filename):
"""Check to see if the file was successfully downloaded.
If the user has :command:`gpg` installed on their system, use that to
check that the package was signed. Otherwise, check the sha256sum.
Args:
url (unicode):
The URL that the file came from.
zip_filename (unicode):
The filename of the downloaded copy.
Raises:
rbtools.commands.CommandError:
The authenticity of the file could not be verified.
"""
if check_install('gpg'):
execute(['gpg', '--recv-keys', '4ED1F993'])
sig_filename = self.download_file('%s.asc' % url)
try:
retcode, output, errors = execute(
['gpg', '--verify', sig_filename, zip_filename],
with_errors=False, ignore_errors=True,
return_error_code=True, return_errors=True)
if retcode == 0:
logging.debug('Verified file signature')
else:
raise CommandError(
'Unable to verify authenticity of file downloaded '
'from %s:\n%s' % (url, errors))
finally:
os.unlink(sig_filename)
else:
logging.info('"gpg" not installed. Skipping signature validation.')
try:
sha_url = '%s.sha256sum' % url
logging.debug('Downloading %s', sha_url)
response = urlopen(sha_url)
real_sha = response.read().split(' ')[0]
except (HTTPError, URLError) as e:
raise CommandError('Error when downloading file: %s' % e)
with open(zip_filename, 'rb') as f:
our_sha = hashlib.sha256(f.read()).hexdigest()
if real_sha == our_sha:
logging.debug('Verified SHA256 hash')
else:
logging.debug('SHA256 hash does not match!')
logging.debug(' Downloaded file hash was: %s', our_sha)
logging.debug(' Expected hash was: %s', real_sha)
raise CommandError(
'Unable to verify the checksum of the downloaded copy of '
'%s.\n'
'This could be due to an invasive proxy or an attempted '
'man-in-the-middle attack.' % url)
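# A rough manual equivalent of the verification above, assuming gpg and
# sha256sum are available on PATH (filenames illustrative):
#
#   gpg --recv-keys 4ED1F993
#   gpg --verify rb-tfs.zip.asc rb-tfs.zip
#   sha256sum rb-tfs.zip   # compare against the published .sha256sum value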
def unzip(self, zip_filename, package_dir):
"""Unzip a .zip file.
This method will unpack the contents of a .zip file into a target
directory. If that directory already exists, it will first be removed.
Args:
zip_filename (unicode):
The absolute path to the .zip file to unpack.
package_dir (unicode):
The directory to unzip the files into.
Raises:
rbtools.commands.CommandError:
The file could not be unzipped.
"""
logging.debug('Extracting %s to %s', zip_filename, package_dir)
try:
if os.path.exists(package_dir):
if os.path.isdir(package_dir):
shutil.rmtree(package_dir)
else:
os.remove(package_dir)
os.makedirs(package_dir)
except (IOError, OSError) as e:
raise CommandError('Failed to set up package directory %s: %s'
% (package_dir, e))
zip_file = zipfile.ZipFile(zip_filename, 'r')
try:
zip_file.extractall(package_dir)
except Exception as e:
raise CommandError('Failed to extract file: %s' % e)
finally:
zip_file.close()
def download_file(self, url, label=None):
"""Download the given file.
The file is written to a temporary location and its filename is
returned; the caller is responsible for deleting it when done.
Args:
url (unicode):
The URL of the file to download.
label (unicode, optional):
The label to use for the progress bar. If this is not
specified, no progress bar will be shown.
Returns:
unicode:
The filename of the downloaded file.
Raises:
rbtools.commands.CommandError:
An error occurred while downloading the file.
"""
logging.debug('Downloading %s', url)
try:
response = urlopen(url)
total_bytes = int(
response.info().getheader('Content-Length').strip())
read_bytes = 0
bar_format = '{desc} {bar} {percentage:3.0f}% [{remaining}]'
with tqdm.tqdm(total=total_bytes, desc=label or '',
ncols=80, disable=label is None,
bar_format=bar_format) as bar:
try:
f = tempfile.NamedTemporaryFile(delete=False)
while read_bytes != total_bytes:
chunk = response.read(8192)
chunk_length = len(chunk)
read_bytes += chunk_length
f.write(chunk)
bar.update(chunk_length)
finally:
f.close()
return f.name
except (HTTPError, URLError) as e:
raise CommandError('Error when downloading file: %s' % e)
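# A minimal usage sketch for download_file; the URL is illustrative and the
# caller removes the temporary file when finished.
#
#   filename = self.download_file('https://example.com/archive.zip',
#                                 label='Downloading archive')
#   try:
#       pass  # verify / unpack the file here
#   finally:
#       os.unlink(filename)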
| [
"[email protected]"
]
| |
20b361ed82e3c4f5ca631042f72ead83915be1a7 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/clientclientarena.py | 694ad470a4f1b89adc4300277077ab1452ac612e | []
| no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,496 | py | # Embedded file name: scripts/client/ClientArena.py
import Math
import BigWorld
import ResMgr
import ArenaType
from items import vehicles
import constants
import cPickle
import zlib
import Event
from constants import ARENA_PERIOD, ARENA_UPDATE, FLAG_STATE
from PlayerEvents import g_playerEvents
from debug_utils import *
from CTFManager import g_ctfManager
from helpers.EffectsList import FalloutDestroyEffect
import arena_components.client_arena_component_assembler as assembler
class ClientArena(object):
__onUpdate = {ARENA_UPDATE.VEHICLE_LIST: '_ClientArena__onVehicleListUpdate',
ARENA_UPDATE.VEHICLE_ADDED: '_ClientArena__onVehicleAddedUpdate',
ARENA_UPDATE.PERIOD: '_ClientArena__onPeriodInfoUpdate',
ARENA_UPDATE.STATISTICS: '_ClientArena__onStatisticsUpdate',
ARENA_UPDATE.VEHICLE_STATISTICS: '_ClientArena__onVehicleStatisticsUpdate',
ARENA_UPDATE.VEHICLE_KILLED: '_ClientArena__onVehicleKilled',
ARENA_UPDATE.AVATAR_READY: '_ClientArena__onAvatarReady',
ARENA_UPDATE.BASE_POINTS: '_ClientArena__onBasePointsUpdate',
ARENA_UPDATE.BASE_CAPTURED: '_ClientArena__onBaseCaptured',
ARENA_UPDATE.TEAM_KILLER: '_ClientArena__onTeamKiller',
ARENA_UPDATE.VEHICLE_UPDATED: '_ClientArena__onVehicleUpdatedUpdate',
ARENA_UPDATE.COMBAT_EQUIPMENT_USED: '_ClientArena__onCombatEquipmentUsed',
ARENA_UPDATE.RESPAWN_AVAILABLE_VEHICLES: '_ClientArena__onRespawnAvailableVehicles',
ARENA_UPDATE.RESPAWN_COOLDOWNS: '_ClientArena__onRespawnCooldowns',
ARENA_UPDATE.RESPAWN_RANDOM_VEHICLE: '_ClientArena__onRespawnRandomVehicle',
ARENA_UPDATE.RESPAWN_RESURRECTED: '_ClientArena__onRespawnResurrected',
ARENA_UPDATE.FLAG_TEAMS: '_ClientArena__onFlagTeamsReceived',
ARENA_UPDATE.FLAG_STATE_CHANGED: '_ClientArena__onFlagStateChanged',
ARENA_UPDATE.INTERACTIVE_STATS: '_ClientArena__onInteractiveStats',
ARENA_UPDATE.DISAPPEAR_BEFORE_RESPAWN: '_ClientArena__onDisappearVehicleBeforeRespawn',
ARENA_UPDATE.RESOURCE_POINT_STATE_CHANGED: '_ClientArena__onResourcePointStateChanged',
ARENA_UPDATE.OWN_VEHICLE_INSIDE_RP: '_ClientArena__onOwnVehicleInsideRP',
ARENA_UPDATE.OWN_VEHICLE_LOCKED_FOR_RP: '_ClientArena__onOwnVehicleLockedForRP'}
def __init__(self, arenaUniqueID, arenaTypeID, arenaBonusType, arenaGuiType, arenaExtraData, weatherPresetID):
self.__vehicles = {}
self.__vehicleIndexToId = {}
self.__positions = {}
self.__statistics = {}
self.__periodInfo = (ARENA_PERIOD.WAITING,
0,
0,
None)
self.__eventManager = Event.EventManager()
em = self.__eventManager
self.onNewVehicleListReceived = Event.Event(em)
self.onVehicleAdded = Event.Event(em)
self.onVehicleUpdated = Event.Event(em)
self.onPositionsUpdated = Event.Event(em)
self.onPeriodChange = Event.Event(em)
self.onNewStatisticsReceived = Event.Event(em)
self.onVehicleStatisticsUpdate = Event.Event(em)
self.onVehicleKilled = Event.Event(em)
self.onAvatarReady = Event.Event(em)
self.onTeamBasePointsUpdate = Event.Event(em)
self.onTeamBaseCaptured = Event.Event(em)
self.onTeamKiller = Event.Event(em)
self.onCombatEquipmentUsed = Event.Event(em)
self.onRespawnAvailableVehicles = Event.Event(em)
self.onRespawnCooldowns = Event.Event(em)
self.onRespawnRandomVehicle = Event.Event(em)
self.onRespawnResurrected = Event.Event(em)
self.onInteractiveStats = Event.Event(em)
self.onVehicleWillRespawn = Event.Event(em)
self.arenaUniqueID = arenaUniqueID
self.arenaType = ArenaType.g_cache.get(arenaTypeID, None)
if self.arenaType is None:
LOG_ERROR('Arena ID not found ', arenaTypeID)
self.bonusType = arenaBonusType
self.guiType = arenaGuiType
self.extraData = arenaExtraData
self.__arenaBBCollider = None
self.__spaceBBCollider = None
self.componentSystem = assembler.createComponentSystem(self.bonusType)
return
vehicles = property(lambda self: self.__vehicles)
positions = property(lambda self: self.__positions)
statistics = property(lambda self: self.__statistics)
period = property(lambda self: self.__periodInfo[0])
periodEndTime = property(lambda self: self.__periodInfo[1])
periodLength = property(lambda self: self.__periodInfo[2])
periodAdditionalInfo = property(lambda self: self.__periodInfo[3])
def destroy(self):
self.__eventManager.clear()
assembler.destroyComponentSystem(self.componentSystem)
def update(self, updateType, argStr):
delegateName = self.__onUpdate.get(updateType, None)
if delegateName is not None:
getattr(self, delegateName)(argStr)
self.componentSystem.update(updateType, argStr)
return
def updatePositions(self, indices, positions):
self.__positions.clear()
lenPos = len(positions)
lenInd = len(indices)
assert lenPos == 2 * lenInd
indexToId = self.__vehicleIndexToId
for i in xrange(0, lenInd):
if indices[i] in indexToId:
positionTuple = (positions[2 * i], 0, positions[2 * i + 1])
self.__positions[indexToId[indices[i]]] = positionTuple
self.onPositionsUpdated()
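# A minimal usage sketch, assuming the usual client-side access path to the
# arena object; the handler below is illustrative only.
#
#   arena = BigWorld.player().arena
#   def _onPositionsUpdated():
#       for vehicleID, pos in arena.positions.iteritems():
#           LOG_DEBUG('vehicle position', vehicleID, pos)
#   arena.onPositionsUpdated += _onPositionsUpdated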
def collideWithArenaBB(self, start, end):
if self.__arenaBBCollider is None:
if not self.__setupBBColliders():
return
return self.__arenaBBCollider.collide(start, end)
def collideWithSpaceBB(self, start, end):
if self.__spaceBBCollider is None:
if not self.__setupBBColliders():
return
return self.__spaceBBCollider.collide(start, end)
def __setupBBColliders(self):
if BigWorld.wg_getSpaceBounds().length == 0.0:
return False
arenaBB = self.arenaType.boundingBox
spaceBB = _convertToList(BigWorld.wg_getSpaceBounds())
self.__arenaBBCollider = _BBCollider(arenaBB, (-500.0, 500.0))
self.__spaceBBCollider = _BBCollider(spaceBB, (-500.0, 500.0))
return True
def __onVehicleListUpdate(self, argStr):
list = cPickle.loads(zlib.decompress(argStr))
vehicles = self.__vehicles
vehicles.clear()
for infoAsTuple in list:
id, info = self.__vehicleInfoAsDict(infoAsTuple)
vehicles[id] = info
self.__rebuildIndexToId()
self.onNewVehicleListReceived()
def __onVehicleAddedUpdate(self, argStr):
infoAsTuple = cPickle.loads(zlib.decompress(argStr))
id, info = self.__vehicleInfoAsDict(infoAsTuple)
self.__vehicles[id] = info
self.__rebuildIndexToId()
self.onVehicleAdded(id)
def __onVehicleUpdatedUpdate(self, argStr):
infoAsTuple = cPickle.loads(zlib.decompress(argStr))
id, info = self.__vehicleInfoAsDict(infoAsTuple)
self.__vehicles[id] = info
self.onVehicleUpdated(id)
def __onPeriodInfoUpdate(self, argStr):
self.__periodInfo = cPickle.loads(zlib.decompress(argStr))
self.onPeriodChange(*self.__periodInfo)
g_playerEvents.onArenaPeriodChange(*self.__periodInfo)
def __onStatisticsUpdate(self, argStr):
self.__statistics = {}
statList = cPickle.loads(zlib.decompress(argStr))
for s in statList:
vehicleID, stats = self.__vehicleStatisticsAsDict(s)
self.__statistics[vehicleID] = stats
self.onNewStatisticsReceived()
def __onVehicleStatisticsUpdate(self, argStr):
vehicleID, stats = self.__vehicleStatisticsAsDict(cPickle.loads(zlib.decompress(argStr)))
self.__statistics[vehicleID] = stats
self.onVehicleStatisticsUpdate(vehicleID)
def __onVehicleKilled(self, argStr):
victimID, killerID, equipmentID, reason = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(victimID, None)
if vehInfo is not None:
vehInfo['isAlive'] = False
self.onVehicleKilled(victimID, killerID, equipmentID, reason)
return
def __onAvatarReady(self, argStr):
vehicleID = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(vehicleID, None)
if vehInfo is not None:
vehInfo['isAvatarReady'] = True
self.onAvatarReady(vehicleID)
return
def __onBasePointsUpdate(self, argStr):
team, baseID, points, timeLeft, invadersCnt, capturingStopped = cPickle.loads(argStr)
self.onTeamBasePointsUpdate(team, baseID, points, timeLeft, invadersCnt, capturingStopped)
def __onBaseCaptured(self, argStr):
team, baseID = cPickle.loads(argStr)
self.onTeamBaseCaptured(team, baseID)
def __onTeamKiller(self, argStr):
vehicleID = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(vehicleID, None)
if vehInfo is not None:
vehInfo['isTeamKiller'] = True
self.onTeamKiller(vehicleID)
return
def __onCombatEquipmentUsed(self, argStr):
shooterID, equipmentID = cPickle.loads(argStr)
self.onCombatEquipmentUsed(shooterID, equipmentID)
def __onRespawnAvailableVehicles(self, argStr):
vehsList = cPickle.loads(zlib.decompress(argStr))
self.onRespawnAvailableVehicles(vehsList)
LOG_DEBUG_DEV('[RESPAWN] onRespawnAvailableVehicles', vehsList)
def __onRespawnCooldowns(self, argStr):
cooldowns = cPickle.loads(zlib.decompress(argStr))
self.onRespawnCooldowns(cooldowns)
def __onRespawnRandomVehicle(self, argStr):
respawnInfo = cPickle.loads(zlib.decompress(argStr))
self.onRespawnRandomVehicle(respawnInfo)
def __onRespawnResurrected(self, argStr):
respawnInfo = cPickle.loads(zlib.decompress(argStr))
self.onRespawnResurrected(respawnInfo)
def __onDisappearVehicleBeforeRespawn(self, argStr):
vehID = cPickle.loads(argStr)
FalloutDestroyEffect.play(vehID)
self.onVehicleWillRespawn(vehID)
def __onFlagTeamsReceived(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[FLAGS] flag teams', data)
g_ctfManager.onFlagTeamsReceived(data)
def __onFlagStateChanged(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[FLAGS] flag state changed', data)
g_ctfManager.onFlagStateChanged(data)
def __onResourcePointStateChanged(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] state changed', data)
g_ctfManager.onResourcePointStateChanged(data)
def __onOwnVehicleInsideRP(self, argStr):
pointInfo = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] own vehicle inside point', pointInfo)
g_ctfManager.onOwnVehicleInsideRP(pointInfo)
def __onOwnVehicleLockedForRP(self, argStr):
unlockTime = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] own vehicle is locked', unlockTime)
g_ctfManager.onOwnVehicleLockedForRP(unlockTime)
def __onInteractiveStats(self, argStr):
stats = cPickle.loads(zlib.decompress(argStr))
self.onInteractiveStats(stats)
LOG_DEBUG_DEV('[RESPAWN] onInteractiveStats', stats)
def __rebuildIndexToId(self):
vehicles = self.__vehicles
self.__vehicleIndexToId = dict(zip(range(len(vehicles)), sorted(vehicles.keys())))
def __vehicleInfoAsDict(self, info):
getVehicleType = lambda cd: (None if cd is None else vehicles.VehicleDescr(compactDescr=cd))
infoAsDict = {'vehicleType': getVehicleType(info[1]),
'name': info[2],
'team': info[3],
'isAlive': info[4],
'isAvatarReady': info[5],
'isTeamKiller': info[6],
'accountDBID': info[7],
'clanAbbrev': info[8],
'clanDBID': info[9],
'prebattleID': info[10],
'isPrebattleCreator': bool(info[11]),
'forbidInBattleInvitations': bool(info[12]),
'events': info[13],
'igrType': info[14],
'potapovQuestIDs': info[15]}
return (info[0], infoAsDict)
def __vehicleStatisticsAsDict(self, stats):
return (stats[0], {'frags': stats[1]})
def _convertToList(vec4):
return ((vec4.x, vec4.y), (vec4.z, vec4.w))
def _pointInBB(bottomLeft2D, upperRight2D, point3D, minMaxHeight):
return bottomLeft2D[0] < point3D[0] < upperRight2D[0] and bottomLeft2D[1] < point3D[2] < upperRight2D[1] and minMaxHeight[0] < point3D[1] < minMaxHeight[1]
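# A small example of _pointInBB: the first two arguments are the XZ corners of
# the box and the Y coordinate is checked against minMaxHeight.
#
#   inside = _pointInBB((0.0, 0.0), (100.0, 100.0),
#                       Math.Vector3(50.0, 10.0, 50.0), (-500.0, 500.0))
#   # inside is True: x and z lie between the corners, y is within the limits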
class _BBCollider():
def __init__(self, bb, heightLimits):
self.__planes = list()
self.__bb = bb
self.__heightLimits = heightLimits
self.__planes.append(Plane(Math.Vector3(0.0, 0.0, 1.0), bb[0][1]))
self.__planes.append(Plane(Math.Vector3(0.0, 0.0, -1.0), -bb[1][1]))
self.__planes.append(Plane(Math.Vector3(1.0, 0.0, 0.0), bb[0][0]))
self.__planes.append(Plane(Math.Vector3(-1.0, 0.0, 0.0), -bb[1][0]))
self.__planes.append(Plane(Math.Vector3(0.0, 1.0, 0.0), heightLimits[0]))
self.__planes.append(Plane(Math.Vector3(0.0, -1.0, 0.0), -heightLimits[1]))
def collide(self, start, end):
if not _pointInBB(self.__bb[0], self.__bb[1], end, self.__heightLimits):
finalPoint = None
dist = 0
for plane in self.__planes:
intersecPoint = plane.intersectSegment(start, end)
if intersecPoint:
tmpDist = (intersecPoint - start).length
if tmpDist < dist or dist == 0:
dist = tmpDist
finalPoint = intersecPoint
if finalPoint is not None:
return finalPoint
else:
return start
return
class Plane():
def __init__(self, n, d):
self.n = n
self.d = d
def intersectSegment(self, a, b):
ab = b - a
normalDotDir = self.n.dot(ab)
if normalDotDir == 0:
return None
else:
t = (self.d - self.n.dot(a)) / normalDotDir
if t >= 0.0 and t <= 1.0:
return a + ab.scale(t)
return None
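# A small example: intersecting a vertical segment with the ground plane y = 0.
#
#   plane = Plane(Math.Vector3(0.0, 1.0, 0.0), 0.0)
#   hit = plane.intersectSegment(Math.Vector3(0.0, 5.0, 0.0),
#                                Math.Vector3(0.0, -5.0, 0.0))
#   # hit is the point (0, 0, 0); None is returned when the segment misses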
def testPoint(self, point):
if self.n.dot(point) - self.d >= 0.0:
return True
return False
| [
"[email protected]"
]
|