Dataset schema (each row below lists these fields in order, separated by `|`):

- blob_id: string, length 40 to 40
- directory_id: string, length 40 to 40
- path: string, length 3 to 616
- content_id: string, length 40 to 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40 to 40
- revision_id: string, length 40 to 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: list, length 1 to 1
- author_id: string, length 1 to 132
29818ba52762eb59c2db173b8688f4636cf37b75 | c4591b23aebde4a1ec262a6b3f5cc124fea0d638 | /ExceptionHandling/02-code.py | a072cdaae3db241510b0ae750e404b9e9a643154 | []
| no_license | ravi4all/PythonApril_21 | b41f2c845c4003d4291d46d52294767741d4f0d8 | 52a1f538182a7ce78b2c90db3f745d37ea321897 | refs/heads/main | 2023-05-04T01:48:11.414424 | 2021-05-24T12:27:30 | 2021-05-24T12:27:30 | 356,850,524 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | try:
    file = open('file_1.txt', 'w')
    file.write("Hello")
    data = file.read()  # reading a write-only handle raises io.UnsupportedOperation
    print(data)
    # file.close()
except BaseException as ex:
    print(ex)  # the read error is caught and printed here
finally:
    print("Finally will always execute")
file.close() | [
"[email protected]"
]
| |
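The snippet above deliberately reads from a write-only handle so the `except` branch fires. A minimal sketch of the same demo using a context manager (same file name; the `with` block replaces the explicit `finally`/`close`):

```python
try:
    with open('file_1.txt', 'w') as file:  # closed automatically on exit
        file.write("Hello")
        data = file.read()  # still raises io.UnsupportedOperation: not readable
        print(data)
except BaseException as ex:
    print(ex)
```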
547407b8be9ec042211d5cdf47758cab6df9f6e4 | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/notebook/log.py | 3621a70caef590f7d25e7f206b7a6f9826090430 | [
"MIT"
]
| permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 1,788 | py | #-----------------------------------------------------------------------------
# Copyright (c) Jupyter Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import json
from tornado.log import access_log
from .prometheus.log_functions import prometheus_log_method
def log_request(handler):
"""log a bit more information about each request than tornado's default
- move static file get success to debug-level (reduces noise)
- get proxied IP instead of proxy IP
- log referer for redirect and failed requests
- log user-agent for failed requests
"""
status = handler.get_status()
request = handler.request
if status < 300 or status == 304:
        # Successes (or 304 NOT MODIFIED) are debug-level
log_method = access_log.debug
elif status < 400:
log_method = access_log.info
elif status < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
ns = dict(
status=status,
method=request.method,
ip=request.remote_ip,
uri=request.uri,
request_time=request_time,
)
msg = "{status} {method} {uri} ({ip}) {request_time:.2f}ms"
if status >= 400:
# log bad referers
ns['referer'] = request.headers.get('Referer', 'None')
msg = msg + ' referer={referer}'
if status >= 500 and status != 502:
# log all headers if it caused an error
log_method(json.dumps(dict(request.headers), indent=2))
log_method(msg.format(**ns))
prometheus_log_method(handler)
| [
"[email protected]"
]
| |
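The `log_request` function above is meant to be installed as tornado's per-request log hook. A minimal sketch of wiring it into an application (the handler and port are assumptions, not part of notebook/log.py):

```python
import tornado.ioloop
import tornado.web

from notebook.log import log_request

class PingHandler(tornado.web.RequestHandler):  # hypothetical handler
    def get(self):
        self.write("pong")

app = tornado.web.Application(
    [(r"/ping", PingHandler)],
    log_function=log_request,  # tornado calls this after each request finishes
)

if __name__ == "__main__":
    app.listen(8888)  # assumed port
    tornado.ioloop.IOLoop.current().start()
```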
323b337749b6214dcef22befb999fdc3ec1afa0c | 14804b282e567bf45c974b9a55cbdfa1907c5958 | /7_Modules/E_from_Import_module.py | f330334ed400c444afaf4beadb73d5e60f834495 | [
"MIT"
]
| permissive | Oscar-Oliveira/Python-3 | cfdcbcf4548144fb2488625f53f76b20e4d8c5b0 | fa791225a6810b75890d24407b73c5e1b514acbe | refs/heads/master | 2021-09-26T06:27:16.367956 | 2018-10-27T10:42:21 | 2018-10-27T10:42:21 | 101,991,657 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | """
Import module example
"""
from C_my_module import my_sum, __version__, __sprint__, some_value
print(my_sum(1.25, 3.2))
print(__version__)
print(__sprint__)
print(some_value)
| [
"[email protected]"
]
| |
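For the `from C_my_module import ...` line above to work, the imported module must define all four names. A minimal sketch of what `C_my_module.py` plausibly contains (all values are assumptions; only the names are known from the import):

```python
# C_my_module.py -- hypothetical contents inferred from the import list
__version__ = "1.0"         # assumed version string
__sprint__ = "Sprint 1"     # assumed; only the dunder name is known
some_value = 42             # assumed placeholder value

def my_sum(a, b):
    """Return the sum of two numbers."""
    return a + b
```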
a04ef2491409689efa7fb5b643c15eede0ce6500 | 2e60017779c5c286629ab5a3a7aeb27a6b19a60b | /python/problem_38.py | 7a9e34b9da335053313aef540af6db768d6ad191 | []
| no_license | jamesjiang52/10000-Lines-of-Code | f8c7cb4b8d5e441693f3e0f6919731ce4680f60d | 3b6c20b288bad1de5390ad672c73272d98e93ae0 | refs/heads/master | 2020-03-15T03:50:38.104917 | 2018-05-07T04:41:52 | 2018-05-07T04:41:52 | 131,952,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | if __name__ == '__main__':
import sys
sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
from functions import *
from progress import Progress
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
for line in answers:
answers_list.append(int(line))
progress_ = Progress("Problem 038: Pandigital multiples", 0, 5000)
for i in range(10000, 5000, -1):
progress_.count = 10000 - i
progress_.progress()
if is_pandigital(str(i) + str(2*i)):
break
progress_.count = int(str(i) + str(2*i))
progress_.total = answers_list[38]
progress_.progress()
if __name__ == '__main__':
input()
| [
"[email protected]"
]
| |
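The loop above depends on `is_pandigital` from the repo's `functions` module, which is not shown. A plausible implementation, assuming it checks for a 1-9 pandigital string:

```python
def is_pandigital(s: str) -> bool:
    # True when s contains each of the digits 1 through 9 exactly once
    return sorted(s) == list("123456789")

# the search stops at i = 9327: "9327" + str(2 * 9327) -> "932718654"
print(is_pandigital("932718654"))  # True
```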
ef1e26ac2e2027166701c8f393f5f6cbba7dd26a | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/beginner/beginner-bite-32-dont-mutability-fool-you.py | bb552f90e582a2a3dc772ff09e46161df47d251e | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 606 | py | '''
In this Bite you are presented with a function that copies the given items data structure.
There is a problem though: the tests fail. Can you fix it?
This can be done in a one-liner. If you know which module to use it will be easy;
if not, you will learn something new today.
Regardless, we want you to think about Python's mutability. Have fun!
'''
import copy

items = [{'id': 1, 'name': 'laptop', 'value': 1000},
         {'id': 2, 'name': 'chair', 'value': 300},
         {'id': 3, 'name': 'book', 'value': 20}]

def duplicate_items(items):
    # the one-liner fix the Bite asks for: deep-copy instead of
    # returning (or shallow-copying) the original list of dicts
    return copy.deepcopy(items)

items_copy = duplicate_items(items)
print(id(items))
print(id(items_copy)) | [
"[email protected]"
]
| |
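Why `copy.deepcopy` rather than a slice or `list(items)`: the list holds dicts, so a shallow copy still shares the inner objects. A short demonstration:

```python
import copy

items = [{'id': 1, 'name': 'laptop', 'value': 1000}]

shallow = list(items)        # new outer list, same inner dicts
deep = copy.deepcopy(items)  # new outer list and new inner dicts

shallow[0]['value'] = 0
print(items[0]['value'])     # 0 -- mutated through the shallow copy
print(deep[0]['value'])      # 1000 -- the deep copy is unaffected
```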
dbe92292a15e5a2a15eb70407f835d192eb3601a | 63d3a6255f2677f9d92205d62163b9d22a74c5c7 | /modules/accounts/migrations/0024_alter_user_protec_sub_pass.py | dbe591a3aaa8fc2e6defef1a0cf0a10b14cdb8ac | [
"Apache-2.0"
]
| permissive | GPCRmd/GPCRmd | 9204f39b1bfbc800b13512b316e05e54ddd8af23 | 47d7a4e71025b70e15a0f752760873249932c54e | refs/heads/main | 2023-09-04T11:13:44.285629 | 2023-08-29T13:43:01 | 2023-08-29T13:43:01 | 260,036,875 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 4.1.5 on 2023-06-19 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0023_user_protec_sub_pass'),
]
operations = [
migrations.AlterField(
model_name='user',
name='protec_sub_pass',
field=models.BinaryField(),
),
]
| [
"[email protected]"
]
| |
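The migration above switches `protec_sub_pass` on the accounts `User` model to a `BinaryField`. A minimal sketch of the matching model field after the migration (the base class and the rest of the model are assumptions):

```python
from django.contrib.auth.models import AbstractUser
from django.db import models

class User(AbstractUser):  # assumed base; only the field below is known
    protec_sub_pass = models.BinaryField()  # BinaryField stores raw bytes
```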
ec6c5b7a1efe7a36852626b0475450430a86dd61 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v2_1_preview_2/operations/_form_recognizer_client_operations.py | 225081ac70a14e39e7de6d1cfd0f8eb41d89781e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 82,644 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, IO, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FormRecognizerClientOperationsMixin(object):
def _train_custom_model_async_initial(
self,
train_request, # type: "models.TrainRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._train_custom_model_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(train_request, 'TrainRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_train_custom_model_async_initial.metadata = {'url': '/custom/models'} # type: ignore
def begin_train_custom_model_async(
self,
train_request, # type: "models.TrainRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Train Custom Model.
Create and train a custom model. The request must include a source parameter that is either an
externally accessible Azure storage blob container Uri (preferably a Shared Access Signature
Uri) or valid path to a data folder in a locally mounted drive. When local paths are specified,
they must follow the Linux/Unix path format and be an absolute path rooted to the input mount
configuration setting value e.g., if '{Mounts:Input}' configuration setting value is '/input'
then a valid source path would be '/input/contosodataset'. All data to be trained is expected
to be under the source folder or sub folders under it. Models are trained using documents that
are of the following content type - 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
Other type of content is ignored.
:param train_request: Training request parameters.
:type train_request: ~azure.ai.formrecognizer.models.TrainRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._train_custom_model_async_initial(
train_request=train_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_train_custom_model_async.metadata = {'url': '/custom/models'} # type: ignore
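    # Usage sketch, not part of the generated file (client instance and SAS URL
    # are assumptions): training returns an LROPoller[None]; the new model's
    # location is surfaced via the Location header of the initial 201 response.
    #
    #     train_request = models.TrainRequest(source="<blob-container-sas-url>")
    #     poller = client.begin_train_custom_model_async(train_request, polling=True)
    #     poller.wait()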
def get_custom_model(
self,
model_id, # type: str
include_keys=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> "models.Model"
"""Get Custom Model.
Get detailed information about a custom model.
:param model_id: Model identifier.
:type model_id: str
:param include_keys: Include list of extracted keys in model information.
:type include_keys: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Model, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.Model
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Model"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_custom_model.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if include_keys is not None:
query_parameters['includeKeys'] = self._serialize.query("include_keys", include_keys, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Model', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_custom_model.metadata = {'url': '/custom/models/{modelId}'} # type: ignore
def delete_custom_model(
self,
model_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete Custom Model.
Mark model for deletion. Model artifacts will be permanently removed within a predetermined
period.
:param model_id: Model identifier.
:type model_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_custom_model.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_custom_model.metadata = {'url': '/custom/models/{modelId}'} # type: ignore
def _analyze_with_custom_model_initial(
self,
model_id, # type: str
include_text_details=False, # type: Optional[bool]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._analyze_with_custom_model_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if include_text_details is not None:
query_parameters['includeTextDetails'] = self._serialize.query("include_text_details", include_text_details, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if header_parameters['Content-Type'].split(";")[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']:
body_content_kwargs['stream_content'] = file_stream
elif header_parameters['Content-Type'].split(";")[0] in ['application/json']:
if file_stream is not None:
body_content = self._serialize.body(file_stream, 'SourcePath')
else:
body_content = None
body_content_kwargs['content'] = body_content
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(header_parameters['Content-Type'])
)
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_analyze_with_custom_model_initial.metadata = {'url': '/custom/models/{modelId}/analyze'} # type: ignore
def begin_analyze_with_custom_model(
self,
model_id, # type: str
include_text_details=False, # type: Optional[bool]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Analyze Form.
Extract key-value pairs, tables, and semantic values from a given document. The input document
must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri or local
path) of the document to be analyzed.
:param model_id: Model identifier.
:type model_id: str
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is "application/json".
Allowed values are: "application/pdf", "image/jpeg", "image/png", "image/tiff", "application/json".
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._analyze_with_custom_model_initial(
model_id=model_id,
include_text_details=include_text_details,
file_stream=file_stream,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_analyze_with_custom_model.metadata = {'url': '/custom/models/{modelId}/analyze'} # type: ignore
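    # Usage sketch, not part of the generated file (file name and model id are
    # assumptions): analyzing a local PDF by streaming its bytes with an explicit
    # content type; the result id comes from the Operation-Location header.
    #
    #     with open("form.pdf", "rb") as fd:
    #         poller = client.begin_analyze_with_custom_model(
    #             model_id="<model-guid>",
    #             include_text_details=True,
    #             file_stream=fd,
    #             content_type="application/pdf",
    #             polling=True,
    #         )
    #     poller.wait()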
def get_analyze_form_result(
self,
model_id, # type: str
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AnalyzeOperationResult"
"""Get Analyze Form Result.
Obtain current status and the result of the analyze form operation.
:param model_id: Model identifier.
:type model_id: str
:param result_id: Analyze operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AnalyzeOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.AnalyzeOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_analyze_form_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_analyze_form_result.metadata = {'url': '/custom/models/{modelId}/analyzeResults/{resultId}'} # type: ignore
def _copy_custom_model_initial(
self,
model_id, # type: str
copy_request, # type: "models.CopyRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._copy_custom_model_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(copy_request, 'CopyRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_copy_custom_model_initial.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore
def begin_copy_custom_model(
self,
model_id, # type: str
copy_request, # type: "models.CopyRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Copy Custom Model.
        Copy a custom model stored in this resource (the source) to a user-specified target Form
        Recognizer resource.
:param model_id: Model identifier.
:type model_id: str
:param copy_request: Copy request parameters.
:type copy_request: ~azure.ai.formrecognizer.models.CopyRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._copy_custom_model_initial(
model_id=model_id,
copy_request=copy_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_copy_custom_model.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore
def get_custom_model_copy_result(
self,
model_id, # type: str
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.CopyOperationResult"
"""Get Custom Model Copy Result.
Obtain current status and the result of a custom model copy operation.
:param model_id: Model identifier.
:type model_id: str
:param result_id: Copy operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CopyOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.CopyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CopyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_custom_model_copy_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'modelId': self._serialize.url("model_id", model_id, 'str'),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CopyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_custom_model_copy_result.metadata = {'url': '/custom/models/{modelId}/copyResults/{resultId}'} # type: ignore
def generate_model_copy_authorization(
self,
**kwargs # type: Any
):
# type: (...) -> "models.CopyAuthorizationResult"
"""Generate Copy Authorization.
Generate authorization to copy a model into the target Form Recognizer resource.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CopyAuthorizationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.CopyAuthorizationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CopyAuthorizationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.generate_model_copy_authorization.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
deserialized = self._deserialize('CopyAuthorizationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
generate_model_copy_authorization.metadata = {'url': '/custom/models/copyAuthorization'} # type: ignore
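    # Usage sketch, not part of the generated file (ids and regions are
    # assumptions): generate authorization on the target resource, then start
    # the copy on the source resource with that authorization.
    #
    #     auth = target_client.generate_model_copy_authorization()
    #     copy_request = models.CopyRequest(
    #         target_resource_id="<target-resource-id>",
    #         target_resource_region="<target-region>",
    #         copy_authorization=auth,
    #     )
    #     poller = source_client.begin_copy_custom_model(
    #         model_id="<source-model-guid>",
    #         copy_request=copy_request,
    #         polling=True,
    #     )
    #     poller.wait()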
def _compose_custom_models_async_initial(
self,
compose_request, # type: "models.ComposeRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._compose_custom_models_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(compose_request, 'ComposeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_compose_custom_models_async_initial.metadata = {'url': '/custom/models/compose'} # type: ignore
def begin_compose_custom_models_async(
self,
compose_request, # type: "models.ComposeRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Compose trained with labels models into one composed model.
Compose request would include list of models ids.
It would validate what all models either trained with labels model or composed model.
It would validate limit of models put together.
:param compose_request: Compose models.
:type compose_request: ~azure.ai.formrecognizer.models.ComposeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._compose_custom_models_async_initial(
compose_request=compose_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_compose_custom_models_async.metadata = {'url': '/custom/models/compose'} # type: ignore
def _analyze_business_card_async_initial(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._analyze_business_card_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if include_text_details is not None:
query_parameters['includeTextDetails'] = self._serialize.query("include_text_details", include_text_details, 'bool')
if locale is not None:
query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if header_parameters['Content-Type'].split(";")[0] in ['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff']:
body_content_kwargs['stream_content'] = file_stream
elif header_parameters['Content-Type'].split(";")[0] in ['application/json']:
if file_stream is not None:
body_content = self._serialize.body(file_stream, 'SourcePath')
else:
body_content = None
body_content_kwargs['content'] = body_content
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(header_parameters['Content-Type'])
)
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_analyze_business_card_async_initial.metadata = {'url': '/prebuilt/businessCard/analyze'} # type: ignore
def begin_analyze_business_card_async(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Analyze Business Card.
Extract field text and semantic values from a given business card document. The input document
must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri) of the
document to be analyzed.
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param locale: Locale of the input document. Supported locales include: en-AU, en-CA, en-GB,
en-IN, en-US(default).
:type locale: str or ~azure.ai.formrecognizer.models.Locale
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is "application/json".
Allowed values are: "application/pdf", "image/bmp", "image/jpeg", "image/png", "image/tiff", "application/json".
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._analyze_business_card_async_initial(
include_text_details=include_text_details,
locale=locale,
file_stream=file_stream,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_analyze_business_card_async.metadata = {'url': '/prebuilt/businessCard/analyze'} # type: ignore
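    # Usage sketch, not part of the generated file (the URL is an assumption):
    # prebuilt analysis of a remote image by sending a SourcePath JSON body.
    #
    #     poller = client.begin_analyze_business_card_async(
    #         include_text_details=True,
    #         locale="en-US",
    #         file_stream=models.SourcePath(source="https://example.com/card.png"),
    #         polling=True,
    #     )
    #     poller.wait()
    #     # parsed fields are then fetched with get_analyze_business_card_result,
    #     # using the result id from the Operation-Location header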
def get_analyze_business_card_result(
self,
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AnalyzeOperationResult"
"""Get Analyze Business Card Result.
Track the progress and obtain the result of the analyze business card operation.
:param result_id: Analyze operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AnalyzeOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.AnalyzeOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_analyze_business_card_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_analyze_business_card_result.metadata = {'url': '/prebuilt/businessCard/analyzeResults/{resultId}'} # type: ignore
def _analyze_invoice_async_initial(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._analyze_invoice_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if include_text_details is not None:
query_parameters['includeTextDetails'] = self._serialize.query("include_text_details", include_text_details, 'bool')
if locale is not None:
query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if header_parameters['Content-Type'].split(";")[0] in ['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff']:
body_content_kwargs['stream_content'] = file_stream
elif header_parameters['Content-Type'].split(";")[0] in ['application/json']:
if file_stream is not None:
body_content = self._serialize.body(file_stream, 'SourcePath')
else:
body_content = None
body_content_kwargs['content'] = body_content
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(header_parameters['Content-Type'])
)
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_analyze_invoice_async_initial.metadata = {'url': '/prebuilt/invoice/analyze'} # type: ignore
def begin_analyze_invoice_async(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Analyze Invoice Document.
Extract field text and semantic values from a given invoice document. The input document must
be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri) of the
document to be analyzed.
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param locale: Locale of the input document. Supported locales include: en-AU, en-CA, en-GB,
en-IN, en-US(default).
:type locale: str or ~azure.ai.formrecognizer.models.Locale
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is "application/json".
Allowed values are: "application/pdf", "image/bmp", "image/jpeg", "image/png", "image/tiff", "application/json".
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._analyze_invoice_async_initial(
include_text_details=include_text_details,
locale=locale,
file_stream=file_stream,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_analyze_invoice_async.metadata = {'url': '/prebuilt/invoice/analyze'} # type: ignore
def get_analyze_invoice_result(
self,
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AnalyzeOperationResult"
"""Get Analyze Invoice Result.
Track the progress and obtain the result of the analyze invoice operation.
:param result_id: Analyze operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AnalyzeOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.AnalyzeOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_analyze_invoice_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_analyze_invoice_result.metadata = {'url': '/prebuilt/invoice/analyzeResults/{resultId}'} # type: ignore
def _analyze_receipt_async_initial(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._analyze_receipt_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if include_text_details is not None:
query_parameters['includeTextDetails'] = self._serialize.query("include_text_details", include_text_details, 'bool')
if locale is not None:
query_parameters['locale'] = self._serialize.query("locale", locale, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if header_parameters['Content-Type'].split(";")[0] in ['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff']:
body_content_kwargs['stream_content'] = file_stream
elif header_parameters['Content-Type'].split(";")[0] in ['application/json']:
if file_stream is not None:
body_content = self._serialize.body(file_stream, 'SourcePath')
else:
body_content = None
body_content_kwargs['content'] = body_content
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(header_parameters['Content-Type'])
)
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_analyze_receipt_async_initial.metadata = {'url': '/prebuilt/receipt/analyze'} # type: ignore
def begin_analyze_receipt_async(
self,
include_text_details=False, # type: Optional[bool]
locale=None, # type: Optional[Union[str, "models.Locale"]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Analyze Receipt.
Extract field text and semantic values from a given receipt document. The input document must
be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri) of the
document to be analyzed.
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param locale: Locale of the input document. Supported locales include: en-AU, en-CA, en-GB,
         en-IN, en-US (default).
:type locale: str or ~azure.ai.formrecognizer.models.Locale
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is "application/json".
Allowed values are: "application/pdf", "image/bmp", "image/jpeg", "image/png", "image/tiff", "application/json".
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for LROBasePolling, False for no polling, or a
         polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._analyze_receipt_async_initial(
include_text_details=include_text_details,
locale=locale,
file_stream=file_stream,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_analyze_receipt_async.metadata = {'url': '/prebuilt/receipt/analyze'} # type: ignore
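    # Illustrative usage (hypothetical names; client construction, credentials
    # and error handling omitted). The poller's result is None, so the result
    # id parsed from the Operation-Location response header is what callers
    # pass to get_analyze_receipt_result:
    #
    #     poller = client.begin_analyze_receipt_async(
    #         include_text_details=True,
    #         file_stream=models.SourcePath(source="https://example.com/receipt.jpg"),
    #     )
    #     poller.wait()
    #     result = client.get_analyze_receipt_result(result_id)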
def get_analyze_receipt_result(
self,
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AnalyzeOperationResult"
"""Get Analyze Receipt Result.
Track the progress and obtain the result of the analyze receipt operation.
:param result_id: Analyze operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AnalyzeOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.AnalyzeOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_analyze_receipt_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_analyze_receipt_result.metadata = {'url': '/prebuilt/receipt/analyzeResults/{resultId}'} # type: ignore
def _analyze_layout_async_initial(
self,
language=None, # type: Optional[Union[str, "models.Language"]]
pages=None, # type: Optional[List[str]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._analyze_layout_async_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if language is not None:
query_parameters['language'] = self._serialize.query("language", language, 'str')
if pages is not None:
query_parameters['Pages'] = self._serialize.query("pages", pages, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if header_parameters['Content-Type'].split(";")[0] in ['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff']:
body_content_kwargs['stream_content'] = file_stream
elif header_parameters['Content-Type'].split(";")[0] in ['application/json']:
if file_stream is not None:
body_content = self._serialize.body(file_stream, 'SourcePath')
else:
body_content = None
body_content_kwargs['content'] = body_content
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/bmp', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(header_parameters['Content-Type'])
)
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_analyze_layout_async_initial.metadata = {'url': '/layout/analyze'} # type: ignore
def begin_analyze_layout_async(
self,
language=None, # type: Optional[Union[str, "models.Language"]]
pages=None, # type: Optional[List[str]]
file_stream=None, # type: Optional[Union[IO, "models.SourcePath"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Analyze Layout.
Extract text and layout information from a given document. The input document must be of one of
the supported content types - 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff' or
'image/bmp'. Alternatively, use 'application/json' type to specify the location (Uri or local
path) of the document to be analyzed.
:param language: The BCP-47 language code of the text in the document. Currently, only English
('en'), Dutch (‘nl’), French (‘fr’), German (‘de’), Italian (‘it’), Portuguese (‘pt'),
simplified Chinese ('zh-Hans') and Spanish ('es') are supported (print – nine languages and
         handwritten – English only). Layout supports auto language identification and multi-language
         documents, so only provide a language code if you would like to force the document to be
processed as that specific language.
:type language: str or ~azure.ai.formrecognizer.models.Language
        :param pages: Custom page numbers for multi-page documents (PDF/TIFF); input the numbers of the
         pages for which you want OCR results. For a range of pages, use a hyphen. Separate each page or
range with a comma or space.
:type pages: list[str]
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is "application/json".
Allowed values are: "application/pdf", "image/bmp", "image/jpeg", "image/png", "image/tiff", "application/json".
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for LROBasePolling, False for no polling, or a
         polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._analyze_layout_async_initial(
language=language,
pages=pages,
file_stream=file_stream,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_analyze_layout_async.metadata = {'url': '/layout/analyze'} # type: ignore
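    # Illustrative usage (hypothetical values; mirrors the receipt sketch
    # above). `pages` narrows OCR to a selection such as ["1-3", "5"]:
    #
    #     poller = client.begin_analyze_layout_async(
    #         language="en",
    #         pages=["1-3", "5"],
    #         file_stream=models.SourcePath(source="https://example.com/form.pdf"),
    #     )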
def get_analyze_layout_result(
self,
result_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.AnalyzeOperationResult"
"""Get Analyze Layout Result.
Track the progress and obtain the result of the analyze layout operation.
:param result_id: Analyze operation result identifier.
:type result_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AnalyzeOperationResult, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.AnalyzeOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_analyze_layout_result.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'resultId': self._serialize.url("result_id", result_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_analyze_layout_result.metadata = {'url': '/layout/analyzeResults/{resultId}'} # type: ignore
def list_custom_models(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.Models"]
"""List Custom Models.
Get information about all custom models.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Models or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.ai.formrecognizer.models.Models]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Models"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
op = "full"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_custom_models.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['op'] = self._serialize.query("op", op, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('Models', pipeline_response)
list_of_elem = deserialized.model_list
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_custom_models.metadata = {'url': '/custom/models'} # type: ignore
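    # Illustrative iteration over the pager returned above (hypothetical
    # `client` variable); ItemPaged follows deserialized.next_link
    # transparently, so callers just loop:
    #
    #     for model_info in client.list_custom_models():
    #         print(model_info.model_id)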
def get_custom_models(
self,
**kwargs # type: Any
):
# type: (...) -> "models.Models"
"""Get Custom Models.
Get information about all custom models.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Models, or the result of cls(response)
:rtype: ~azure.ai.formrecognizer.models.Models
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Models"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
op = "summary"
accept = "application/json"
# Construct URL
url = self.get_custom_models.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['op'] = self._serialize.query("op", op, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Models', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_custom_models.metadata = {'url': '/custom/models'} # type: ignore
| [
"[email protected]"
]
| |
8c9aa6151183c5a58c9b2f4629ee57dc75958092 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_204/193.py | e70d2b19de1b0addcc2ca7ee2d5b7e204e5ad372 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | import math
def make_kits(num_ingredients, num_packages, ingredients_proportion, packages):
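    # Greedy strategy: sort each ingredient's packages ascending; for every
    # package of ingredient 0, try each feasible serving count and advance a
    # pointer per remaining ingredient past packages that are too small. A kit
    # is formed (consuming one package of each ingredient) when every
    # ingredient has a package within 10% of unit * servings.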
for i in xrange(0, num_ingredients):
packages[i].sort()
counter = 0
pointers = [0]*num_ingredients
for i in xrange(0, num_packages):
num_servings = serving(packages[0][i], ingredients_proportion[0])
# print "i: ", i, " num_servings: ", num_servings
for num_serving in xrange(num_servings[0], num_servings[1]+1):
flag = 0
for j in xrange(1, num_ingredients):
while pointers[j] < num_packages and too_little(packages[j][pointers[j]], ingredients_proportion[j], num_serving):
pointers[j] = pointers[j]+1
if pointers[j] == num_packages or too_much(packages[j][pointers[j]], ingredients_proportion[j], num_serving):
flag = -1
break
if flag == 0:
# print "counter: ", counter
# print i, " ", pointers[1]
pointers = [x+1 for x in pointers]
counter = counter+1
break
return counter
def serving(weight, unit):
res = []
res.append(int(math.ceil(weight/1.1/unit)))
res.append(int(math.floor(weight/0.9/unit)))
return res
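# Worked example for serving(): with unit=500 and weight=2400, a package can
# cover n servings whenever 0.9*500*n <= 2400 <= 1.1*500*n, i.e.
# ceil(2400/1.1/500) = 5 <= n <= floor(2400/0.9/500) = 5, so serving(2400, 500)
# returns [5, 5].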
def too_little(weight, unit, num_serving):
if weight < unit*num_serving*0.9:
return True
return False
def too_much(weight, unit, num_serving):
if weight > unit*num_serving*1.1:
return True
return False
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
num_ingredients, num_packages = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
ingredients_proportion = [int(s) for s in raw_input().split(" ")]
packages = [[] for k in xrange(1, num_ingredients+1)]
for j in xrange(0, num_ingredients):
packages[j] = [int(s) for s in raw_input().split(" ")]
res = make_kits(num_ingredients, num_packages, ingredients_proportion, packages)
print "Case #{}: {}".format(i, res)
# check out .format's specification for more formatting options
| [
"[email protected]"
]
| |
b6edd7683db0062d6bd530a81b71c14729977618 | f08336ac8b6f8040f6b2d85d0619d1a9923c9bdf | /3-lengthOfLongestSubstring.py | e11ad2357de806386f05b36afa99e4988224ba70 | []
| no_license | MarshalLeeeeee/myLeetCodes | fafadcc35eef44f431a008c1be42b1188e7dd852 | 80e78b153ad2bdfb52070ba75b166a4237847d75 | refs/heads/master | 2020-04-08T16:07:47.943755 | 2019-02-21T01:43:16 | 2019-02-21T01:43:16 | 159,505,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | '''
3. Longest Substring Without Repeating Characters
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
'''
class Solution:
# O(n)
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l = len(s)
hashmap = dict()
maxLen = 0
currLen = 0
head = 0
for i in range(l):
try:
if (hashmap[s[i]] < head):
currLen += 1
else:
currLen = i - hashmap[s[i]]
head = hashmap[s[i]]
            except KeyError:
currLen += 1
maxLen = currLen if (currLen > maxLen) else maxLen
hashmap[s[i]] = i
return maxLen | [
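
# Quick sanity check against the docstring examples (hypothetical driver, not
# part of the original submission):
if __name__ == '__main__':
    sol = Solution()
    assert sol.lengthOfLongestSubstring("abcabcbb") == 3
    assert sol.lengthOfLongestSubstring("bbbbb") == 1
    assert sol.lengthOfLongestSubstring("pwwkew") == 3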
"[email protected]"
]
| |
1b02c0d09ab89fd002e5a0b4d19bc4797f962554 | 823105ac7d892cf214ed9dcd8eaba315b01c1ed7 | /model/unet.py | 7105c7f28fcfcc05af73a60055e12a885fd43f39 | []
| no_license | jiye-ML/lane_detection_baidu_2019 | ccee82a1272bace80f9e128c24ae5ff64b827bd7 | 6ed35de00a34a8714a32c2a3ff649c4b0b1f1407 | refs/heads/master | 2022-12-10T20:46:27.983785 | 2020-08-29T03:26:25 | 2020-08-29T03:26:25 | 223,421,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | import torch
import torch.nn as nn
from model.network import ResNet101v2
from model.module import Block
class UNetConvBlock(nn.Module):
def __init__(self, in_chans, out_chans):
super(UNetConvBlock, self).__init__()
block = [
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(out_chans),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(out_chans)
]
self.block = nn.Sequential(*block)
def forward(self, x):
out = self.block(x)
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_chans, out_chans):
super(UNetUpBlock, self).__init__()
self.up = nn.Sequential(
nn.Upsample(mode='bilinear', scale_factor=2),
nn.Conv2d(in_chans, out_chans, kernel_size=1)
)
self.conv_block = UNetConvBlock(in_chans, out_chans)
@staticmethod
def center_crop(layer, target_size):
_, _, layer_height, layer_width = layer.size()
diff_y = (layer_height - target_size[0]) // 2
diff_x = (layer_width - target_size[1]) // 2
return layer[
:, :, diff_y: (diff_y + target_size[0]), diff_x: (diff_x + target_size[1])
]
def forward(self, x, bridge):
up = self.up(x)
crop1 = self.center_crop(bridge, up.shape[2:])
out = torch.cat([up, crop1], 1)
out = self.conv_block(out)
return out
class ResNetUNet(nn.Module):
def __init__(self, config):
super(ResNetUNet, self).__init__()
self.n_classes = config.NUM_CLASSES
self.encode = ResNet101v2()
prev_channels = 2048
self.up_path = nn.ModuleList()
for i in range(3):
self.up_path.append(UNetUpBlock(prev_channels, prev_channels // 2))
prev_channels //= 2
self.cls_conv_block1 = Block(prev_channels, 32)
self.cls_conv_block2 = Block(32, 16)
self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
self.init_weight()
def forward(self, x):
input_size = x.size()[2:]
blocks = self.encode(x)
x = blocks[-1]
for i, up in enumerate(self.up_path):
x = up(x, blocks[-i - 2])
x = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(x)
x = self.cls_conv_block1(x)
x = self.cls_conv_block2(x)
x = self.last(x)
return x
def init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
| [
"[email protected]"
]
| |
e97328b2b234b843a30b30be30a3cb8b6667e23b | 90ff4b63aa6cbc90c918d7f1944a719e50fa4dc7 | /dnanexus/filter_qc/src/filter_qc.py | f790e100eba87df546e7184db96ecf798762e847 | [
"MIT"
]
| permissive | kchatzistergos/chip-seq-pipeline | 9a3639034f1602d4d3898335f838748ea5de4875 | b3ebbbc4fc787849e7cbd7bac5124922540a3bd4 | refs/heads/master | 2021-01-05T03:09:20.873929 | 2019-08-07T23:49:14 | 2019-08-07T23:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,389 | py | #!/usr/bin/env python
# filter_qc 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import os
import subprocess
import shlex
import re
import common
import dxpy
import logging
from pprint import pprint, pformat
logger = logging.getLogger(__name__)
logger.addHandler(dxpy.DXLogHandler())
logger.propagate = False
logger.setLevel(logging.INFO)
def dup_parse(fname):
with open(fname, 'r') as dup_file:
if not dup_file:
return None
lines = iter(dup_file.read().splitlines())
for line in lines:
if line.startswith('## METRICS CLASS'):
headers = lines.next().rstrip('\n').lower()
metrics = lines.next().rstrip('\n')
break
headers = headers.split('\t')
metrics = metrics.split('\t')
headers.pop(0)
metrics.pop(0)
dup_qc = dict(zip(headers, metrics))
return dup_qc
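# The Picard MarkDuplicates metrics block parsed above looks like this
# (abridged, values illustrative):
#
#   ## METRICS CLASS        picard.sam.DuplicationMetrics
#   LIBRARY  UNPAIRED_READS_EXAMINED  READ_PAIRS_EXAMINED  ...  PERCENT_DUPLICATION
#   lib1     1203                     2826233              ...  0.012345
#
# The first column (the library name) is popped off and the remaining fields
# become a dict keyed by the lower-cased header names, e.g.
# dup_qc['percent_duplication'] == '0.012345'.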
def pbc_parse(fname):
with open(fname, 'r') as pbc_file:
if not pbc_file:
return None
lines = pbc_file.read().splitlines()
line = lines[0].rstrip('\n')
# PBC File output:
# TotalReadPairs <tab>
# DistinctReadPairs <tab>
# OneReadPair <tab>
# TwoReadPairs <tab>
# NRF=Distinct/Total <tab>
# PBC1=OnePair/Distinct <tab>
# PBC2=OnePair/TwoPair
headers = ['TotalReadPairs',
'DistinctReadPairs',
'OneReadPair',
'TwoReadPairs',
'NRF',
'PBC1',
'PBC2']
metrics = line.split('\t')
pbc_qc = dict(zip(headers, metrics))
return pbc_qc
def flagstat_parse(fname):
with open(fname, 'r') as flagstat_file:
if not flagstat_file:
return None
flagstat_lines = flagstat_file.read().splitlines()
qc_dict = {
# values are regular expressions,
# will be replaced with scores [hiq, lowq]
'in_total': 'in total',
'duplicates': 'duplicates',
'mapped': 'mapped',
'paired_in_sequencing': 'paired in sequencing',
'read1': 'read1',
'read2': 'read2',
'properly_paired': 'properly paired',
'with_self_mate_mapped': 'with itself and mate mapped',
'singletons': 'singletons',
# i.e. at the end of the line
'mate_mapped_different_chr': 'with mate mapped to a different chr$',
# RE so must escape
'mate_mapped_different_chr_hiQ':
'with mate mapped to a different chr \(mapQ>=5\)'
}
for (qc_key, qc_pattern) in qc_dict.items():
qc_metrics = next(re.split(qc_pattern, line)
for line in flagstat_lines
if re.search(qc_pattern, line))
(hiq, lowq) = qc_metrics[0].split(' + ')
qc_dict[qc_key] = [int(hiq.rstrip()), int(lowq.rstrip())]
return qc_dict
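# Example: the flagstat line
#   "2826233 + 0 in total (QC-passed reads + QC-failed reads)"
# matches the 'in total' pattern; splitting the text before the match on
# ' + ' yields qc_dict['in_total'] == [2826233, 0] (QC-passed and QC-failed
# counts respectively).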
@dxpy.entry_point('main')
def main(input_bam, paired_end, samtools_params, scrub, debug):
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
raw_bam_file = dxpy.DXFile(input_bam)
raw_bam_filename = raw_bam_file.name
raw_bam_basename = raw_bam_file.name.rstrip('.bam')
raw_bam_file_mapstats_filename = raw_bam_basename + '.flagstat.qc'
dxpy.download_dxfile(raw_bam_file.get_id(), raw_bam_filename)
subprocess.check_output('set -x; ls -l', shell=True)
# Generate initial mapping statistics
with open(raw_bam_file_mapstats_filename, 'w') as fh:
flagstat_command = "samtools flagstat %s" % (raw_bam_filename)
logger.info(flagstat_command)
subprocess.check_call(shlex.split(flagstat_command), stdout=fh)
filt_bam_prefix = raw_bam_basename + ".filt.srt"
filt_bam_filename = filt_bam_prefix + ".bam"
if paired_end:
# =============================
# Remove unmapped, mate unmapped
# not primary alignment, reads failing platform
# Remove low MAPQ reads
# Only keep properly paired reads
# Obtain name sorted BAM file
# ==================
tmp_filt_bam_prefix = "tmp.%s" % (filt_bam_prefix) # was tmp.prefix.nmsrt
tmp_filt_bam_filename = tmp_filt_bam_prefix + ".bam"
out, err = common.run_pipe([
            # filter: -F 1804 FLAG bits to exclude; -f 2 FLAG bits to require;
# -q 30 exclude MAPQ < 30; -u uncompressed output
# exclude FLAG 1804: unmapped, next segment unmapped, secondary
# alignments, not passing platform q, PCR or optical duplicates
# require FLAG 2: properly aligned
"samtools view -F 1804 -f 2 %s -u %s" % (samtools_params, raw_bam_filename),
# sort: -n sort by name; - take input from stdin;
# out to specified filename
# Will produce name sorted BAM
"samtools sort -n - %s" % (tmp_filt_bam_prefix)])
if err:
logger.error("samtools error: %s" % (err))
# Remove orphan reads (pair was removed)
# and read pairs mapping to different chromosomes
# Obtain position sorted BAM
subprocess.check_output('set -x; ls -l', shell=True)
out, err = common.run_pipe([
# fill in mate coordinates, ISIZE and mate-related flags
# fixmate requires name-sorted alignment; -r removes secondary and
# unmapped (redundant here because already done above?)
# - send output to stdout
"samtools fixmate -r %s -" % (tmp_filt_bam_filename),
# repeat filtering after mate repair
"samtools view -F 1804 -f 2 -u -",
# produce the coordinate-sorted BAM
"samtools sort - %s" % (filt_bam_prefix)])
subprocess.check_output('set -x; ls -l', shell=True)
else: # single-end data
# =============================
# Remove unmapped, mate unmapped
# not primary alignment, reads failing platform
# Remove low MAPQ reads
# Obtain name sorted BAM file
# ==================
with open(filt_bam_filename, 'w') as fh:
samtools_filter_command = (
"samtools view -F 1804 %s -b %s"
% (samtools_params, raw_bam_filename)
)
logger.info(samtools_filter_command)
subprocess.check_call(
shlex.split(samtools_filter_command),
stdout=fh)
# ========================
# Mark duplicates
# ======================
tmp_filt_bam_filename = raw_bam_basename + ".dupmark.bam"
dup_file_qc_filename = raw_bam_basename + ".dup.qc"
picard_string = ' '.join([
"java -Xmx4G -jar /picard/MarkDuplicates.jar",
"INPUT=%s" % (filt_bam_filename),
"OUTPUT=%s" % (tmp_filt_bam_filename),
"METRICS_FILE=%s" % (dup_file_qc_filename),
"VALIDATION_STRINGENCY=LENIENT",
"ASSUME_SORTED=true",
"REMOVE_DUPLICATES=false"
])
logger.info(picard_string)
subprocess.check_output(shlex.split(picard_string))
os.rename(tmp_filt_bam_filename, filt_bam_filename)
if paired_end:
final_bam_prefix = raw_bam_basename + ".filt.srt.nodup"
else:
final_bam_prefix = raw_bam_basename + ".filt.nodup.srt"
final_bam_filename = final_bam_prefix + ".bam" # To be stored
final_bam_index_filename = final_bam_filename + ".bai" # To be stored
# QC file
final_bam_file_mapstats_filename = final_bam_prefix + ".flagstat.qc"
if paired_end:
samtools_dedupe_command = \
"samtools view -F 1804 -f2 -b %s" % (filt_bam_filename)
else:
samtools_dedupe_command = \
"samtools view -F 1804 -b %s" % (filt_bam_filename)
# ============================
# Remove duplicates
# Index final position sorted BAM
# ============================
with open(final_bam_filename, 'w') as fh:
logger.info(samtools_dedupe_command)
subprocess.check_call(
shlex.split(samtools_dedupe_command),
stdout=fh)
# Index final bam file
samtools_index_command = \
"samtools index %s %s" % (final_bam_filename, final_bam_index_filename)
logger.info(samtools_index_command)
subprocess.check_output(shlex.split(samtools_index_command))
# Generate mapping statistics
with open(final_bam_file_mapstats_filename, 'w') as fh:
flagstat_command = "samtools flagstat %s" % (final_bam_filename)
logger.info(flagstat_command)
subprocess.check_call(shlex.split(flagstat_command), stdout=fh)
# =============================
# Compute library complexity
# =============================
# Sort by name
# convert to bedPE and obtain fragment coordinates
# sort by position and strand
# Obtain unique count statistics
pbc_file_qc_filename = final_bam_prefix + ".pbc.qc"
# PBC File output
# TotalReadPairs [tab]
# DistinctReadPairs [tab]
# OneReadPair [tab]
# TwoReadPairs [tab]
# NRF=Distinct/Total [tab]
# PBC1=OnePair/Distinct [tab]
# PBC2=OnePair/TwoPair
if paired_end:
steps = [
"samtools sort -no %s -" % (filt_bam_filename),
"bamToBed -bedpe -i stdin",
r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$4,$6,$9,$10}'"""]
else:
steps = [
"bamToBed -i %s" % (filt_bam_filename),
r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$3,$6}'"""]
steps.extend([
"grep -v 'chrM'",
"sort",
"uniq -c",
r"""awk 'BEGIN{mt=0;m0=0;m1=0;m2=0} ($1==1){m1=m1+1} ($1==2){m2=m2+1} {m0=m0+1} {mt=mt+$1} END{printf "%d\t%d\t%d\t%d\t%f\t%f\t%f\n",mt,m0,m1,m2,m0/mt,m1/m0,m1/m2}'"""
])
out, err = common.run_pipe(steps, pbc_file_qc_filename)
if err:
logger.error("PBC file error: %s" % (err))
output = {}
logger.info("Uploading results files to the project")
filtered_bam = dxpy.upload_local_file(final_bam_filename)
filtered_bam_index = dxpy.upload_local_file(final_bam_index_filename)
output.update({
"filtered_bam": dxpy.dxlink(filtered_bam),
"filtered_bam_index": dxpy.dxlink(filtered_bam_index)
})
# If the scrub parameter is true, pass the bams to the scrub applet.
if scrub:
scrub_applet = dxpy.find_one_data_object(
classname='applet',
name='scrub',
project=dxpy.PROJECT_CONTEXT_ID,
zero_ok=False,
more_ok=False,
return_handler=True)
scrub_subjob = \
scrub_applet.run(
{"input_bams": [input_bam, dxpy.dxlink(filtered_bam)]},
name='Scrub bams')
scrubbed_unfiltered_bam = scrub_subjob.get_output_ref("scrubbed_bams", index=0)
scrubbed_filtered_bam = scrub_subjob.get_output_ref("scrubbed_bams", index=1)
# Add the optional scrubbed outputs.
output.update({
"scrubbed_unfiltered_bam": dxpy.dxlink(scrubbed_unfiltered_bam),
"scrubbed_filtered_bam": dxpy.dxlink(scrubbed_filtered_bam)
})
# Upload or calculate the remaining outputs.
filtered_mapstats = \
dxpy.upload_local_file(final_bam_file_mapstats_filename)
dup_file = dxpy.upload_local_file(dup_file_qc_filename)
pbc_file = dxpy.upload_local_file(pbc_file_qc_filename)
logger.info("Calcualting QC metrics")
dup_qc = dup_parse(dup_file_qc_filename)
pbc_qc = pbc_parse(pbc_file_qc_filename)
initial_mapstats_qc = flagstat_parse(raw_bam_file_mapstats_filename)
final_mapstats_qc = flagstat_parse(final_bam_file_mapstats_filename)
if paired_end:
useable_fragments = final_mapstats_qc.get('in_total')[0]/2
else:
useable_fragments = final_mapstats_qc.get('in_total')[0]
logger.info("initial_mapstats_qc: %s" % (initial_mapstats_qc)),
logger.info("final_mapstats_qc: %s" % (final_mapstats_qc)),
logger.info("dup_qc: %s" % (dup_qc))
logger.info("pbc_qc: %s" % (pbc_qc))
# Return links to the output files and values.
output.update({
"filtered_mapstats": dxpy.dxlink(filtered_mapstats),
"dup_file_qc": dxpy.dxlink(dup_file),
"pbc_file_qc": dxpy.dxlink(pbc_file),
"paired_end": paired_end,
"n_reads_input": str(initial_mapstats_qc.get('in_total')[0]),
"picard_read_pairs_examined": str(dup_qc.get('read_pairs_examined')),
"picard_unpaired_reads_examined": str(dup_qc.get('unpaired_reads_examined')),
"picard_read_pair_duplicates": str(dup_qc.get('read_pair_duplicates')),
"picard_unpaired_read_duplicates": str(dup_qc.get('unpaired_read_duplicates')),
"useable_fragments": str(useable_fragments),
"NRF": str(pbc_qc.get('NRF')),
"PBC1": str(pbc_qc.get('PBC1')),
"PBC2": str(pbc_qc.get('PBC2')),
"duplicate_fraction": str(dup_qc.get('percent_duplication'))
})
logger.info("Exiting with output:\n%s" % (pformat(output)))
return output
dxpy.run()
| [
"[email protected]"
]
| |
aceb1fcdf196d716cb53b0a7e02874bfd259fffa | e2426d7c01500ca4a2df4e4555f217f957baf957 | /cows/service/imps/geoplot_wms_backend/slabs/slab_base.py | 3375ea42382e0d9856eba9bfa7bb8f66d03e401f | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | cedadev/cows | 959a5e1ad220cfe0cce48a2131d6971106c765aa | db9ed729c886b271ce85355b97e39243081e8246 | refs/heads/master | 2020-03-16T15:17:45.710584 | 2018-05-09T10:35:47 | 2018-05-09T10:36:37 | 132,736,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,755 | py | # BSD Licence
# Copyright (c) 2010, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
import logging
import time
import numpy
from geoplot.utils import isRangeInLimits
import geoplot.colour_scheme as colour_scheme
from cows.service.wms_iface import IwmsLayerSlab
from cows.service.imps.image_import import Image
from cows.service.imps.geoplot_wms_backend.slab_options_parser import SlabOptionsParser
from cows.service.imps.geoplot_wms_backend.rendering_option import RenderingOption
log = logging.getLogger(__name__)
class SlabBase(IwmsLayerSlab):
"""
A layer slab that implements the IwmsLayerSlab interface and uses geoplot
to render the required images.
This is an abstract base class and should not be used directly.
"""
renderingOptions = [
RenderingOption('cmap', "Colour Scheme" ,str,'jet',["bone","jet", "copper", "gray", "winter"] ),
RenderingOption('cmap_min', "Legend Min" ,float,None),
RenderingOption('cmap_max', "Legend Max" ,float,None),
RenderingOption('cmap_scale', "Colour Bar Scale" ,str ,'linear', ['linear','log']),
]
"""
constructor
@param variable: the netcdf variable that contains the data for this slab
@param title: the title of the variable that is to be used
@param crs: the coordinate refrence system the data is stored in
@param dimValues: the dimension values for this slab
@param transparent: indicates if the produced image should be transparent or
not.
@param bbox: the bounds of the data in lat/lon
@param renderOpts: the additional parameters recieved by the WMS, may include
some custom rendering options.
"""
def __init__(self, variable, title, crs, dimValues, transparent, bgcolor, bbox, renderOpts):
self.title = title
self.renderOpts = renderOpts
self.bgcolor = bgcolor
self.transparent = transparent
self.variable = variable
#log.debug("renderOpts = %s" % (renderOpts,))
# Check for non-default, but valid, colour map.
cmapName = renderOpts.get('cmap', None)
self._setUpColourMap(cmapName)
self.parser = SlabOptionsParser(self.renderingOptions, renderOpts)
self.ld = self._setupLayerDrawer()
@classmethod
def _setUpColourMap(cls, cmapName):
"""Adds a colour map to those defined in the rendering options if it is valid and not
present already.
@param cmapName: name of colour map
"""
log.debug("Checking for cmap %s" % cmapName)
cmapOptions = [r for r in cls.renderingOptions if r.name == 'cmap'][0]
if cmapName not in cmapOptions.options:
log.debug("Not found in renderingOptions %s" % cmapName)
if colour_scheme.isValidCmapName(cmapName):
log.debug("Valid cmap name %s" % cmapName)
cmapOptions.options.append(cmapName)
log.debug("All known cmaps %s" % cmapOptions)
"""
Creates the layer drawer object so that it can be used in getImage
"""
def _setupLayerDrawer(self):
raise NotImplementedError()
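    # Sketch of a concrete subclass (illustrative only -- the real
    # construction depends on geoplot's layer-drawer API, which is not shown
    # in this file):
    #
    #     class ContourSlab(SlabBase):
    #         def _setupLayerDrawer(self):
    #             # must return an object exposing
    #             # makeImage(xLimits, yLimits, width, height)
    #             ...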
"""
returns an image of the data constructed using the layer drawer
@param bbox: the limits of the image requested
@param width: the width in px of the image
@param height: the height in px of the image
"""
def getImage(self, bbox, width, height):
"""
Create an image of a sub-bbox of a given size.
:ivar bbox: A bbox 4-tuple.
:ivar width: width in pixels.`
:ivar height: height in pixels.
:return: A PIL Image object.
"""
#log.debug("GetImage called with bbox=%s, width=%s, height = %s" % (bbox, width, height,))
xLimits = (bbox[0], bbox[2])
yLimits = (bbox[1], bbox[3])
if sorted(self.variable.getAxisIds()) == sorted(['latitude','longitude']):
if not self._areBoundsInLimits(bbox, xLimits, yLimits):
img = numpy.zeros((height,width,4), numpy.uint8)
pilImage = Image.fromarray(img, 'RGBA')
log.debug("empty image used as no data found for id=%s (%sx%s), lon=%s, lat=%s " % \
(self.variable.id, width, height, xLimits, yLimits))
return pilImage
st = time.time()
im = self.ld.makeImage(xLimits, yLimits, width, height)
log.debug("generated contour image id=%s (%sx%s, lon=%s, lat=%s in %.2fs" % \
(self.variable.id, width, height, xLimits, yLimits, time.time() - st,))
return im
def _areBoundsInLimits(self, bbox, xLimits, yLimits):
if self.variable.getAxisIds()[0] == 'longitude':
lonAx, latAx = self.variable.getAxisList()
else:
latAx, lonAx = self.variable.getAxisList()
xRange = [ lonAx.getBounds().min(), lonAx.getBounds().max()]
yRange = [ latAx.getBounds().min(), latAx.getBounds().max()]
log.debug("xLimits = %s" % (xLimits,))
log.debug("yLimits = %s" % (yLimits,))
log.debug("xRange = %s" % (xRange,))
log.debug("yRange = %s" % (yRange,))
log.debug("x range is circular: %s" % ("True" if lonAx.isCircular() else "False",))
isInLimits = ((lonAx.isCircular() or isRangeInLimits(xRange, xLimits)) and
isRangeInLimits(yRange, yLimits))
log.debug("isInLimits = %s" % (isInLimits,))
return isInLimits
| [
"[email protected]"
]
| |
406894c61e6011d157c6b62a0eccbe5a91f21124 | 58fb8cebdb51a83c6afd29f6b0d745d07ccfb441 | /Cmonitor/statTasks/tasks.py | be2edf49342afdb91b45469ba2b8bd66d0a2a4b2 | []
| no_license | amonlong/Cmonitor | 7980baabc139a62f9870fe0110076b761b7890b6 | 6cf1ec84db69236c9ff79c7bc475a0fa26e40e12 | refs/heads/master | 2020-03-09T22:39:58.661338 | 2018-04-23T06:01:56 | 2018-04-23T06:01:56 | 129,039,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | #-*- coding: utf-8 -*-
from __future__ import absolute_import,unicode_literals
import uuid
import time
from celery import states
from apps.record.models import TaskState, TaskItem
from statTasks.celery import app
from statTasks.subtasks import index, userInfo, business, risk, uniId
def makeRecord(taskname, stime, state, memo):
task = TaskItem.objects.filter(taskname=taskname)
if task:
sname = taskname + str(time.time())
ts = TaskState(
task_id = uuid.uuid3(uuid.NAMESPACE_DNS, sname),
taskname = task[0],
state = state,
memo = memo,
runtime = time.time() - stime
)
ts.save()
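# Example: makeRecord('indexHead', stime, states.SUCCESS, 'ok') saves one
# TaskState row for the registered TaskItem, with a deterministic UUID
# derived from the task name plus the current timestamp and a runtime of
# time.time() - stime seconds. Nothing is recorded if no TaskItem matches
# the given taskname.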
# index tasks
@app.task
def indexHead(taskname):
stime = time.time()
state, memo = index.indexHead()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def indexHopper(taskname):
stime = time.time()
state, memo = index.indexHopper()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def indexPlace(taskname):
stime = time.time()
state, memo = index.indexPlace()
makeRecord(taskname, stime, state, memo)
return state, memo
#userInfo mession
@app.task
def userIncrease(taskname):
stime = time.time()
state, memo = userInfo.userIncrease()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def userAge(taskname):
stime = time.time()
state, memo = userInfo.userAge()
makeRecord(taskname, stime, state, memo)
return state, memo
#business
@app.task
def flowLoanMoneyNO(taskname):
stime = time.time()
state, memo = business.flowLoanMoneyNO()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def flowRepayMoney(taskname):
stime = time.time()
state, memo = business.flowRepayMoney()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def flowDelayRate(taskname):
stime = time.time()
state, memo = business.flowDelayRate()
makeRecord(taskname, stime, state, memo)
return state, memo
#risk mession
@app.task
def passRate(taskname):
stime = time.time()
state, memo = risk.passRate()
makeRecord(taskname, stime, state, memo)
return state, memo
@app.task
def overdueRate(taskname):
stime = time.time()
state, memo = risk.overdueRate()
makeRecord(taskname, stime, state, memo)
return state, memo
# uniId tasks
@app.task
def productFirm(taskname):
stime = time.time()
state, memo = uniId.productFirm()
makeRecord(taskname, stime, state, memo)
return state, memo
| [
"[email protected]"
]
| |
0e1c132f5b2b42abc4a92492dce22887936f8ee9 | fe7d80aa667ea7f34b60fc927e54d279f7bf81cb | /history/myslim/nets/mobilenet_v1.py | dca7ac604253d03b809107330d7b021f6449a5eb | []
| no_license | qq191513/myRecognize | 3b20b8ca7f1935d6b177b368eb72f0282db8799e | 8a183ca1e8ababd4f52b87a86f92c78eda5f4dc5 | refs/heads/master | 2020-03-21T18:26:24.286107 | 2019-06-26T08:21:40 | 2019-06-26T08:21:40 | 138,891,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,301 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow as tf
slim = tf.contrib.slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# _CONV_DEFS specifies the MobileNet body
_CONV_DEFS = [
Conv(kernel=[3, 3], stride=2, depth=32),
DepthSepConv(kernel=[3, 3], stride=1, depth=64),
DepthSepConv(kernel=[3, 3], stride=2, depth=128),
DepthSepConv(kernel=[3, 3], stride=1, depth=128),
DepthSepConv(kernel=[3, 3], stride=2, depth=256),
DepthSepConv(kernel=[3, 3], stride=1, depth=256),
DepthSepConv(kernel=[3, 3], stride=2, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
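# Worked example: for kernel_size=[3, 3] and rate=1 the effective kernel is
# 3x3, so pad_total=[2, 2], pad_beg=[1, 1] and pad_end=[1, 1] -- one pixel of
# zero padding on every side, matching what 'SAME' padding would add for a
# stride-1 convolution.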
def mobilenet_v1_base(inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
output_stride=None,
use_explicit_padding=False,
scope=None):
"""Mobilenet v1.
Constructs a Mobilenet v1 network from inputs to the given final endpoint.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,
'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
depth = lambda d: max(int(d * depth_multiplier), min_depth)
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
if conv_defs is None:
conv_defs = _CONV_DEFS
if output_stride is not None and output_stride not in [8, 16, 32]:
raise ValueError('Only allowed output_stride values are 8, 16, 32.')
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
for i, conv_def in enumerate(conv_defs):
end_point_base = 'Conv2d_%d' % i
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= conv_def.stride
else:
layer_stride = conv_def.stride
layer_rate = 1
current_stride *= conv_def.stride
if isinstance(conv_def, Conv):
end_point = end_point_base
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel)
net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
stride=conv_def.stride,
normalizer_fn=slim.batch_norm,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
elif isinstance(conv_def, DepthSepConv):
end_point = end_point_base + '_depthwise'
# By passing filters=None
# separable_conv2d produces only a depthwise convolution layer
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel, layer_rate)
net = slim.separable_conv2d(net, None, conv_def.kernel,
depth_multiplier=1,
stride=layer_stride,
rate=layer_rate,
normalizer_fn=slim.batch_norm,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
end_point = end_point_base + '_pointwise'
net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
stride=1,
normalizer_fn=slim.batch_norm,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
else:
raise ValueError('Unknown convolution type %s for layer %d'
% (conv_def.ltype, i))
raise ValueError('Unknown final endpoint %s' % final_endpoint)
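# Minimal usage sketch for mobilenet_v1_base (illustrative only, not part of
# the original module; assumes TF 1.x and tf.contrib.slim as used throughout
# this file):
def _mobilenet_v1_base_example():
  images = tf.zeros([1, 224, 224, 3])
  net, end_points = mobilenet_v1_base(images,
                                      final_endpoint='Conv2d_13_pointwise')
  # With 224x224 inputs the final feature map has stride 32, i.e. net has
  # shape [1, 7, 7, 1024]; end_points maps endpoint names to activations.
  return net, end_points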
def mobilenet_v1(inputs,
num_classes=1000,
dropout_keep_prob=0.999,
is_training=True,
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
prediction_fn=tf.contrib.layers.softmax,
spatial_squeeze=True,
reuse=None,
scope='MobilenetV1',
global_pool=False):
"""Mobilenet v1 model for classification.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
dropout_keep_prob: the percentage of activation values that are retained.
is_training: whether is training or not.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Input rank is invalid.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
len(input_shape))
with tf.variable_scope(scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = mobilenet_v1_base(inputs, scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
conv_defs=conv_defs)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a')
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
mobilenet_v1.default_image_size = 32
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
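# The partials above are plain width-multiplier presets: calling, e.g.,
#   mobilenet_v1_050(images, num_classes=1000)
# is equivalent to
#   mobilenet_v1(images, num_classes=1000, depth_multiplier=0.50)
# (illustrative; `images` stands in for a caller-supplied input tensor).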
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
def mobilenet_v1_arg_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
regularize_depthwise=False,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Defines the default MobilenetV1 arg scope.
Args:
is_training: Whether or not we're training the model.
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not to apply regularization on depthwise weights.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the mobilenet v1 model.
"""
batch_norm_params = {
'is_training': is_training,
'center': True,
'scale': True,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
}
# Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
if regularize_depthwise:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
weights_initializer=weights_init,
activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:
return sc
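# Typical way to combine the arg scope with the model (an illustrative
# sketch, not part of the original file; `images` is a caller-supplied
# input tensor):
def _mobilenet_v1_with_arg_scope_example(images, num_classes=1000,
                                         is_training=True):
  with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
    return mobilenet_v1(images, num_classes=num_classes,
                        is_training=is_training)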
| [
"[email protected]"
]
| |
11061c8608613ee3897a45acfe68832ba4ec274e | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project2/Project2/.history/blog/admin_20211115153912.py | aabc2adbfaef442cdd6a022985cdf4696abffa70 | []
| no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 570 | py | from django.contrib import admin
from .models import Blog
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
# Register your models here.
class LessonForm(forms.ModelForm):
    content = forms.CharField(widget=CKEditorUploadingWidget())
class Meta:
model = Blog
fields = '__all__'
class BlogAdmin(admin.ModelAdmin):
    form = LessonForm
    list_display = ('title', 'slug', 'status', 'created_on')
    list_filter = ('status',)
    search_fields = ['title', 'content']
admin.site.register(Blog, BlogAdmin)
| [
"[email protected]"
]
| |
2a428d4157e23eb314c57e4cb9f438c38a5c599d | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/sso/changeAgentPassword/sso_changeAgentPassword.py | 6e7965e0ea2c880b5f9870c2b34e27563455236e | []
| no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 186 | py | __author__ = 'aserver'
__tags__ = 'sso', 'changeAgentPassword'
__priority__= 3
def main(q, i, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True
| [
"devnull@localhost"
]
| devnull@localhost |
dc87b8ec1e10aade6ad80e91571d12ee9671758b | c5b062551f2131b4d9d68de44d0eceebb57403d9 | /tests/refresh_token/test_shortcuts.py | 1a69514235e1294ddcef5bd8aed414ebeb26f760 | [
"MIT"
]
| permissive | PedroBern/django-graphql-jwt | e78437257e6d948ba48c32107596742c4e9753b9 | 6e816445b72e7582d0595fda9e7e5d0486026045 | refs/heads/master | 2020-12-05T10:12:20.893450 | 2019-12-05T15:28:42 | 2019-12-05T15:28:42 | 232,077,280 | 1 | 0 | MIT | 2020-01-06T10:28:28 | 2020-01-06T10:28:28 | null | UTF-8 | Python | false | false | 521 | py | from graphql_jwt import shortcuts
from graphql_jwt.exceptions import JSONWebTokenError
from ..testcases import UserTestCase
class ShortcutsTests(UserTestCase):
def test_get_refresh_token(self):
refresh_token = shortcuts.create_refresh_token(self.user)
user = shortcuts.get_refresh_token(refresh_token).user
self.assertEqual(user, self.user)
def test_get_refresh_token_error(self):
with self.assertRaises(JSONWebTokenError):
shortcuts.get_refresh_token('invalid')
| [
"[email protected]"
]
| |
e6293c0f6d02105de2139fc70ed4a725fedec707 | 7c19fbfe632d6fc32b1d2ba4f53aac17f9351483 | /test.py | b312087306712d3d8e304f240172b11d2a06c079 | []
| no_license | ymsk-sky/capture_tube | 3cdea1e0634d6252a8980aa685f963cc3de12518 | bc6e9bb5c88e4b9212c02b249eef8b40f1e5aa24 | refs/heads/master | 2020-12-27T16:27:04.528906 | 2020-04-08T11:08:16 | 2020-04-08T11:08:16 | 237,970,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | # -*- coding: utf-8 -*-
import cv2
import pafy
import youtube_dl
def main():
src = 'test.mp4'
video = cv2.VideoCapture(src)
if not video.isOpened():
return
# fpsを取得
fps = int(video.get(cv2.CAP_PROP_FPS))
# 分類器を作成
cascade_file = 'lbpcascade_animeface.xml'
clf = cv2.CascadeClassifier(cascade_file)
    # Process the video one frame at a time
while video.isOpened():
ret, frame = video.read()
if not ret:
break
        # Convert to grayscale (-> binarization)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect faces
faces = clf.detectMultiScale(gray)
        # Draw bounding boxes
for x, y, w, h in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv2.imshow('tube', frame)
key = cv2.waitKey(fps) & 0xFF
if key == ord('q'):
break
video.release()
cv2.destroyAllWindows()
def dl(url):
    ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s', 'format': '137'})
with ydl:
result = ydl.extract_info(url, download=True)
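# Illustrative call (hypothetical URL; format '137' is YouTube's 1080p
# video-only DASH stream):
#   dl('https://www.youtube.com/watch?v=XXXXXXXXXXX')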
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
13b88e53752bbfb7de8405c6c5af6b3a53d11bd7 | 3d4a6bd2297ac04b112bc8d24fa1118f692a4e2b | /procon_python/src/atcoder/abc/past/B_044_BeautifulStrings.py | 5bc6369e3357ba32fb294ba56744f3eb855c7957 | []
| no_license | WAT36/procon_work | beba626d9b9c1effded8c9b9f56fbc37abd13636 | 2e6bc42e6f25938afe740682ad7b6c21a0838d42 | refs/heads/master | 2021-08-16T11:56:25.048392 | 2021-06-17T14:13:10 | 2021-06-17T14:13:10 | 162,991,707 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | w = input()
word = {}
l = len(w)
for i in range(l):
n = word.get(w[i], 0)
n = n+1
word[w[i]] = n
flag = True
for i in word.values():
if(i % 2 == 1):
flag = False
break
if(flag):
print("Yes")
else:
print("No")
| [
"[email protected]"
]
| |
1f9d11610567675172e7807ed1c7ec80be28c1f8 | 623f4682aac5a5dca3e59edfb3595e4c5a718933 | /torch/_dynamo/eval_frame.py | 33a59e7ea5197afee65b3e9d250f12a2ac69257d | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | 1div0/pytorch | e06297660a99499ab3f3eb4d1a6e87e6993f5071 | 01b662bafe54dfda561d442015dc512edf8b1564 | refs/heads/master | 2023-09-03T20:10:11.831755 | 2023-09-02T17:37:36 | 2023-09-02T17:37:36 | 149,109,844 | 0 | 0 | null | 2018-09-17T10:39:50 | 2018-09-17T10:39:49 | null | UTF-8 | Python | false | false | 51,992 | py | from __future__ import annotations
import contextlib
import dis
import functools
import inspect
import logging
import os
import sys
import textwrap
import threading
import traceback
import types
import warnings
from enum import Enum
from os.path import dirname, join
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Set,
Tuple,
TYPE_CHECKING,
Union,
)
from unittest.mock import patch
import torch
import torch.fx
import torch.utils._pytree as pytree
import torch.utils.checkpoint
from torch import _guards
from torch._subclasses import fake_tensor
from torch.export import Constraint
from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
from torch.nn.parallel.distributed import DistributedDataParallel
from ..fx import GraphModule
from .backends.registry import CompilerFn, lookup_backend
from .hooks import Hooks
if TYPE_CHECKING:
from torch._C._dynamo.eval_frame import ( # noqa: F401
reset_code,
set_eval_frame,
set_guard_error_hook,
set_guard_fail_hook,
skip_code,
unsupported,
)
else:
for name in dir(torch._C._dynamo.eval_frame):
if name.startswith("__"):
continue
globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
from . import config, convert_frame, external_utils, skipfiles, utils
from .exc import CondOpArgsMismatchError, ResetRequired, UserError, UserErrorType
from .mutation_guard import install_generation_tagging_init
from .types import DynamoCallback
from .utils import compile_times
log = logging.getLogger(__name__)
from torch._dispatch.python import enable_python_dispatcher
from torch.utils._python_dispatch import _disable_current_modes
always_optimize_code_objects = utils.ExactWeakKeyDictionary()
null_context = contextlib.nullcontext
import sympy
from torch.fx.experimental.symbolic_shapes import ConstraintViolationError
# See https://github.com/python/typing/pull/240
class Unset(Enum):
token = 0
unset = Unset.token
compile_lock = threading.RLock()
most_recent_backend: Optional[CompilerFn] = None
DONT_WRAP_FILES = {
# For tracing into fx modules
inspect.getsourcefile(GraphModule),
join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
}
# This class has a `check_fn` field for the guard,
# and a `code` field for the code object.
CacheEntry = torch._C._dynamo.eval_frame._CacheEntry
def _debug_get_cache_entry_list(
code: Union[types.CodeType, Callable[..., Any]]
) -> List[CacheEntry]: # type: ignore[valid-type]
"""
Given a code object or a callable object, retrieve the cache entries
stored in this code.
"""
if callable(code):
code = code.__code__
cache_head = torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)
cache_list = []
while cache_head is not None:
cache_list.append(cache_head)
cache_head = cache_head.next
return cache_list
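# Illustrative use of the helper above (a sketch; assumes a function that
# has already been compiled at least once):
#   def f(x):
#       return x + 1
#   torch.compile(f)(torch.randn(3))  # populate the cache on f.__code__
#   for entry in _debug_get_cache_entry_list(f):
#       entry.check_fn  # the guard
#       entry.code      # the compiled code object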
class OptimizedModule(torch.nn.Module):
"""
Wraps the original nn.Module object and later patches its
forward method to optimized self.forward method.
"""
def __init__(self, mod: torch.nn.Module, dynamo_ctx):
super().__init__()
# Installs the params/buffer
self._orig_mod = mod
self.dynamo_ctx = dynamo_ctx
self._initialize()
def _initialize(self):
# Do this stuff in constructor to lower overhead slightly
if isinstance(self._orig_mod.forward, types.MethodType) and skipfiles.check(
inspect.getsourcefile(self._orig_mod.forward)
):
# This may be a torch.nn.* instance in skipfiles.py which
# won't trigger a frame evaluation workaround to add an extra
# frame we can capture
self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod))
else:
# Invoke hooks outside of dynamo then pickup the inner frame
self.forward = self.dynamo_ctx(self._orig_mod.__call__)
if hasattr(self._orig_mod, "_initialize_hook"):
self._forward = self.forward
self.forward = self._call_lazy_check
def __getstate__(self):
state = dict(self.__dict__)
state.pop("forward", None)
state.pop("__call__", None)
return state
def __setstate__(self, state):
self.__dict__ = state
self._initialize()
def __getattr__(self, name):
if name == "_orig_mod":
return self._modules["_orig_mod"]
return getattr(self._orig_mod, name)
def _call_lazy_check(self, *args, **kwargs):
if hasattr(self._orig_mod, "_initialize_hook"):
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it.
# Afterwards, lazy module deletes its pre-hooks
# to avoid treating it as lazy on subsequent recompile.
assert len(kwargs) == 0
self._orig_mod._infer_parameters(self._orig_mod, args)
return self._forward(*args, **kwargs)
def __dir__(self):
orig_mod_attrs = self._orig_mod.__dir__()
return orig_mod_attrs + [
attr for attr in super().__dir__() if attr not in orig_mod_attrs
]
def remove_from_cache(f):
"""
Make sure f.__code__ is not cached to force a recompile
"""
if isinstance(f, types.CodeType):
reset_code(f)
elif hasattr(f, "__code__"):
reset_code(f.__code__)
elif hasattr(getattr(f, "forward", None), "__code__"):
reset_code(f.forward.__code__)
else:
from . import reset # type: ignore[attr-defined]
reset()
log.warning("could not determine __code__ for %s", f)
def nothing():
pass
def innermost_fn(fn):
"""
In case of nesting of _TorchDynamoContext calls, find the innermost
    function. TorchDynamo caches on the fn.__code__ object, so it's necessary to find
the innermost function to pass on the optimize, run, disable etc.
"""
unaltered_fn = fn
while hasattr(unaltered_fn, "_torchdynamo_orig_callable"):
unaltered_fn = unaltered_fn._torchdynamo_orig_callable
assert callable(unaltered_fn)
return unaltered_fn
@contextlib.contextmanager
def enable_dynamic(enable: Optional[bool] = None, export: bool = False):
if enable is None:
yield
elif enable:
        # Assume everything is dynamic by default
with config.patch(assume_static_by_default=False):
yield
else:
with config.patch(
automatic_dynamic_shapes=False, assume_static_by_default=True
):
yield
class _TorchDynamoContext:
def __init__(
self,
callback: DynamoCallback,
on_enter=nothing,
backend_ctx_ctor=null_context,
patch_fn=nothing,
first_ctx=False,
*,
export=False,
dynamic=None,
compiler_config=None,
):
super().__init__()
assert callable(callback) or callback is False or callback is None
self.callback: DynamoCallback = callback
self.prior: Union[Unset, DynamoCallback] = unset
self.on_enter = on_enter
self.extra_ctx_ctor = backend_ctx_ctor
self.first_ctx = first_ctx
self.export = export
self.dynamic = dynamic
self.compiler_config = compiler_config
patch_fn()
def __enter__(self):
if config.raise_on_ctx_manager_usage:
raise RuntimeError(
"torch._dynamo.optimize(...) is used with a context manager. "
"Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
"to use torch._dynamo.optimize(...) as an annotation/decorator. "
)
self.on_enter()
self.prior = set_eval_frame(self.callback)
self.backend_ctx = self.extra_ctx_ctor()
self.backend_ctx.__enter__()
self.dynamic_ctx = enable_dynamic(self.dynamic, self.export)
self.dynamic_ctx.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
assert self.prior is not unset
set_eval_frame(self.prior)
self.prior = unset
# TODO: This is totally not the right way to chain contexts manually
self.dynamic_ctx.__exit__(exc_type, exc_val, exc_tb)
self.backend_ctx.__exit__(exc_type, exc_val, exc_tb)
def __call__(self, fn):
# public api for compiler config/options
def get_compiler_config():
return self.compiler_config
fn = innermost_fn(fn)
# Optimize the forward method of torch.nn.Module object
if isinstance(fn, torch.nn.Module):
mod = fn
new_mod = OptimizedModule(mod, self)
# Save the function pointer to find the original callable while nesting
# of decorators.
new_mod._torchdynamo_orig_callable = mod.forward
# when compiling torch.nn.Module,
# provide public api OptimizedModule.get_compiler_config()
assert not hasattr(new_mod, "get_compiler_config")
new_mod.get_compiler_config = get_compiler_config # type: ignore[attr-defined]
return new_mod
assert callable(fn)
try:
filename = inspect.getsourcefile(fn)
except TypeError:
filename = None
if (
(filename is None or skipfiles.check(filename))
and (
getattr(fn, "__name__", "") not in ["_call_impl", "_wrapped_call_impl"]
)
and filename not in DONT_WRAP_FILES
):
# call to a builtin without a frame for us to capture
fn = external_utils.wrap_inline(fn)
callback = self.callback
on_enter = self.on_enter
backend_ctx_ctor = self.extra_ctx_ctor
@functools.wraps(fn)
def _fn(*args, **kwargs):
if (
not isinstance(self, DisableContext)
and torch.fx._symbolic_trace.is_fx_tracing()
):
if config.error_on_nested_fx_trace:
raise RuntimeError(
"Detected that you are using FX to symbolically trace "
"a dynamo-optimized function. This is not supported at the moment."
)
else:
return fn(*args, **kwargs)
on_enter()
prior = set_eval_frame(callback)
backend_ctx = backend_ctx_ctor()
backend_ctx.__enter__()
dynamic_ctx = enable_dynamic(self.dynamic, self.export)
dynamic_ctx.__enter__()
try:
return fn(*args, **kwargs)
finally:
set_eval_frame(prior)
dynamic_ctx.__exit__(None, None, None)
backend_ctx.__exit__(None, None, None)
# hooks to properly handle inlining
if isinstance(self, DisableContext):
_fn._torchdynamo_disable = True # type: ignore[attr-defined]
else:
_fn._torchdynamo_inline = fn # type: ignore[attr-defined]
# Save the function pointer to find the original callable while nesting
# of decorators.
_fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
# when compiling user function instead of nn.Module
# provide public api _fn.get_compiler_config()
assert not hasattr(_fn, "get_compiler_config")
_fn.get_compiler_config = get_compiler_config # type: ignore[attr-defined]
# If the function is called using torch._dynamo.optimize decorator, we
# should prevent any type of skipping.
if callback not in (None, False):
if not hasattr(fn, "__code__"):
raise RuntimeError(
textwrap.dedent(
"""
torch._dynamo.optimize is called on a non function object.
If this is a callable class, please wrap the relevant code into a function and optimize the
wrapper function.
>> class CallableClass:
>> def __init__(self):
>> super().__init__()
>> self.relu = torch.nn.ReLU()
>>
>> def __call__(self, x):
>> return self.relu(torch.sin(x))
>>
>> def print_hello(self):
>> print("Hello world")
>>
>> mod = CallableClass()
If you want to optimize the __call__ function and other code, wrap that up in a function
>> def wrapper_fn(x):
>> y = mod(x)
>> return y.sum()
and then optimize the wrapper_fn
>> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn)
"""
)
)
always_optimize_code_objects[fn.__code__] = True
return _fn
class OptimizeContext(_TorchDynamoContext):
@staticmethod
def _different_backend(old, new):
return not (old == new or old is None)
def __init__(
self,
callback,
backend_ctx_ctor,
first_ctx=False,
*,
export=False,
dynamic=None,
compiler_config=None,
):
def on_enter():
global most_recent_backend
if OptimizeContext._different_backend(most_recent_backend, compiler_fn):
if config.raise_on_backend_change:
raise ResetRequired()
else:
warnings.warn(
"changing options to `torch.compile()` may require "
"calling `torch._dynamo.reset()` to take effect"
)
most_recent_backend = compiler_fn
install_generation_tagging_init()
compiler_fn = innermost_fn(callback)
super().__init__(
callback=callback,
on_enter=on_enter,
backend_ctx_ctor=backend_ctx_ctor,
patch_fn=TorchPatcher.patch,
first_ctx=first_ctx,
export=export,
dynamic=dynamic,
compiler_config=compiler_config,
)
class RunOnlyContext(_TorchDynamoContext):
def __init__(self):
# cudagraph trees relies on generation increment
def on_enter():
torch._dynamo.mutation_guard.GenerationTracker.generation += 1
super().__init__(callback=False, on_enter=on_enter)
class DisableContext(_TorchDynamoContext):
def __init__(self):
super().__init__(callback=None)
def first_real_inst_idx(code):
if sys.version_info < (3, 11):
return 0
for inst in dis.get_instructions(code):
if inst.opname == "RESUME":
return inst.offset // 2
raise RuntimeError("RESUME instruction not found in code")
def catch_errors_wrapper(callback, hooks: Hooks):
@functools.wraps(callback)
def catch_errors(frame, cache_entry, frame_state):
assert frame_state is not None
if (
# TODO: the first condition is not covered by any test
frame.f_lasti >= first_real_inst_idx(frame.f_code)
or skipfiles.check(frame.f_code.co_filename)
or config.disable
):
log.debug("skipping %s %s", frame.f_code.co_name, frame.f_code.co_filename)
return None
if frame.f_code.co_filename == "<string>" and frame.f_code.co_name == "__new__":
            # namedtuple constructor
return None
if config.optimize_ddp:
ddp_module = DistributedDataParallel._get_active_ddp_module()
if ddp_module:
with compile_lock:
from torch._dynamo.backends.distributed import DDPOptimizer
ddp_optimizer = DDPOptimizer(
bucket_bytes_cap=ddp_module.bucket_bytes_cap,
backend_compile_fn=callback._torchdynamo_orig_callable,
)
assert hasattr(
callback, "_clone_with_backend"
), "DDPOptimizer only supports callback fns that know how to clone themselves."
hijacked_callback = callback._clone_with_backend(
ddp_optimizer.compile_fn,
)
return hijacked_callback(frame, cache_entry, hooks, frame_state)
with compile_lock, _disable_current_modes():
return callback(frame, cache_entry, hooks, frame_state)
catch_errors._torchdynamo_orig_callable = callback # type: ignore[attr-defined]
return catch_errors
def _optimize_catch_errors(
compile_fn,
hooks: Hooks,
backend_ctx_ctor=null_context,
export=False,
dynamic=None,
compiler_config=None,
):
return OptimizeContext(
catch_errors_wrapper(compile_fn, hooks),
backend_ctx_ctor=backend_ctx_ctor,
first_ctx=True,
export=export,
dynamic=dynamic,
compiler_config=compiler_config,
)
def get_compiler_fn(compiler_fn):
from .repro.after_dynamo import wrap_backend_debug
if hasattr(compiler_fn, "compiler_name"):
compiler_str = compiler_fn.compiler_name
elif isinstance(compiler_fn, str):
compiler_str = compiler_fn
else:
compiler_str = None
compiler_fn = lookup_backend(compiler_fn)
return wrap_backend_debug(compiler_fn, compiler_str)
class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg]
def __call__(self, fn):
assert callable(fn)
return fn
def check_if_dynamo_supported():
if sys.platform == "win32":
raise RuntimeError("Windows not yet supported for torch.compile")
if sys.version_info >= (3, 12):
raise RuntimeError("Python 3.12+ not yet supported for torch.compile")
def is_dynamo_supported():
try:
check_if_dynamo_supported()
return True
except Exception:
return False
def optimize(
backend="inductor",
*,
nopython=False,
guard_export_fn=None,
guard_fail_fn=None,
disable=False,
dynamic=None,
):
"""
The main entrypoint of TorchDynamo. Do graph capture and call
backend() to optimize extracted graphs.
Args:
backend: One of the two things:
- Either, a function/callable taking a torch.fx.GraphModule and
example_inputs and returning a python callable that runs the
graph faster.
One can also provide additional context for the backend, like
torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
See AOTAutogradMemoryEfficientFusionWithContext for the usage.
- Or, a string backend name in `torch._dynamo.list_backends()`
nopython: If True, graph breaks will be errors and there will
be a single whole-program graph.
disable: If True, turn this decorator into a no-op
dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
disable all dynamic shapes support (always specialize). If None, automatically
detect when sizes vary and generate dynamic kernels upon recompile.
Example Usage::
@torch._dynamo.optimize()
def toy_example(a, b):
...
"""
check_if_dynamo_supported()
# Note: The hooks object could be global instead of passed around, *however* that would make
# for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
    # There is some prior art around this w.r.t. nested backend calls being enforced to use the
    # same compiler; however, this feels onerous for callbacks and hooks, and it feels better to give our users an
# easier to understand UX at the cost of a little more plumbing on our end.
hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
torch._C._log_api_usage_once("torch._dynamo.optimize")
if disable or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1":
return _NullDecorator()
backend = get_compiler_fn(backend)
# Find if backend has any extra context manager
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
if nopython:
return optimize_assert(
backend,
dynamic=dynamic,
hooks=hooks,
)
return _optimize_catch_errors(
convert_frame.convert_frame(backend, hooks=hooks),
hooks,
backend_ctx_ctor,
dynamic=dynamic,
compiler_config=backend.get_compiler_config()
if hasattr(backend, "get_compiler_config")
else None,
)
# TODO(voz): Consider making "explain" output alongside a run / part of a run
@patch("torch._dynamo.symbolic_convert.explain", True)
def explain(f, *extra_args, **extra_kwargs):
def inner(*args, **kwargs):
# TODO(voz): Do we want a decorator for this?
from . import reset # type: ignore[attr-defined]
reset()
graphs: List[torch.fx.GraphModule] = []
break_reasons: List[Any] = []
op_count: int = 0
ops_per_graph: List[torch.fx.Node] = []
out_guards: List[_guards.Guard] = []
def dynamo_graph_accumulating_compiler(
gm: torch.fx.GraphModule, example_inputs
):
from .backends.debugging import _explain_graph_detail
nonlocal graphs
nonlocal op_count
nonlocal ops_per_graph
nonlocal break_reasons
gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail(
gm, graphs, op_count, ops_per_graph, break_reasons
)
return gm.forward
def guard_export_print(guards):
nonlocal out_guards
out_guards.extend(guards)
with patch(f"{__name__}.most_recent_backend", None):
opt_f = optimize(
dynamo_graph_accumulating_compiler,
nopython=False,
guard_export_fn=guard_export_print,
)(f)
            # TODO(voz): We may have instances of `f` that mutate inputs; we should track side effects and reject.
opt_f(*args, **kwargs)
graph_count = len(graphs)
# For the explanation summary, dedupe reasons by the innermost stack frame and dedupe by it.
deduped_reasons = {}
for reason in break_reasons:
innermost_frame = reason.user_stack[-1]
# __repr__ uniquely identifies a FrameSummary so we can use it for deduping
deduped_reasons[repr(innermost_frame)] = reason
formatted_list = ""
for idx, break_reason in enumerate(deduped_reasons.values()):
formatted_stack = "".join(traceback.format_list(break_reason.user_stack))
msg = f"{idx + 1}. Reason: {break_reason.reason}\n User Stack: {formatted_stack}\n"
formatted_list += msg
graph_break_count = graph_count - 1
compile_time = compile_times(repr="str")
# TODO(voz): Do we want a decorator for this?
reset()
from .backends.debugging import ExplainOutput
return ExplainOutput(
graphs,
graph_count,
graph_break_count,
break_reasons,
op_count,
ops_per_graph,
out_guards,
compile_time,
)
if extra_args or extra_kwargs:
warnings.warn(
"explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
"If you don't migrate, we may break your explain call in the future if your user defined kwargs "
"conflict with future kwargs added to explain(f)."
)
return inner(*extra_args, **extra_kwargs)
else:
return inner
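# Illustrative use of explain() (a sketch; `fn` and its inputs are
# caller-supplied):
#   def fn(x):
#       if x.sum() > 0:          # data-dependent branch -> graph break
#           return x + 1
#       return x - 1
#   print(explain(fn)(torch.randn(4)))  # graph/break counts, reasons, guards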
class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
def __init__(
self,
m: torch.fx.GraphModule,
flat_args: Tuple[Any],
matched_input_elements_positions: List[int],
matched_output_elements_positions: List[int],
example_fake_inputs: List[torch.Tensor],
fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
):
super().__init__(m)
matched_input_elements_to_fake = {
val: example_fake_inputs[ix]
for ix, val in enumerate(matched_input_elements_positions)
}
self.new_args = []
for i in range(0, len(flat_args)):
arg = super().placeholder(f"arg{i}", (), {})
if i in matched_input_elements_to_fake:
arg.node.meta["val"] = matched_input_elements_to_fake[i]
else:
# Fill node.mata["val"] with faketensor from the input,
# if it's not found in matched_input_elements_positions
if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
arg.node.meta["val"] = fake_mode.from_tensor(flat_args[i])
self.new_args.append(arg)
self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
self.matched_output_elements_positions = matched_output_elements_positions
def placeholder(self, target, args, kwargs):
arg = next(self.old_args_gen)
if "val" in self.current_node.meta:
arg.node.meta["val"] = self.current_node.meta["val"]
if "tensor_dict" in self.current_node.meta:
arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
return arg
def output(self, target, args, kwargs):
dynamo_result_flat = args[0]
lookup = [*dynamo_result_flat, *self.new_args]
new_result_flat = [lookup[i] for i in self.matched_output_elements_positions]
return super().output(target, (new_result_flat,), {})
def run_node(self, n):
self.current_node = n
r = super().run_node(n)
if "val" in self.current_node.meta:
r.node.meta["val"] = self.current_node.meta["val"]
return r
class ExportResult(NamedTuple):
graph_module: torch.fx.GraphModule
guards: Set[_guards.Guard]
# NB: Do not add new fields without overriding __iter__; people are
# destructuring so it is BC-breaking
def check_signature_rewritable(graph):
input_errors = []
for node in graph.graph.nodes:
if node.op == "placeholder":
assert hasattr(node, "_dynamo_source")
source = node._dynamo_source
user_stacks = graph._source_to_user_stacks.get(source)
if user_stacks is None:
continue
assert len(user_stacks) > 0
# In some cases we may not have a useful stack. Look for a
# useful stack
stack = None
for s in user_stacks:
if len(s) == 0:
continue
stack = s
break
if stack is None:
msg = f"{source.name()}, a closed over free variable"
else:
tb = "".join(traceback.format_list(stack))
extra = ""
if len(user_stacks) > 1:
extra = f"(elided {len(user_stacks)-1} more accesses)"
msg = f"{source.name()}, accessed at:\n{tb}{extra}"
# TODO: option to print ALL of the stack traces at once
input_errors.append(msg)
if input_errors:
raise UserError(
UserErrorType.INVALID_INPUT,
"Cannot export model which references tensors that are neither "
"buffers/parameters/constants nor are direct inputs. For each tensor, if you'd "
"like this tensor to be an explicit input, add it as a dummy argument "
"to the top-level model definition you are exporting; if you would "
"like its value to be embedded as an exported constant, wrap its access "
"in a function marked with @assume_constant_result.\n\n"
+ "\n\n".join(input_errors),
)
def rewrite_signature(
f_sig,
graph,
fake_mode,
flat_args,
in_spec,
example_fake_inputs,
graph_captured_input,
graph_captured_output,
dynamo_traced_result,
):
orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec)
def produce_matching(sources, candidates):
source_types = " or ".join(
[
desc + " (" + ", ".join([str(type(arg)) for arg in args]) + ")"
for desc, args in sources.items()
]
)
source_args = [arg for args in sources.values() for arg in args]
matched_elements_positions = []
dict_of_source_args = dict()
for i, arg in enumerate(source_args):
dict_of_source_args[id(arg)] = i
for candidate_desc, candidate_args in candidates.items():
for i, arg in enumerate(candidate_args):
# 1-element tensor arg can be unspec int/float
if isinstance(arg, torch.Tensor) and torch.numel(arg) == 1:
if id(arg) in dict_of_source_args:
matched_elements_positions.append(dict_of_source_args[id(arg)])
elif id(arg.item()) in dict_of_source_args:
matched_elements_positions.append(
dict_of_source_args[id(arg.item())]
)
else:
raise AssertionError(
f"{candidate_desc} #{i} ({type(arg)}) is not among {source_types}"
)
else:
if id(arg) not in dict_of_source_args:
raise AssertionError(
f"{candidate_desc} #{i} ({type(arg)}) is not among {source_types}"
)
matched_elements_positions.append(dict_of_source_args[id(arg)])
return matched_elements_positions
matched_input_elements_positions = produce_matching(
sources={"original args": flat_args},
candidates={"graph-captured input": graph_captured_input},
)
flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result)
assert graph_captured_output is not None
matched_output_elements_positions = produce_matching(
sources={
"graph-captured outputs": list(graph_captured_output),
"original args": flat_args,
},
candidates={"traced result": flat_results_traced},
)
new_graph = FlattenInputOutputSignature(
graph,
flat_args,
matched_input_elements_positions,
matched_output_elements_positions,
example_fake_inputs,
fake_mode,
).transform()
# Make dynamo graph to have same input/output spec as user code
def argument_names(f_sig, args, kwargs) -> List[str]:
def signature_to_fullargspec(sig: inspect.Signature):
# Get a list of Parameter objects from the Signature object
params = list(sig.parameters.values())
# Separate positional arguments, keyword-only arguments and varargs/varkw
args = [
p.name
for p in params
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
kwonlyargs = [
p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY
]
varargs = next(
(p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL),
None,
)
varkw = next(
(p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD),
None,
)
# Get default values for positional arguments and keyword-only arguments
defaults = tuple(
p.default
for p in params
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and p.default is not inspect.Parameter.empty
)
kwonlydefaults = {
p.name: p.default
for p in params
if p.kind == inspect.Parameter.KEYWORD_ONLY
and p.default is not inspect.Parameter.empty
}
# Get annotations for parameters and return value
annotations = {}
if sig.return_annotation:
annotations = {"return": sig.return_annotation}
for parameter in params:
annotations[parameter.name] = parameter.annotation
# Return a FullArgSpec object with the extracted attributes
return inspect.FullArgSpec(
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations
)
fullargspec = signature_to_fullargspec(f_sig)
# 1. Map `args` 1-to-1 to positional arguments in original signature.
input_strs = fullargspec.args[: len(args)]
if len(args) > len(fullargspec.args):
# 2. If there are more arguments left in `args`, they map to varargs in original
# signature. Assign names as {varargs}_0, {varargs}_1, ...
assert fullargspec.varargs is not None, "More arguments than expected"
input_strs += [
f"{fullargspec.varargs}_{i}"
for i in range(0, len(args) - len(input_strs))
]
elif len(args) < len(fullargspec.args):
# 3. If there are fewer arguments in `args` than `fullargspec.args`,
# it implies these are arguments either with default values, or provided in
# `kwargs`. The former can be safely ignored. Because Dynamo.export does not
# export them as part of the function signature. The latter will be handled
# in the next step.
for unprovided_arg in fullargspec.args[
len(args) : -len(fullargspec.defaults or [])
]:
assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}"
# 4. Keyword arguments provided in `kwargs`.
input_strs += list(kwargs.keys())
# 5. Keyword-only arguments with default values if not provided are not exported
# as part of the function signature.
for kwonly_arg in fullargspec.kwonlyargs:
kwonlydefaults = fullargspec.kwonlydefaults or {}
assert (
kwonly_arg in kwargs or kwonly_arg in kwonlydefaults
), f"Missing keyword only argument {kwonly_arg}"
return input_strs
new_graph.graph._codegen = _PyTreeCodeGen(
_PyTreeInfo(
argument_names(f_sig, orig_args, orig_kwargs),
in_spec,
out_spec_traced,
)
)
new_graph.recompile()
return new_graph
def export(
f: Callable[..., Any],
*extra_args,
aten_graph: bool = False,
pre_dispatch: bool = False,
decomposition_table: Optional[
Dict[torch._ops.OpOverload, Callable[..., Any]]
] = None,
tracing_mode: str = "symbolic",
constraints: Optional[List[Constraint]] = None,
assume_static_by_default: bool = False,
same_signature: bool = True,
**extra_kwargs,
) -> Callable[..., ExportResult]:
"""
Export an input function f to a format that can be executed outside of PyTorch using the FX graph.
Args:
f (callable): A PyTorch function to be exported.
aten_graph (bool): If True, exports a graph with ATen operators.
If False, exports a graph with Python operators. Default is False.
pre_dispatch (bool): If True, exports a graph with ATen operators,
but before any logic in the PyTorch dispatcher has run.
This can be useful if you want to apply further transformations on a graph before running it
through autograd, autocast, or any other functionalities that are integrated into the dispatcher.
This flag is only valid if aten_graph=True is set.
Default is False.
decomposition_table (dict): A dictionary that maps operators to their decomposition functions.
Required if aten_graph or tracing_mode is specified. Default is None.
tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic".
same_signature (bool): If True, rewrite the returned graph's signature to be the same as f.
Returns:
A function that given args and kwargs, returns a tuple of (graph, guards)
Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options.
Guards: The guards we accumulated during tracing f above
Raises:
AssertionError: If decomposition_table is specified without setting aten_graph=True,
or if graph breaks during tracing in export.
AssertionError: If Dynamo input and output is not consistent with traced input/output.
Note - this headerdoc was authored by ChatGPT, with slight modifications by the author.
"""
# Deal with "local variable referenced before assignment"
_f = f
_assume_static_by_default = assume_static_by_default
def inner(*args, **kwargs):
f = _f
assume_static_by_default = _assume_static_by_default
check_if_dynamo_supported()
torch._C._log_api_usage_once("torch._dynamo.export")
if decomposition_table is not None:
assert (
aten_graph
), "Specifying a decomposition_table table or tracing mode is illegal without setting aten_graph=True"
if pre_dispatch:
assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True"
f = innermost_fn(f)
call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f
original_signature = inspect.signature(call_to_inspect)
graph = None
out_guards = None
graph_captured_input = None
graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None
fake_mode = None
def guard_export_print(guards: Set[_guards.Guard]):
nonlocal out_guards
assert (
out_guards is None
), "whole graph export entails exactly one guard export"
out_guards = guards
example_inputs = []
def dynamo_normalization_capturing_compiler(
gm: torch.fx.GraphModule, inner_example_inputs
):
nonlocal graph
assert (
graph is None
), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph."
graph = gm
nonlocal fake_mode, example_inputs
# NB: do NOT pass inner_example_inputs here, we are detecting the
# Dynamo allocated fake mode, which should be DISTINCT from a
# potential outer ambient fake mode which the user provided.
# example_inputs is always the user specified inputs, so they
# would have the wrong fake mode attached to them
fake_mode = _guards.detect_fake_mode()
example_inputs = inner_example_inputs
def result_capturing_wrapper(*graph_inputs):
nonlocal graph_captured_result
nonlocal graph_captured_input
graph_captured_input = graph_inputs
assert graph is not None
named_parameters = dict(graph.named_parameters(remove_duplicate=False))
named_buffers = dict(graph.named_buffers(remove_duplicate=False))
ambient_fake_mode = (
_guards.detect_fake_mode(graph_inputs)
if _guards.detect_fake_mode(graph_inputs) is not None
else fake_mode
)
with ambient_fake_mode, enable_python_dispatcher():
params_and_buffers = {
**dict(named_parameters),
**dict(named_buffers),
}
fake_params_buffers = dict()
for name, value in params_and_buffers.items():
fake_params_buffers[name] = ambient_fake_mode.from_tensor(
value, static_shapes=True
)
fake_graph_inputs = pytree.tree_map(
ambient_fake_mode.from_tensor, graph_inputs
)
graph_captured_result = torch.func.functional_call(
graph, fake_params_buffers, fake_graph_inputs
)
return graph_captured_result
return result_capturing_wrapper
# Note: This is needed by rewrite_signature. We need to put it before
# optimize_assert since user program may mutate the inputs.
flat_args, in_spec = pytree.tree_flatten((args, kwargs))
remove_from_cache(f)
constraint_violation_error = None
if tracing_mode != "symbolic":
assume_static_by_default = True
with patch(f"{__name__}.most_recent_backend", None), config.patch(
specialize_int=True,
assume_static_by_default=assume_static_by_default,
automatic_dynamic_shapes=False,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
opt_f = optimize_assert(
dynamo_normalization_capturing_compiler,
hooks=Hooks(
guard_export_fn=guard_export_print,
guard_fail_fn=None,
),
export=True,
export_constraints=constraints,
)(f)
            # TODO(voz): We may have instances of `f` that mutate inputs; we should track side effects and reject.
try:
result_traced = opt_f(*args, **kwargs)
except ConstraintViolationError as e:
constraint_violation_error = e
remove_from_cache(f)
if (
(shape_env := getattr(fake_mode, "shape_env", None)) is not None
and (dim_constraints := shape_env.dim_constraints) is not None
and not skipfiles.check(inspect.getsourcefile(call_to_inspect))
):
dim_constraints.solve()
dim_constraints.remove_redundant_dynamic_results()
msg = dim_constraints.prettify_results(original_signature)
forced_specializations = dim_constraints.forced_specializations()
if forced_specializations:
msg = (
"Some dynamic dimensions need to be specialized because "
"the constraints inferred for them are too complex to specify.\n"
f"{forced_specializations}\n{msg}"
)
if constraint_violation_error:
constraint_violation_error.args = (
constraint_violation_error.args[0] + msg,
)
else:
if forced_specializations:
constraint_violation_error = ConstraintViolationError(msg)
else:
log.info(
"Summary of dimension constraints:%s",
msg,
)
# Error if we have any constraints on static values
for k in shape_env.var_to_range.keys():
if isinstance(k, sympy.Integer):
constraint_violation_error = ConstraintViolationError(
f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
"It appears that you're trying to set a constraint on a "
f"value which we evaluated to have a static value of {k}. "
"Scroll up to see where this constraint was set."
)
if constraint_violation_error:
raise constraint_violation_error
assert (
graph is not None
), "Failed to produce a graph during tracing. Tracing through 'f' must produce a single graph."
assert hasattr(graph, "_source_to_user_stacks")
assert out_guards is not None, "Failed to produce guards during tracing"
assert fake_mode is not None
        # This check needs to happen before aten_graph
        # because the placeholder's _source_node attribute is not preserved by make_fx
if same_signature:
check_signature_rewritable(graph)
# NB: This is mostly hitting the cache; Dynamo already converted these
example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs]
if aten_graph:
# Running graph with interpreter is needed for propagating the stack_trace
def graph_with_interpreter(*args):
with torch.fx.traceback.preserve_node_meta():
return torch.fx.Interpreter(graph).run(*args)
with maybe_disable_fake_tensor_mode(), enable_python_dispatcher(), (
fake_mode
):
try:
graph = make_fx(
graph_with_interpreter,
decomposition_table=decomposition_table,
tracing_mode="real",
_allow_non_fake_inputs=True,
pre_dispatch=pre_dispatch,
_allow_fake_constant=False,
)(*example_fake_inputs)
except CondOpArgsMismatchError as e:
# Wrap the internal error to the user-facing error
raise UserError(UserErrorType.DYNAMIC_CONTROL_FLOW, str(e))
if same_signature:
graph = rewrite_signature(
original_signature,
graph,
fake_mode,
flat_args,
in_spec,
example_fake_inputs,
graph_captured_input,
graph_captured_result,
result_traced,
)
# Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
graph.meta["input_shape_constraints"] = (
[constraint.serializable_spec for constraint in constraints]
if constraints
else []
)
return ExportResult(graph, out_guards)
if extra_args or extra_kwargs:
warnings.warn(
"export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
"If you don't migrate, we may break your export call in the future if your user defined kwargs "
"conflict with future kwargs added to export(f)."
)
return inner(*extra_args, **extra_kwargs)
else:
return inner
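# Illustrative use of export() (a sketch, not part of the original module):
#   def fn(x, y):
#       return x + y
#   gm, guards = export(fn)(torch.randn(2), torch.randn(2))
#   gm.print_readable()   # the captured torch.fx graph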
def optimize_assert(
backend,
*,
hooks=Hooks(None, None),
export=False,
export_constraints=None,
dynamic=None,
):
"""
The same as `torch._dynamo.optimize(backend, nopython=True)`
"""
backend = get_compiler_fn(backend)
# Find if backend has any extra context manager
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
return _optimize_catch_errors(
convert_frame.convert_frame_assert(
backend, export=export, export_constraints=export_constraints
),
hooks,
backend_ctx_ctor,
export=export,
dynamic=dynamic,
)
class TorchPatcher:
@staticmethod
@functools.lru_cache(None)
def patch():
# A better way to disable the following would be decorate the source
# functions with @torch._disable_dynamo. However, this causes issues
# with torch.deploy internally.
from .decorators import disable
torch.jit.trace = disable(torch.jit.trace)
torch.jit.trace_module = disable(torch.jit.trace_module)
torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph)
torch.fx._symbolic_trace.Tracer.trace = disable(
torch.fx._symbolic_trace.Tracer.trace
)
torch.distributions.Distribution.set_default_validate_args(False)
from ..optim import (
adadelta,
adagrad,
adam,
adamax,
adamw,
asgd,
lbfgs,
nadam,
radam,
rmsprop,
rprop,
sgd,
sparse_adam,
)
optimizer_modules = {
adadelta,
adagrad,
adam,
adamax,
adamw,
asgd,
lbfgs,
nadam,
radam,
rmsprop,
rprop,
sgd,
sparse_adam,
}
disabled_multi_tensor_opt_modules = {
adamax,
nadam,
radam, # data-dependent control flow
sgd, # for now, until we can speed up compilation (this affects the benchmarks)
}
for opt_mod in optimizer_modules:
opt_name = opt_mod.__name__.split(".")[-1]
multi_tensor_fn_name = f"_multi_tensor_{opt_name}"
fused_fn_name = f"_fused_{opt_name}"
if (
hasattr(opt_mod, multi_tensor_fn_name)
and opt_mod in disabled_multi_tensor_opt_modules
):
setattr(
opt_mod,
multi_tensor_fn_name,
disable(getattr(opt_mod, multi_tensor_fn_name)),
)
if hasattr(opt_mod, fused_fn_name):
setattr(
opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name))
)
optimizer_classes = [
opt
for opt in torch.optim.__dict__.values()
if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer)
]
# Note: we don't support sparsity, data-dependent control, or tracing through backwards
excluded_optimizer_classes = {
torch.optim.SparseAdam,
torch.optim.RAdam,
torch.optim.LBFGS,
}
for opt in optimizer_classes:
if opt in excluded_optimizer_classes:
opt.step = disable(opt.step)
if hasattr(opt, "_init_group"):
opt._init_group = disable(opt._init_group)
# disable any currently set hooks
# Note: we only want to disable the profiling hook
# which is the *last* hook applied, we want to keep the no_grad hook
hooked = getattr(opt.step, "hooked", False)
if hooked:
unwrapped_step = getattr(opt.step, "__wrapped__", None)
if unwrapped_step:
opt.step = unwrapped_step
# disable future hooking
opt.step.hooked = True
torch._dynamo.variables.lists._register_dynamo_list_to_tree_spec()
torch._dynamo.variables.lists._register_dynamo_tuple_to_tree_spec()
torch._dynamo.variables.dicts._register_dynamo_dict_to_tree_spec()
@staticmethod
def suppress_torch_distributed_warnings(fn):
def inner_fn(*args, **kwargs):
warnings.filterwarnings(
"ignore", category=UserWarning, module="torch.distributed"
)
return fn(*args, **kwargs)
return inner_fn
| [
"[email protected]"
]
| |
3c2e5aaa02755fa348d33bdb44613ba9ceabf258 | 07c4c39a3f43aa41327702329cddf555ac489f0e | /.PyCharmCE2019.1/system/python_stubs/1499390420/typed_ast/_ast27/Import.py | bcab2971d027f20d058802751dc4c2363981afe7 | []
| no_license | shirotakoki/teran | a2ba42c2d2c605c7421b35dc1dfa5f51baec0fd7 | 13c5e8d7484d148c3490726aa860d5a10165381b | refs/heads/master | 2023-02-04T21:18:51.829188 | 2020-09-27T08:26:54 | 2020-09-27T08:26:54 | 323,551,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # encoding: utf-8
# module typed_ast._ast27
# from C:\Users\teran\AppData\Roaming\Python\Python37\site-packages\typed_ast\_ast27.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
from .stmt import stmt
class Import(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
| [
"[email protected]"
]
| |
bb4f37c64f211df4f9a73f5b49800a64ada951c9 | f925499f896b012624118cfafd02fef76ff5075a | /src/testcase/GN_Y201J/input_case/GN_Y201J_Over_Day.py | 227028df3fa47b789517261bde3cd7c1da6899a7 | [
"Apache-2.0"
]
| permissive | maiyajj/AutoTest_script-Appium_Connect | f7c06db1d2f58682d1a9d6f534f7dd5fb65d766d | f9c2c42c281a9e2f984acb4a72dda0694b053f22 | HEAD | 2019-07-26T01:39:48.413753 | 2018-04-11T02:11:38 | 2018-04-11T02:11:38 | 112,449,369 | 30 | 22 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # coding=utf-8
try:
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_001 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_002 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_003 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_004 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_005 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_006 import *
from src.testcase.GN_Y201J.case.GN_Y201J_OVER_DAY.GN_Y201J_OVER_DAY_007 import *
except ImportError as e:
print(e)
| [
"[email protected]"
]
| |
fb01fad045d376f95d097d9da88e8099c7a6cfdd | 36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1 | /restaurants/migrations/0062_auto_20181226_0610.py | 3d00e07850b262bdc620a65cc97e2a5a2c9e49af | []
| no_license | phufoxy/fotourNew | 801ab2518424118020dc6e5f31a7ba90a654e56a | 6048c24f5256c8c5a0d18dc7b38c106a7c92a29c | refs/heads/master | 2023-04-13T01:34:22.510717 | 2018-12-26T03:46:09 | 2018-12-26T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.1.4 on 2018-12-26 06:10
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0061_auto_20181226_0609'),
]
operations = [
migrations.AlterField(
model_name='comment_restaurant',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 12, 26, 6, 9, 58, 878970)),
),
]
| [
"[email protected]"
]
| |
1e1c0f99075f01def2a23ac3fa4b1465b418cc2a | e44c83395d2ddd1e1b7c1e521d360f2ef8d585d0 | /gitlab-new/landchina/landchina.py | 8b8fd2fccd158e28a5533e0137c4282c437ec99c | []
| no_license | zhoujx4/Crawls | 63ebcac5b4c0bbccdde56e6a2f5efbc4091d03e0 | 94b3ac88d7e49cb4a03e7b211a9437709d1c371c | refs/heads/master | 2020-12-23T15:25:48.041965 | 2020-01-30T10:35:19 | 2020-01-30T10:35:19 | 237,189,197 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,121 | py | import fitz
import datetime
import time
from time import sleep
import re
import os
import sys
import csv
import socket
import random
from urllib import parse
from collections.abc import Iterable
from collections.abc import Mapping
from PIL import Image
from landchina.settings import settings
sys.path.append("..")
from library.commonmethodclass import CommonMethodClass
class LandchinaSpider(object):
"""
    Crawl the page https://www.landchina.com/default.aspx?tabid=263
    Notes:
    1. 20190808: crawling with the Chrome webdriver failed (the symptom was that
       keywords such as "广东省" could be typed into the webdriver-driven
       browser, but after clicking "查询" the page loaded for about one second
       and then stopped loading).
    2. 20190812: tried crawling with scrapy; the code was abandoned before it
       was finished, in favour of approach 3 below.
    3. Crawl with image recognition plus a packet-capture tool, implemented in
       C++, Python, and JScript; this basically runs unattended.
"""
name = "landchina"
now = None
today = None
settings = None
root_path = None
log_dir = None
main_log_file_path = None
debug = False
crawled_dir = None
html_dir = None
output_folder_name = None
input_folder_name = None
base_uri = None
browser = None
tabid_list = None
input_keyword_dict = None
list_csv_file_path = None
wait_time = None
missed_url_file_name = ""
input_box_dict = {
263: "TAB_QuerySubmitConditionData",
226: "TAB_queryTblEnumItem_75",
}
keyword_english = {}
replace_list = ["市本级", "市", "县", "区" ]
def __init__(self ):
self.init_self_attributes( )
def init_self_attributes(self):
self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
self.today = datetime.datetime.now().strftime("%Y%m%d")
self.settings = settings
self.root_path = self.settings.get( name="PROJECT_PATH", default="" )
self.log_dir = self.settings.get( name="LOG_DIR", default="" )
self.main_log_file_path = os.path.join( self.log_dir, self.settings.get( name="MAIN_LOG_FILE_NAME", default="" ) )
self.debug = self.settings.get( name = "PROJECT_DEBUG", default=False )
self.crawled_dir = self.settings.get( name="CRAWLED_DIR", default = "" )
self.html_dir = self.settings.get( name="HTML_DIR", default = "" )
self.output_folder_name = self.settings.get( name = "OUTPUT_FOLDER_NAME", default="" )
self.input_folder_name = self.settings.get( name = "INPUT_FOLDER_NAME", default="" )
self.base_uri = self.settings.get( name = "BASE_URI", default="" )
self.browser = self.settings.get( name = "BROWSER", default="" )
self.tabid_list = self.settings.get( name = "TABID_LIST", default="" )
self.input_keyword_dict = self.settings.get( name = "INPUT_KEYWORD_DICT", default="" )
self.list_csv_file_path = os.path.join( self.crawled_dir, f"landchina_list_{self.today}.csv" )
self.wait_time = 2 if self.debug else 3
self.maximal_requests = self.settings.get( name = "MAXIMAL_REQUESTS", default=50 )
self.missed_url_file_name = self.settings.get( name = "MISSED_URL_FILE_NAME", default="" )
self.keyword_english = self.settings.get( name = "KEYWORD_ENGLISH", default={} )
def make_uri_list(self):
url_list = []
for one_id in self.tabid_list:
url_list.append( f"{self.base_uri}?tabid={one_id}" )
return url_list
def send_keywords(self):
"""
revision: 20190813
"""
url_list = self.make_uri_list()
log_file_path = os.path.join( self.log_dir, self.missed_url_file_name )
for index, one_url in enumerate(url_list):
tabid = self.tabid_list[ index ]
keyword_list = self.input_keyword_dict[tabid]
input_box_xpath = self.input_box_dict[tabid]
for keyword in keyword_list:
keyword_en = self.keyword_english[keyword] if keyword in self.keyword_english.keys() else keyword
def parse_one_index_page_response_field(self, webdriver = None ):
info_list = []
if webdriver is None:
return info_list
tr_list = webdriver.find_elements_by_xpath( "//table[@id='TAB_contentTable']/tbody/tr[not(@class='gridHeader')]" )
for one_tr in tr_list:
td_list = one_tr.find_elements_by_xpath("./td")
value_list = []
this_row_dict = {}
link = ""
for one_td in td_list:
value_list.append( one_td.text )
link_a = self.get_element( webdriver = one_td, xpath = "./a", elements_bool = False, use_id = False )
if link_a is not None and 1 > len(link):
link = link_a.get_attribute("href")
if 4 == len( value_list ):
this_row_dict["序号"] = value_list[0].replace(".", "")
this_row_dict["行政区代码"] = value_list[1]
this_row_dict["标题"] = value_list[2]
this_row_dict["发布时间"] = value_list[3]
this_row_dict["detailed_url"] = link
info_list.append(this_row_dict)
return info_list
def execute(self):
if type(self.tabid_list) not in [list] or type(self.input_keyword_dict) not in [dict] or 1 > len( self.tabid_list ):
error_msg = f"self.tabid_list or self.input_keyword_dict is NOT correct: {self.tabid_list}, {self.input_keyword_dict}"
content = f"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, {error_msg}"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
return False
for one_category in self.tabid_list:
if one_category not in self.input_keyword_dict.keys():
error_msg = f"{one_category} is NOT in {self.input_keyword_dict.keys()}"
content = f"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, {error_msg}"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
return False
counter = self.do_requests( )
content = f"At {self.now}, {counter} requests have been sent"
CommonMethodClass.write_log( content = content, log_file_path = self.main_log_file_path )
def test(self):
path = self.whereis_chromedriver()
print( path )
# print( self.district_name_dict )
# district_list = ["南澳县", "佛山市本级", "连南瑶族自治县", "梅州市本级", "雷州市", ]
# self.check_district_names( district_list = district_list, keyword = "广东省" )
if __name__=='__main__':
app = LandchinaSpider( )
# app.test()
app.execute()
| [
"[email protected]"
]
| |
00a0f660b96836d3c0823a3c10c5289c90c74ab4 | 82ef9a0dd1618a28770597227acfc0150b948af2 | /wearnow/plugins/sidebar/categorysidebar.py | a878520c4e392dca2b4a5ca6025627ff95349c32 | []
| no_license | bmcage/wearnow | ef32a7848472e79e56763b38551835aa97864b21 | c8dfa75e1ea32b0c021d71c4f366ab47104c207e | refs/heads/master | 2021-01-16T00:27:59.597812 | 2016-01-19T11:55:03 | 2016-01-19T11:55:03 | 37,195,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,347 | py | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from wearnow.tex.config import config
from wearnow.gui.basesidebar import BaseSidebar
from wearnow.gui.viewmanager import get_available_views, views_to_show
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
UICATEGORY = '''<ui>
<toolbar name="ToolBar">
<placeholder name="ViewsInCategory">%s
</placeholder>
</toolbar>
</ui>
'''
#-------------------------------------------------------------------------
#
# CategorySidebar class
#
#-------------------------------------------------------------------------
class CategorySidebar(BaseSidebar):
"""
A sidebar displaying a column of toggle buttons that allows the user to
change the current view.
"""
def __init__(self, dbstate, uistate, categories, views):
self.viewmanager = uistate.viewmanager
self.buttons = []
self.button_handlers = []
self.ui_category = {}
self.merge_ids = []
self.window = Gtk.ScrolledWindow()
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.window.add(vbox)
self.window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.window.show()
use_text = config.get('interface.sidebar-text')
for cat_num, cat_name, cat_icon in categories:
# create the button and add it to the sidebar
button = self.__make_sidebar_button(use_text, cat_num,
cat_name, cat_icon)
vbox.pack_start(button, False, True, 0)
# Enable view switching during DnD
button.drag_dest_set(0, [], 0)
button.connect('drag_motion', self.cb_switch_page_on_dnd, cat_num)
        # toolbar buttons for switching views in a category
uitoolitems = ''
for view_num, view_name, view_icon in views[cat_num]:
pageid = 'page_%i_%i' % (cat_num, view_num)
uitoolitems += '\n<toolitem action="%s"/>' % pageid
if len(views[cat_num]) > 1:
self.ui_category[cat_num] = UICATEGORY % uitoolitems
vbox.show_all()
def get_top(self):
"""
Return the top container widget for the GUI.
"""
return self.window
def view_changed(self, cat_num, view_num):
"""
Called when the active view is changed.
"""
# Add buttons to the toolbar for the different view in the category
uimanager = self.viewmanager.uimanager
list(map(uimanager.remove_ui, self.merge_ids))
if cat_num in self.ui_category:
mergeid = uimanager.add_ui_from_string(self.ui_category[cat_num])
self.merge_ids.append(mergeid)
# Set new button as selected
self.__handlers_block()
for index, button in enumerate(self.buttons):
if index == cat_num:
button.set_active(True)
else:
button.set_active(False)
self.__handlers_unblock()
def __handlers_block(self):
"""
Block signals to the buttons to prevent spurious events.
"""
for idx in range(len(self.buttons)):
self.buttons[idx].handler_block(self.button_handlers[idx])
def __handlers_unblock(self):
"""
Unblock signals to the buttons.
"""
for idx in range(len(self.buttons)):
self.buttons[idx].handler_unblock(self.button_handlers[idx])
def cb_view_clicked(self, radioaction, current, cat_num):
"""
Called when a button causes a view change.
"""
view_num = radioaction.get_current_value()
self.viewmanager.goto_page(cat_num, view_num)
def __category_clicked(self, button, cat_num):
"""
Called when a button causes a category change.
"""
# Make the button active. If it was already active the category will
# not change.
button.set_active(True)
self.viewmanager.goto_page(cat_num, None)
def __make_sidebar_button(self, use_text, index, page_title, page_stock):
"""
Create the sidebar button. The page_title is the text associated with
the button.
"""
# create the button
button = Gtk.ToggleButton()
button.set_relief(Gtk.ReliefStyle.NONE)
self.buttons.append(button)
# add the tooltip
button.set_tooltip_text(page_title)
# connect the signal, along with the index as user data
handler_id = button.connect('clicked', self.__category_clicked, index)
self.button_handlers.append(handler_id)
button.show()
# add the image. If we are using text, use the BUTTON (larger) size.
# otherwise, use the smaller size
hbox = Gtk.Box()
hbox.show()
image = Gtk.Image()
if use_text:
image.set_from_icon_name(page_stock, Gtk.IconSize.BUTTON)
else:
image.set_from_icon_name(page_stock, Gtk.IconSize.DND)
image.show()
hbox.pack_start(image, False, False, 0)
hbox.set_spacing(4)
# add text if requested
if use_text:
label = Gtk.Label(label=page_title)
label.show()
hbox.pack_start(label, False, True, 0)
button.add(hbox)
return button
def cb_switch_page_on_dnd(self, widget, context, xpos, ypos, time, page_no):
"""
Switches the page based on drag and drop.
"""
self.__handlers_block()
if self.viewmanager.notebook.get_current_page() != page_no:
self.viewmanager.notebook.set_current_page(page_no)
self.__handlers_unblock()
def inactive(self):
"""
Called when the sidebar is hidden.
"""
uimanager = self.viewmanager.uimanager
list(map(uimanager.remove_ui, self.merge_ids))
| [
"[email protected]"
]
| |
989d249c6266a0283ffbc88c2776a2ecdb833eca | c25f64f43660d5a5065327f258f3e7348d93e438 | /asyncio_redis/encoders.py | 68a493e85b54bf349b6f1c61466fd292929b58c3 | [
"BSD-2-Clause-Views"
]
| permissive | jonathanslenders/asyncio-redis | 96735d4270453eaa8435e8e39b5c536abb1a7d86 | 50d71a53798967f7fdf1be36b8447e322dedc5ee | refs/heads/master | 2022-04-06T16:06:50.671959 | 2020-06-12T21:18:37 | 2020-08-11T13:56:51 | 13,547,040 | 495 | 83 | NOASSERTION | 2021-01-11T13:44:37 | 2013-10-13T21:31:36 | Python | UTF-8 | Python | false | false | 2,095 | py | """
The redis protocol only knows about bytes, but we like to have strings inside
Python. This file contains some helper classes for decoding the bytes to
strings and encoding the other way around. We also have a `BytesEncoder`, which
provides raw access to the redis server.
"""
__all__ = ("BaseEncoder", "BytesEncoder", "UTF8Encoder")
class BaseEncoder:
"""
Abstract base class for all encoders.
"""
#: The native Python type from which we encode, or to which we decode.
native_type = None
def encode_from_native(self, data):
"""
Encodes the native Python type to network bytes.
Usually this will encode a string object to bytes using the UTF-8
encoding. You can either override this function, or set the
`encoding` attribute.
"""
raise NotImplementedError
def decode_to_native(self, data):
"""
Decodes network bytes to a Python native type.
It should always be the reverse operation of `encode_from_native`.
"""
raise NotImplementedError
class BytesEncoder(BaseEncoder):
"""
For raw access to the Redis database.
"""
#: The native Python type from which we encode, or to which we decode.
native_type = bytes
def encode_from_native(self, data):
return data
def decode_to_native(self, data):
return data
class StringEncoder(BaseEncoder):
"""
Abstract base class for all string encoding encoders.
"""
#: Redis keeps all values in binary. Set the encoding to be used to
#: decode/encode Python string values from and to binary.
encoding = None
#: The native Python type from which we encode, or to which we decode.
native_type = str
def encode_from_native(self, data):
""" string to bytes """
return data.encode(self.encoding)
def decode_to_native(self, data):
""" bytes to string """
return data.decode(self.encoding)
class UTF8Encoder(StringEncoder):
"""
Encode strings to and from utf-8 bytes.
"""
encoding = "utf-8"
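# A minimal round-trip sketch of the encoders above (illustrative only):
#   enc = UTF8Encoder()
#   assert enc.decode_to_native(enc.encode_from_native("héllo")) == "héllo"
#   raw = BytesEncoder()
#   assert raw.encode_from_native(b"abc") == b"abc"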
| [
"[email protected]"
]
| |
a794664d21b2877b6ebe81762c14babbf6aa8140 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/shell/v1/shell-v1-py/scripts/fixup_shell_v1_keywords.py | c4b2aa4ae9e1c4e9409492197cc740fab3accbec | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,211 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
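# e.g. partition(lambda x: x > 1, [0, 1, 2, 3]) returns ([2, 3], [0, 1]).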
class shellCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'add_public_key': ('environment', 'key', ),
'authorize_environment': ('name', 'access_token', 'id_token', 'expire_time', ),
'get_environment': ('name', ),
'remove_public_key': ('environment', 'key', ),
'start_environment': ('name', 'access_token', 'public_keys', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=shellCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the shell client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
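# Example invocation (hypothetical paths):
#   python fixup_shell_v1_keywords.py -d ./old_src -o ./fixed_src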
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
15855cb736dc27f1def8b56d28bc287b6cac6fdf | 1207c58fa92dad30050b9f3bcc1173d7e7034c73 | /train_mnist/train.py | 3259594eb13f1d915ccdf55d49383f85fcb36d13 | []
| no_license | chagge/rethinking-generalization | b49cf59c8d4d2c3607fa2074a80f86d8e682150c | 317c1ae29ae119d7399e8e04e95eb903f4d1c045 | refs/heads/master | 2021-01-22T09:04:55.449746 | 2017-02-12T16:41:09 | 2017-02-12T16:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | import numpy as np
import os, sys, time, math
from chainer import cuda
from chainer import functions as F
import pandas as pd
sys.path.append(os.path.split(os.getcwd())[0])
import dataset
from progress import Progress
from mnist_tools import load_train_images, load_test_images
from model import model
from args import args
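# compute_accuracy evaluates in equal chunks of 500 samples and averages the
# per-chunk accuracies; with equal-sized chunks this equals the overall accuracy.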
def compute_accuracy(image_batch, label_batch):
num_data = image_batch.shape[0]
images_l_segments = np.split(image_batch, num_data // 500)
label_ids_l_segments = np.split(label_batch, num_data // 500)
sum_accuracy = 0
for image_batch, label_batch in zip(images_l_segments, label_ids_l_segments):
distribution = model.discriminate(image_batch, apply_softmax=True, test=True)
accuracy = F.accuracy(distribution, model.to_variable(label_batch))
sum_accuracy += float(accuracy.data)
return sum_accuracy / len(images_l_segments)
def main():
# load MNIST images
images, labels = dataset.load_train_images()
# config
config = model.config
# settings
max_epoch = 1000
num_trains_per_epoch = 500
num_validation_data = 10000
batchsize = 128
# seed
np.random.seed(args.seed)
if args.gpu_device != -1:
cuda.cupy.random.seed(args.seed)
# save validation accuracy per epoch
csv_results = []
    # create the train/validation split
training_images, training_labels, validation_images, validation_labels = dataset.split_data(images, labels, num_validation_data, seed=args.seed)
# training
progress = Progress()
for epoch in xrange(1, max_epoch):
progress.start_epoch(epoch, max_epoch)
sum_loss = 0
for t in xrange(num_trains_per_epoch):
# sample from data distribution
image_batch, label_batch = dataset.sample_data(training_images, training_labels, batchsize, binarize=False)
distribution = model.discriminate(image_batch, apply_softmax=False)
loss = F.softmax_cross_entropy(distribution, model.to_variable(label_batch))
sum_loss += float(loss.data)
model.backprop(loss)
if t % 10 == 0:
progress.show(t, num_trains_per_epoch, {})
model.save(args.model_dir)
train_accuracy = compute_accuracy(training_images, training_labels)
validation_accuracy = compute_accuracy(validation_images, validation_labels)
progress.show(num_trains_per_epoch, num_trains_per_epoch, {
"loss": sum_loss / num_trains_per_epoch,
"accuracy (validation)": validation_accuracy,
"accuracy (train)": train_accuracy,
})
# write accuracy to csv
csv_results.append([epoch, validation_accuracy, progress.get_total_time()])
data = pd.DataFrame(csv_results)
data.columns = ["epoch", "accuracy", "min"]
data.to_csv("{}/result.csv".format(args.model_dir))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
056345d4c6d7aad110c67c6acea795d071247950 | 7d7d37a66e970af3cc0beca3babba9ef18f8d7c1 | /Searching Algorithms/LinearSearch.py | 42f93963971baac2bab579ee83d77359c8443b40 | [
"MIT"
]
| permissive | DrakeEntity/DataStructureAndAlgorithm | 762e4d01024252754c1308e642803cccaa461fb0 | 9c942217e1a31f143e739682680c12f67d717ee3 | refs/heads/master | 2022-04-21T02:25:39.318888 | 2020-04-24T15:32:33 | 2020-04-24T15:32:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | class Search:
""" True : If searching objet found in list it will return True
False : If Searching object not found in list it will return False
"""
def __init__(self,list,search_for):
self.list = list
self.search_for = search_for
def __len__(self):
return len(self.list)
def linear_search(self):
"""
        In this type of search, a sequential scan is made over the items one by
        one and every item is checked. If a match is found, the search stops and
        True is printed; otherwise the search continues to the end of the
        data structure and False is printed.
"""
search_at = 0
search_res = False
# match the value with each data point
while search_at < len(self.list) and search_res is False:
if self.list[search_at] == self.search_for:
search_res = True
else:
search_at = search_at + 1
print(f'{search_res}')
data = [4, 534, 646, 3, 6, 6, 33, 6, 34, 643, 32, 4, 43, 6]
result = Search(data, 5)
result.linear_search()
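# Prints "False" here, since 5 does not occur in the list above.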
| [
"[email protected]"
]
| |
764cdd64ee9f866e42d940df2f06f450d0e88fd7 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_catalog_data_custom_option_interface.py | ca5ca581745279b4da16c3436707fa9b39887277 | []
| no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.catalog_data_custom_option_interface import CatalogDataCustomOptionInterface
class TestCatalogDataCustomOptionInterface(unittest.TestCase):
""" CatalogDataCustomOptionInterface unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCatalogDataCustomOptionInterface(self):
"""
Test CatalogDataCustomOptionInterface
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.catalog_data_custom_option_interface.CatalogDataCustomOptionInterface()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
7a5bc213722fdefb013d9c11de37ab21381a8ff8 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Softsign_base.py | 206436de22ef7510496f8c5810e52b215c72787a | []
| no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 623 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Softsign_base():
"""test Softsign_base"""
jit_case = JitTrans(case=yml.get_case_info("Softsign_base"))
jit_case.jit_run()
| [
"[email protected]"
]
| |
179ce3c3fb1728e12287ef4537916aafa8484c19 | 09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce | /pennylane/qchem/structure.py | 1d9fe6bfe7f3dbbe62dadcec32ca9482ab05c3dd | [
"Apache-2.0"
]
| permissive | PennyLaneAI/pennylane | 458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7 | 0843183ff15a013c2622af5e61fea431d18076d3 | refs/heads/master | 2023-09-03T17:00:43.105784 | 2023-09-01T16:15:07 | 2023-09-01T16:15:07 | 129,936,360 | 1,431 | 410 | Apache-2.0 | 2023-09-14T21:30:56 | 2018-04-17T16:45:42 | Python | UTF-8 | Python | false | false | 21,010 | py | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains functions to read the structure of molecules, build a Hartree-Fock state,
build an active space and generate single and double excitations.
"""
# pylint: disable=too-many-locals
import os
import re
from shutil import copyfile
from pennylane import numpy as np
# Bohr-Angstrom correlation coefficient (https://physics.nist.gov/cgi-bin/cuu/Value?bohrrada0)
bohr_angs = 0.529177210903
def read_structure(filepath, outpath="."):
r"""Read the structure of the polyatomic system from a file and returns
a list with the symbols of the atoms in the molecule and a 1D array
with their positions :math:`[x_1, y_1, z_1, x_2, y_2, z_2, \dots]` in
atomic units (Bohr radius = 1).
The atomic coordinates in the file must be in Angstroms.
The `xyz <https://en.wikipedia.org/wiki/XYZ_file_format>`_ format is supported. Additionally,
the new file ``structure.xyz``, containing the input geometry, is created in a directory with
path given by ``outpath``.
Args:
filepath (str): name of the molecular structure file in the working directory
or the absolute path to the file if it is located in a different folder
outpath (str): path to the output directory
Returns:
tuple[list, array]: symbols of the atoms in the molecule and a 1D array with their
positions in atomic units.
**Example**
>>> symbols, coordinates = read_structure('h2.xyz')
>>> print(symbols, coordinates)
['H', 'H'] [0. 0. -0.66140414 0. 0. 0.66140414]
"""
file_in = filepath.strip()
file_out = os.path.join(outpath, "structure.xyz")
copyfile(file_in, file_out)
symbols = []
coordinates = []
with open(file_out, encoding="utf-8") as f:
for line in f.readlines()[2:]:
symbol, x, y, z = line.split()
symbols.append(symbol)
coordinates.append(float(x))
coordinates.append(float(y))
coordinates.append(float(z))
return symbols, np.array(coordinates) / bohr_angs
def active_space(electrons, orbitals, mult=1, active_electrons=None, active_orbitals=None):
r"""Build the active space for a given number of active electrons and active orbitals.
Post-Hartree-Fock (HF) electron correlation methods expand the many-body wave function
as a linear combination of Slater determinants, commonly referred to as configurations.
This configurations are generated by exciting electrons from the occupied to the
unoccupied HF orbitals as sketched in the figure below. Since the number of configurations
increases combinatorially with the number of electrons and orbitals this expansion can be
truncated by defining an active space.
The active space is created by classifying the HF orbitals as core, active and
external orbitals:
- Core orbitals are always occupied by two electrons
- Active orbitals can be occupied by zero, one, or two electrons
- The external orbitals are never occupied
|
.. figure:: ../../_static/qchem/sketch_active_space.png
:align: center
:width: 50%
|
.. note::
The number of active *spin*-orbitals ``2*active_orbitals`` determines the number of
qubits required to perform the quantum simulations of the electronic structure
of the many-electron system.
Args:
electrons (int): total number of electrons
orbitals (int): total number of orbitals
mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1` for
:math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals.
Possible values for ``mult`` are :math:`1, 2, 3, \ldots`. If not specified,
a closed-shell HF state is assumed.
active_electrons (int): Number of active electrons. If not specified, all electrons
are treated as active.
active_orbitals (int): Number of active orbitals. If not specified, all orbitals
are treated as active.
Returns:
tuple: lists of indices for core and active orbitals
**Example**
>>> electrons = 4
>>> orbitals = 4
>>> core, active = active_space(electrons, orbitals, active_electrons=2, active_orbitals=2)
>>> print(core) # core orbitals
[0]
>>> print(active) # active orbitals
[1, 2]
"""
# pylint: disable=too-many-branches
if active_electrons is None:
ncore_orbs = 0
core = []
else:
if active_electrons <= 0:
raise ValueError(
f"The number of active electrons ({active_electrons}) " f"has to be greater than 0."
)
if active_electrons > electrons:
raise ValueError(
f"The number of active electrons ({active_electrons}) "
f"can not be greater than the total "
f"number of electrons ({electrons})."
)
if active_electrons < mult - 1:
raise ValueError(
f"For a reference state with multiplicity {mult}, "
f"the number of active electrons ({active_electrons}) should be "
f"greater than or equal to {mult - 1}."
)
if mult % 2 == 1:
if active_electrons % 2 != 0:
raise ValueError(
f"For a reference state with multiplicity {mult}, "
f"the number of active electrons ({active_electrons}) should be even."
)
else:
if active_electrons % 2 != 1:
raise ValueError(
f"For a reference state with multiplicity {mult}, "
f"the number of active electrons ({active_electrons}) should be odd."
)
ncore_orbs = (electrons - active_electrons) // 2
core = list(range(ncore_orbs))
if active_orbitals is None:
active = list(range(ncore_orbs, orbitals))
else:
if active_orbitals <= 0:
raise ValueError(
f"The number of active orbitals ({active_orbitals}) " f"has to be greater than 0."
)
if ncore_orbs + active_orbitals > orbitals:
raise ValueError(
f"The number of core ({ncore_orbs}) + active orbitals ({active_orbitals}) cannot "
f"be greater than the total number of orbitals ({orbitals})"
)
homo = (electrons + mult - 1) / 2
if ncore_orbs + active_orbitals <= homo:
raise ValueError(
f"For n_active_orbitals={active_orbitals}, there are no virtual orbitals "
f"in the active space."
)
active = list(range(ncore_orbs, ncore_orbs + active_orbitals))
return core, active
def excitations(electrons, orbitals, delta_sz=0):
r"""Generate single and double excitations from a Hartree-Fock reference state.
Single and double excitations can be generated by acting with the operators
:math:`\hat T_1` and :math:`\hat T_2` on the Hartree-Fock reference state:
.. math::
&& \hat{T}_1 = \sum_{r \in \mathrm{occ} \\ p \in \mathrm{unocc}}
\hat{c}_p^\dagger \hat{c}_r \\
&& \hat{T}_2 = \sum_{r>s \in \mathrm{occ} \\ p>q \in
\mathrm{unocc}} \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s.
In the equations above the indices :math:`r, s` and :math:`p, q` run over the
occupied (occ) and unoccupied (unocc) *spin* orbitals and :math:`\hat c` and
:math:`\hat c^\dagger` are the electron annihilation and creation operators,
respectively.
|
.. figure:: ../../_static/qchem/sd_excitations.png
:align: center
:width: 80%
|
Args:
electrons (int): Number of electrons. If an active space is defined, this
is the number of active electrons.
orbitals (int): Number of *spin* orbitals. If an active space is defined,
this is the number of active spin-orbitals.
delta_sz (int): Specifies the selection rules ``sz[p] - sz[r] = delta_sz`` and
            ``sz[p] + sz[q] - sz[r] - sz[s] = delta_sz`` for the spin-projection ``sz`` of
the orbitals involved in the single and double excitations, respectively.
``delta_sz`` can take the values :math:`0`, :math:`\pm 1` and :math:`\pm 2`.
Returns:
tuple(list, list): lists with the indices of the spin orbitals involved in the
single and double excitations
**Example**
>>> electrons = 2
>>> orbitals = 4
>>> singles, doubles = excitations(electrons, orbitals)
>>> print(singles)
[[0, 2], [1, 3]]
>>> print(doubles)
[[0, 1, 2, 3]]
"""
if not electrons > 0:
raise ValueError(
f"The number of active electrons has to be greater than 0 \n"
f"Got n_electrons = {electrons}"
)
if orbitals <= electrons:
raise ValueError(
f"The number of active spin-orbitals ({orbitals}) "
f"has to be greater than the number of active electrons ({electrons})."
)
if delta_sz not in (0, 1, -1, 2, -2):
raise ValueError(
f"Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({delta_sz})."
)
# define the spin projection 'sz' of the single-particle states
sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(orbitals)])
singles = [
[r, p]
for r in range(electrons)
for p in range(electrons, orbitals)
if sz[p] - sz[r] == delta_sz
]
doubles = [
[s, r, q, p]
for s in range(electrons - 1)
for r in range(s + 1, electrons)
for q in range(electrons, orbitals - 1)
for p in range(q + 1, orbitals)
if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz
]
return singles, doubles
def hf_state(electrons, orbitals):
r"""Generate the occupation-number vector representing the Hartree-Fock state.
The many-particle wave function in the Hartree-Fock (HF) approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
for :math:`N` electrons is represented by the occupation-number vector:
.. math::
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_\mathrm{orbs} \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right.,
where :math:`n_i` indicates the occupation of the :math:`i`-th orbital.
Args:
electrons (int): Number of electrons. If an active space is defined, this
is the number of active electrons.
orbitals (int): Number of *spin* orbitals. If an active space is defined,
this is the number of active spin-orbitals.
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
**Example**
>>> state = hf_state(2, 6)
>>> print(state)
[1 1 0 0 0 0]
"""
if electrons <= 0:
raise ValueError(
f"The number of active electrons has to be larger than zero; "
f"got 'electrons' = {electrons}"
)
if electrons > orbitals:
raise ValueError(
f"The number of active orbitals cannot be smaller than the number of active electrons;"
f" got 'orbitals'={orbitals} < 'electrons'={electrons}"
)
state = np.where(np.arange(orbitals) < electrons, 1, 0)
return np.array(state)
def excitations_to_wires(singles, doubles, wires=None):
r"""Map the indices representing the single and double excitations
generated with the function :func:`~.excitations` to the wires that
the Unitary Coupled-Cluster (UCCSD) template will act on.
Args:
singles (list[list[int]]): list with the indices ``r``, ``p`` of the two qubits
representing the single excitation
:math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`
doubles (list[list[int]]): list with the indices ``s``, ``r``, ``q``, ``p`` of the four
qubits representing the double excitation
:math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`
wires (Iterable[Any]): Wires of the quantum device. If None, will use consecutive wires.
The indices :math:`r, s` and :math:`p, q` in these lists correspond, respectively, to the
occupied and virtual orbitals involved in the generated single and double excitations.
Returns:
tuple[list[list[Any]], list[list[list[Any]]]]: lists with the sequence of wires,
resulting from the single and double excitations, that the Unitary Coupled-Cluster
(UCCSD) template will act on.
**Example**
>>> singles = [[0, 2], [1, 3]]
>>> doubles = [[0, 1, 2, 3]]
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles)
>>> print(singles_wires)
[[0, 1, 2], [1, 2, 3]]
>>> print(doubles_wires)
[[[0, 1], [2, 3]]]
>>> wires=['a0', 'b1', 'c2', 'd3']
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles, wires=wires)
>>> print(singles_wires)
[['a0', 'b1', 'c2'], ['b1', 'c2', 'd3']]
>>> print(doubles_wires)
[[['a0', 'b1'], ['c2', 'd3']]]
"""
if (not singles) and (not doubles):
raise ValueError(
f"'singles' and 'doubles' lists can not be both empty; "
f"got singles = {singles}, doubles = {doubles}"
)
expected_shape = (2,)
for single_ in singles:
if np.array(single_).shape != expected_shape:
raise ValueError(
f"Expected entries of 'singles' to be of shape (2,); got {np.array(single_).shape}"
)
expected_shape = (4,)
for double_ in doubles:
if np.array(double_).shape != expected_shape:
raise ValueError(
f"Expected entries of 'doubles' to be of shape (4,); got {np.array(double_).shape}"
)
max_idx = 0
if singles:
max_idx = np.max(singles)
if doubles:
max_idx = max(np.max(doubles), max_idx)
if wires is None:
wires = range(max_idx + 1)
elif len(wires) != max_idx + 1:
raise ValueError(f"Expected number of wires is {max_idx + 1}; got {len(wires)}")
singles_wires = []
for r, p in singles:
s_wires = [wires[i] for i in range(r, p + 1)]
singles_wires.append(s_wires)
doubles_wires = []
for s, r, q, p in doubles:
d1_wires = [wires[i] for i in range(s, r + 1)]
d2_wires = [wires[i] for i in range(q, p + 1)]
doubles_wires.append([d1_wires, d2_wires])
return singles_wires, doubles_wires
def mol_data(identifier, identifier_type="name"):
r"""Obtain symbols and geometry of a compound from the PubChem Database.
The `PubChem <https://pubchem.ncbi.nlm.nih.gov>`__ database is one of the largest public
repositories for information on chemical substances from which symbols and geometry can be
retrieved for a compound by its name, SMILES, InChI, InChIKey, or PubChem Compound ID (CID) to
build a molecule object for Hartree-Fock calculations. The retrieved atomic coordinates will be
converted to `atomic units <https://en.wikipedia.org/wiki/Bohr_radius>`__ for consistency.
Args:
identifier (str or int): compound's identifier as required by the PubChem database
identifier_type (str): type of the provided identifier - name, CAS, CID, SMILES, InChI, InChIKey
Returns:
Tuple(list[str], array[float]): symbols and geometry (in Bohr radius) of the compound
**Example**
>>> mol_data("BeH2")
(['Be', 'H', 'H'],
tensor([[ 4.79404621, 0.29290755, 0. ],
[ 3.77945225, -0.29290755, 0. ],
[ 5.80882913, -0.29290755, 0. ]], requires_grad=True))
>>> mol_data(223, "CID")
(['N', 'H', 'H', 'H', 'H'],
tensor([[ 0. , 0. , 0. ],
[ 1.82264085, 0.52836742, 0.40402345],
[ 0.01417295, -1.67429735, -0.98038991],
[-0.98927163, -0.22714508, 1.65369933],
[-0.84773114, 1.373075 , -1.07733286]], requires_grad=True))
.. details::
``mol_data`` can also be used with other chemical identifiers - CAS, SMILES, InChI, InChIKey:
>>> mol_data("74-82-8", "CAS")
(['C', 'H', 'H', 'H', 'H'],
tensor([[ 0. , 0. , 0. ],
[ 1.04709725, 1.51102501, 0.93824902],
[ 1.29124986, -1.53710323, -0.47923455],
[-1.47058487, -0.70581271, 1.26460472],
[-0.86795121, 0.7320799 , -1.7236192 ]], requires_grad=True))
>>> mol_data("[C]", "SMILES")
(['C', 'H', 'H', 'H', 'H'],
tensor([[ 0. , 0. , 0. ],
[ 1.04709725, 1.51102501, 0.93824902],
[ 1.29124986, -1.53710323, -0.47923455],
[-1.47058487, -0.70581271, 1.26460472],
[-0.86795121, 0.7320799 , -1.7236192 ]], requires_grad=True))
>>> mol_data("InChI=1S/CH4/h1H4", "InChI")
(['C', 'H', 'H', 'H', 'H'],
tensor([[ 0. , 0. , 0. ],
[ 1.04709725, 1.51102501, 0.93824902],
[ 1.29124986, -1.53710323, -0.47923455],
[-1.47058487, -0.70581271, 1.26460472],
[-0.86795121, 0.7320799 , -1.7236192 ]], requires_grad=True))
>>> mol_data("VNWKTOKETHGBQD-UHFFFAOYSA-N", "InChIKey")
(['C', 'H', 'H', 'H', 'H'],
tensor([[ 0. , 0. , 0. ],
[ 1.04709725, 1.51102501, 0.93824902],
[ 1.29124986, -1.53710323, -0.47923455],
[-1.47058487, -0.70581271, 1.26460472],
[-0.86795121, 0.7320799 , -1.7236192 ]], requires_grad=True))
"""
try:
# pylint: disable=import-outside-toplevel, unused-import, multiple-imports
import pubchempy as pcp
except ImportError as Error:
raise ImportError(
"This feature requires pubchempy.\nIt can be installed with: pip install pubchempy."
) from Error
# https://gist.github.com/lsauer/1312860/264ae813c2bd2c27a769d261c8c6b38da34e22fb#file-smiles_inchi_annotated-js
identifier_patterns = {
"name": re.compile(r"^[a-zA-Z0-9_+-]+$"),
"cas": re.compile(r"^\d{1,7}\-\d{2}\-\d$"),
"smiles": re.compile(
r"^(?!InChI=)(?!\d{1,7}\-\d{2}\-\d)(?![A-Z]{14}\-[A-Z]{10}(\-[A-Z])?)[^J][a-zA-Z0-9@+\-\[\]\(\)\\\/%=#$]{1,}$"
),
"inchi": re.compile(
r"^InChI\=1S?\/[A-Za-z0-9\.]+(\+[0-9]+)?(\/[cnpqbtmsih][A-Za-z0-9\-\+\(\)\,\/\?\;\.]+)*$"
),
"inchikey": re.compile(r"^[A-Z]{14}\-[A-Z]{10}(\-[A-Z])?"),
}
if identifier_type.lower() == "cid":
cid = int(identifier)
else:
if identifier_type.lower() not in identifier_patterns:
raise ValueError(
"Specified identifier type is not supported. Supported type are: name, CAS, SMILES, InChI, InChIKey."
)
try:
if identifier_patterns[identifier_type.lower()].search(identifier):
if identifier_type.lower() == "cas":
identifier_type = "name"
cid = pcp.get_cids(identifier, namespace=identifier_type.lower())[0]
else:
raise ValueError(
f"Specified identifier doesn't seem to match type: {identifier_type}."
)
except (IndexError, pcp.NotFoundError) as exc:
raise ValueError("Specified molecule does not exist in the PubChem Database.") from exc
try:
pcp_molecule = pcp.Compound.from_cid(cid, record_type="3d")
except pcp.NotFoundError:
pcp_molecule = pcp.Compound.from_cid(cid, record_type="2d")
except ValueError as exc:
raise ValueError("Provided CID (or Identifier) is None.") from exc
data_mol = pcp_molecule.to_dict(properties=["atoms"])
symbols = [atom["element"] for atom in data_mol["atoms"]]
geometry = (
np.array([[atom["x"], atom["y"], atom.get("z", 0.0)] for atom in data_mol["atoms"]])
/ bohr_angs
)
return symbols, geometry
| [
"[email protected]"
]
| |
7dba6a3289087fe713494551fb12554582fca39e | 05824d96edf28918e25886716f0a5f904868a872 | /diff_tool.py | d638d6c9b4a4e50b342d048a3f0bb7a0f2ba7f91 | [
"MIT"
]
| permissive | DahlitzFlorian/diff-tool-video-snippets | d7c5fb8616fc06f71566d7c9eae9be9e5ec1bf7d | 0cd457abe43e63732810dbfec2e90cfb17d3d0a8 | refs/heads/master | 2023-08-27T06:46:36.789501 | 2021-10-29T06:55:07 | 2021-10-29T06:55:07 | 420,377,905 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # diff_tool.py
import argparse
import difflib
import sys
from pathlib import Path
def create_diff(old_file: Path, new_file: Path, output_file: Path = None):
    # read both files up front so the handles are closed before diffing
    with open(old_file) as f:
        file_1 = f.readlines()
    with open(new_file) as f:
        file_2 = f.readlines()
if output_file:
delta = difflib.HtmlDiff().make_file(
file_1, file_2, old_file.name, new_file.name
)
with open(output_file, "w") as f:
f.write(delta)
else:
delta = difflib.unified_diff(file_1, file_2, old_file.name, new_file.name)
sys.stdout.writelines(delta)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("old_file_version")
parser.add_argument("new_file_version")
parser.add_argument("--html", help="specify html to write to")
args = parser.parse_args()
old_file = Path(args.old_file_version)
new_file = Path(args.new_file_version)
if args.html:
output_file = Path(args.html)
else:
output_file = None
create_diff(old_file, new_file, output_file)
if __name__ == "__main__":
main()
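# Example invocations (hypothetical file names):
#   python diff_tool.py old.txt new.txt
#   python diff_tool.py old.txt new.txt --html diff.html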
| [
"[email protected]"
]
| |
ac0bdc20ef03692d0dfb49b0a9c2a5916f19c954 | 06685fa3aceb620ea13b593ddc52bba53300b93a | /ssh/__init__.py | 2dd7cdbecddeb86fdb56377ffe072425eef11b83 | []
| no_license | 66laps/kokki-cookbooks | a900f958d346e35a35a05ed6cbb12bbe2f5bf4a4 | 6c059f8cda577c765083dfe92688094bc38dfd4b | refs/heads/master | 2021-01-01T17:00:28.952170 | 2010-11-20T13:39:52 | 2010-11-20T13:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py |
from kokki import *
from kokki.cookbooks.ssh.providers import SSHKnownHostProvider, SSHAuthorizedKeyProvider
from kokki.cookbooks.ssh.resources import SSHKnownHost, SSHAuthorizedKey
def SSHConfig(name, hosts, mode=0600, **kwargs):
File(name,
mode = mode,
content = Template("ssh/config.j2", {'hosts': hosts}),
**kwargs)
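# Illustrative usage sketch -- the host-dict keys below are assumptions, since
# the real keys are whatever the ssh/config.j2 template iterates over:
#   SSHConfig("/home/deploy/.ssh/config",
#             hosts=[{"host": "github.com", "user": "git"}])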
| [
"[email protected]"
]
| |
9859b409527d7034606e1203ba46e1b8cf065b5a | f4fdb0c1213bbb403b87c2dbbde390918ac08861 | /fix_cite_date.py | deff0147b7c52b04e42582406ca609f000af3565 | []
| no_license | benwing2/RuNounChanges | 0d5076e576237f10b50049ed52b91f96c95cca95 | 048dfed5abe09b8d5629c5772292027ce0a170f2 | refs/heads/master | 2023-09-03T22:48:06.972127 | 2023-09-03T06:27:56 | 2023-09-03T06:27:56 | 41,480,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Replace dates of the form "1 January, 2012" with "1 January 2012"
# (remove the comma) in quotation/citation templates.
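# e.g. {{quote-book|date=1 January, 2012|...}} becomes
#      {{quote-book|date=1 January 2012|...}}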
import pywikibot, re, sys, argparse
import mwparserfromhell as mw
import blib
from blib import getparam, rmparam, set_template_name, msg, errmsg, site
import rulib
replace_templates = [
"cite-book", "cite-journal", "cite-newsgroup", "cite-video game",
"cite-web",
"quote-book", "quote-hansard", "quote-journal", "quote-newsgroup",
"quote-song", "quote-us-patent", "quote-video", "quote-web",
"quote-wikipedia"
]
months = ["January", "February", "March", "April", "May", "June", "July",
"August", "September", "October", "November", "December",
"Jan", "Feb", "Mar", "Apr", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"]
month_re = "(?:%s)" % "|".join(months)
def process_page(page, index, parsed):
pagetitle = str(page.title())
def pagemsg(txt):
msg("Page %s %s: %s" % (index, pagetitle, txt))
pagemsg("Processing")
if not page.exists():
pagemsg("WARNING: Page doesn't exist")
return
if ":" in pagetitle and not re.search(
"^(Citations|Appendix|Reconstruction|Transwiki|Talk|Wiktionary|[A-Za-z]+ talk):", pagetitle):
pagemsg("WARNING: Colon in page title and not a recognized namespace to include, skipping page")
return
text = str(page.text)
notes = []
parsed = blib.parse_text(text)
for t in parsed.filter_templates():
tname = str(t.name)
origt = str(t)
if tname.strip() in replace_templates:
date = getparam(t, "date")
if date.strip():
newdate = re.sub(r"^(\s*[0-9]+\s+%s\s*),(\s*[0-9]+\s*)$" % month_re,
r"\1\2", date)
if date != newdate:
# We do this instead of t.add() because if there's a final newline,
# it will appear in the value but t.add() will try to preserve the
# newline separately and you'll get two newlines.
t.get("date").value = newdate
pagemsg(("Replacing %s with %s" % (origt, str(t))).replace("\n", r"\n"))
notes.append("fix date in %s" % tname.strip())
return str(parsed), notes
if __name__ == "__main__":
parser = blib.create_argparser("Fix date in cite/quote templates",
include_pagefile=True)
args = parser.parse_args()
start, end = blib.parse_start_end(args.start, args.end)
blib.do_pagefile_cats_refs(args, start, end, process_page, edit=True,
# FIXME, had includelinks= for references, which we don't have a flag for now
default_refs=["Template:%s" % template for template in replace_templates])
| [
"[email protected]"
]
| |
89c3ca5f36c3d748b35e14008fd3349a8572e530 | 0cf3a74494ed92bcdeaf0220ad44475c1b6b2c1c | /tests/providers/google/cloud/hooks/test_stackdriver.py | 4d9a2b0dab008947fa28b6b99409866323b7afc0 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
]
| permissive | bolkedebruin/airflow | 064a0c32dff267f586c3fd4dc4beaae12273d180 | e1fe30c70d0fe9c033db9daf9d4420f7fa815b2d | refs/heads/master | 2023-06-09T11:37:57.734560 | 2022-02-23T12:38:27 | 2022-02-23T12:38:27 | 38,505,714 | 2 | 7 | Apache-2.0 | 2022-03-01T01:22:16 | 2015-07-03T18:27:44 | Python | UTF-8 | Python | false | false | 17,729 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest import mock
from google.api_core.gapic_v1.method import DEFAULT
from google.cloud.monitoring_v3 import AlertPolicy, NotificationChannel
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks import stackdriver
PROJECT_ID = "sd-project"
CREDENTIALS = "sd-credentials"
TEST_FILTER = "filter"
TEST_ALERT_POLICY_1 = {
"combiner": "OR",
"name": "projects/sd-project/alertPolicies/12345",
"enabled": True,
"display_name": "test display",
"conditions": [
{
"condition_threshold": {
"comparison": "COMPARISON_GT",
"aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
},
"display_name": "Condition display",
"name": "projects/sd-project/alertPolicies/123/conditions/456",
}
],
}
TEST_ALERT_POLICY_2 = {
"combiner": "OR",
"name": "projects/sd-project/alertPolicies/6789",
"enabled": False,
"display_name": "test display",
"conditions": [
{
"condition_threshold": {
"comparison": "COMPARISON_GT",
"aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
},
"display_name": "Condition display",
"name": "projects/sd-project/alertPolicies/456/conditions/789",
}
],
}
TEST_NOTIFICATION_CHANNEL_1 = {
"display_name": "sd",
"enabled": True,
"labels": {"auth_token": "top-secret", "channel_name": "#channel"},
"name": "projects/sd-project/notificationChannels/12345",
"type_": "slack",
}
TEST_NOTIFICATION_CHANNEL_2 = {
"display_name": "sd",
"enabled": False,
"labels": {"auth_token": "top-secret", "channel_name": "#channel"},
"name": "projects/sd-project/notificationChannels/6789",
"type_": "slack",
}
class TestStackdriverHookMethods(unittest.TestCase):
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
def test_stackdriver_list_alert_policies(self, mock_policy_client, mock_get_creds_and_project_id):
method = mock_policy_client.return_value.list_alert_policies
hook = stackdriver.StackdriverHook()
hook.list_alert_policies(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
method.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=TEST_FILTER, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
def test_stackdriver_enable_alert_policy(self, mock_policy_client, mock_get_creds_and_project_id):
hook = stackdriver.StackdriverHook()
alert_policy_enabled = AlertPolicy(**TEST_ALERT_POLICY_1)
alert_policy_disabled = AlertPolicy(**TEST_ALERT_POLICY_2)
alert_policies = [alert_policy_enabled, alert_policy_disabled]
mock_policy_client.return_value.list_alert_policies.return_value = alert_policies
hook.enable_alert_policies(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
mock_policy_client.return_value.list_alert_policies.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=TEST_FILTER, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
mask = FieldMask(paths=["enabled"])
alert_policy_disabled.enabled = True
mock_policy_client.return_value.update_alert_policy.assert_called_once_with(
request=dict(alert_policy=alert_policy_disabled, update_mask=mask),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
def test_stackdriver_disable_alert_policy(self, mock_policy_client, mock_get_creds_and_project_id):
hook = stackdriver.StackdriverHook()
alert_policy_enabled = AlertPolicy(**TEST_ALERT_POLICY_1)
alert_policy_disabled = AlertPolicy(**TEST_ALERT_POLICY_2)
mock_policy_client.return_value.list_alert_policies.return_value = [
alert_policy_enabled,
alert_policy_disabled,
]
hook.disable_alert_policies(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
mock_policy_client.return_value.list_alert_policies.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=TEST_FILTER, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
mask = FieldMask(paths=["enabled"])
alert_policy_enabled.enabled = False
mock_policy_client.return_value.update_alert_policy.assert_called_once_with(
request=dict(alert_policy=alert_policy_enabled, update_mask=mask),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_upsert_alert_policy(
self, mock_channel_client, mock_policy_client, mock_get_creds_and_project_id
):
hook = stackdriver.StackdriverHook()
existing_alert_policy = AlertPolicy(**TEST_ALERT_POLICY_1)
alert_policy_to_create = AlertPolicy(**TEST_ALERT_POLICY_2)
mock_policy_client.return_value.list_alert_policies.return_value = [existing_alert_policy]
mock_channel_client.return_value.list_notification_channels.return_value = []
hook.upsert_alert(
alerts=json.dumps({"policies": [TEST_ALERT_POLICY_1, TEST_ALERT_POLICY_2], "channels": []}),
project_id=PROJECT_ID,
)
mock_channel_client.return_value.list_notification_channels.assert_called_once_with(
request=dict(
name=f'projects/{PROJECT_ID}',
filter=None,
order_by=None,
page_size=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
mock_policy_client.return_value.list_alert_policies.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=None, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
alert_policy_to_create.name = None
alert_policy_to_create.creation_record = None
alert_policy_to_create.mutation_record = None
alert_policy_to_create.conditions[0].name = None
mock_policy_client.return_value.create_alert_policy.assert_called_once_with(
request=dict(
name=f'projects/{PROJECT_ID}',
alert_policy=alert_policy_to_create,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
existing_alert_policy.creation_record = None
existing_alert_policy.mutation_record = None
mock_policy_client.return_value.update_alert_policy.assert_called_once_with(
request=dict(alert_policy=existing_alert_policy), retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_upsert_alert_policy_without_channel(
self, mock_channel_client, mock_policy_client, mock_get_creds_and_project_id
):
hook = stackdriver.StackdriverHook()
existing_alert_policy = AlertPolicy(**TEST_ALERT_POLICY_1)
mock_policy_client.return_value.list_alert_policies.return_value = [existing_alert_policy]
mock_channel_client.return_value.list_notification_channels.return_value = []
hook.upsert_alert(
alerts=json.dumps({"policies": [TEST_ALERT_POLICY_1, TEST_ALERT_POLICY_2]}),
project_id=PROJECT_ID,
)
mock_channel_client.return_value.list_notification_channels.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=None, order_by=None, page_size=None),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_policy_client.return_value.list_alert_policies.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=None, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
existing_alert_policy.creation_record = None
existing_alert_policy.mutation_record = None
mock_policy_client.return_value.update_alert_policy.assert_called_once_with(
request=dict(alert_policy=existing_alert_policy), retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_policy_client')
def test_stackdriver_delete_alert_policy(self, mock_policy_client, mock_get_creds_and_project_id):
hook = stackdriver.StackdriverHook()
hook.delete_alert_policy(
name='test-alert',
)
mock_policy_client.return_value.delete_alert_policy.assert_called_once_with(
request=dict(name='test-alert'),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_list_notification_channel(self, mock_channel_client, mock_get_creds_and_project_id):
hook = stackdriver.StackdriverHook()
hook.list_notification_channels(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
mock_channel_client.return_value.list_notification_channels.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=TEST_FILTER, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_enable_notification_channel(
self, mock_channel_client, mock_get_creds_and_project_id
):
hook = stackdriver.StackdriverHook()
notification_channel_enabled = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_1)
notification_channel_disabled = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_2)
mock_channel_client.return_value.list_notification_channels.return_value = [
notification_channel_enabled,
notification_channel_disabled,
]
hook.enable_notification_channels(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
notification_channel_disabled.enabled = True
mask = FieldMask(paths=['enabled'])
mock_channel_client.return_value.update_notification_channel.assert_called_once_with(
request=dict(notification_channel=notification_channel_disabled, update_mask=mask),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_disable_notification_channel(
self, mock_channel_client, mock_get_creds_and_project_id
):
hook = stackdriver.StackdriverHook()
notification_channel_enabled = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_1)
notification_channel_disabled = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_2)
mock_channel_client.return_value.list_notification_channels.return_value = [
notification_channel_enabled,
notification_channel_disabled,
]
hook.disable_notification_channels(
filter_=TEST_FILTER,
project_id=PROJECT_ID,
)
notification_channel_enabled.enabled = False
mask = FieldMask(paths=['enabled'])
mock_channel_client.return_value.update_notification_channel.assert_called_once_with(
request=dict(notification_channel=notification_channel_enabled, update_mask=mask),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_upsert_channel(self, mock_channel_client, mock_get_creds_and_project_id):
hook = stackdriver.StackdriverHook()
existing_notification_channel = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_1)
notification_channel_to_be_created = NotificationChannel(**TEST_NOTIFICATION_CHANNEL_2)
mock_channel_client.return_value.list_notification_channels.return_value = [
existing_notification_channel
]
hook.upsert_channel(
channels=json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]}),
project_id=PROJECT_ID,
)
mock_channel_client.return_value.list_notification_channels.assert_called_once_with(
request=dict(name=f'projects/{PROJECT_ID}', filter=None, order_by=None, page_size=None),
retry=DEFAULT,
timeout=None,
metadata=(),
)
mock_channel_client.return_value.update_notification_channel.assert_called_once_with(
request=dict(notification_channel=existing_notification_channel),
retry=DEFAULT,
timeout=None,
metadata=(),
)
notification_channel_to_be_created.name = None
mock_channel_client.return_value.create_notification_channel.assert_called_once_with(
request=dict(
name=f'projects/{PROJECT_ID}', notification_channel=notification_channel_to_be_created
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch('airflow.providers.google.cloud.hooks.stackdriver.StackdriverHook._get_channel_client')
def test_stackdriver_delete_notification_channel(
self, mock_channel_client, mock_get_creds_and_project_id
):
hook = stackdriver.StackdriverHook()
hook.delete_notification_channel(
name='test-channel',
)
mock_channel_client.return_value.delete_notification_channel.assert_called_once_with(
request=dict(name='test-channel'), retry=DEFAULT, timeout=None, metadata=()
)
| [
"[email protected]"
]
| |
9d468379d6252e0193cf1aa21ee3dd194eb34613 | 55692ac1b8a1b00750c0b9caf7ebba53f1dde78b | /server/toolkits/migrations/tip.py | c3783643ffc06de90010c34ded47ec1e4a3f1e4f | []
| no_license | Soopro/julolo | 8d9dea62aa055318f891d200614314e402bda1eb | 73cc67f378f45c0da40911bac5e5e038f63588ab | refs/heads/master | 2021-10-26T08:50:58.940548 | 2019-04-11T15:41:12 | 2019-04-11T15:41:12 | 107,217,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # coding=utf-8
from __future__ import absolute_import
from mongokit import DocumentMigration
class TipMigration(DocumentMigration):
pass
# def allmigration01_remove_complete(self):
# self.target = {'verification': {'$exists': True}}
# if not self.status:
# self.update = {
# '$unset': {
# 'verification': False
# },
# '$set': {
# 'verified': False
# }
# }
# self.collection.update(self.target,
# self.update,
# multi=True,
# safe=True)
| [
"[email protected]"
]
| |
81de7e98002c913886831f84a4f671f56499c8f8 | 7d5047dae4df06f10752b7a3ec6e663f296457d3 | /Programmers/Level 2/영어 끝말잇기.py | fb829b110405b798b97e5b1c0d629d4794df69a9 | []
| no_license | soulgchoi/Algorithm | a73e3e8e3a256d4cf1c8b5fa3c7dc35a325a6e9a | a88b2c2a0f0d75ca59269dd815ee8d30dd270ce7 | refs/heads/master | 2022-02-05T18:08:09.271443 | 2022-01-20T02:36:37 | 2022-01-20T02:36:37 | 228,958,329 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | def solution(n, words):
answer = [0, 0]
for i in range(1, len(words)):
if words[i][0] != words[i-1][-1] or words.index(words[i]) < i:
answer = [i % n + 1, i // n + 1]
break
return answer
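# A second variant of the same approach (this later definition shadows the one
# above when the module is loaded): repetition is checked with a slice
# membership test instead of index().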
def solution(n, words):
answer = [0, 0]
for i in range(1, len(words)):
if words[i][0] != words[i-1][-1] or words[i] in words[:i]:
answer = [i % n + 1, i // n + 1]
break
return answer
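# Usage sketch; the sample input/output below is worked out by hand and is an
# assumption, not part of the original solution file:
#   solution(3, ["tank", "kick", "know", "wheel", "land", "dream", "mother",
#                "robot", "tank"])  # -> [3, 3]: player 3 fails on their 3rd turn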
| [
"[email protected]"
]
| |
fb7213b98d2e792b11a08e891b2cde8ae6a46d14 | bc6508a1dde1e61a8b2f61e70044c074aeeb4406 | /whoiser/servers/XN__G6W251D.py | 36b0bcbef1eb19e54daf5967aaf73f97ffed1542 | []
| no_license | krikulis/whoiser | 7eca72260dc061a91c7630901557264b80c5263e | 27af46d6ffcf2bacc5e5b837883ab5fab7ac9b40 | refs/heads/master | 2021-01-10T19:10:53.915622 | 2012-06-24T23:50:28 | 2012-06-24T23:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py |
from servers.generic import GenericWhoisQuery
class WhoisQuery(GenericWhoisQuery):
def query(self, query):
raise NotImplementedError(u"TLD XN--G6W251D has no Whois server available")
| [
"[email protected]"
]
| |
1c3c444acf1973ede408349bd8384cc01093b4f8 | 129710acb68187f1ef4aab97cd9eecbc3b620a6b | /venv/lib/python2.7/site-packages/aldryn_people/models.py | 1e5deda6b5737b0e5734e1661eaa9631a4070937 | []
| no_license | frankip/cmsaldryn_blog | 8f62c3853e1459caad3fb6ab1e6dbde93900e4b9 | 7ea6c9cb3fd8e8d16d617a167a0e33af08ec3b32 | refs/heads/master | 2021-01-18T19:30:41.464504 | 2016-06-20T09:03:34 | 2016-06-20T09:03:34 | 61,532,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,304 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import six
from aldryn_people.vcard import Vcard
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
import warnings
from reversion.revisions import (
default_revision_manager, RegistrationError
)
from distutils.version import LooseVersion
from django import get_version
from django.contrib.auth import get_user_model
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
try:
# Python>=2.7
from importlib import import_module
except ImportError:
# Python==2.6
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _, override, force_text
from six import text_type
from aldryn_common.admin_fields.sortedm2m import SortedM2MModelField
from aldryn_translation_tools.models import (
TranslatedAutoSlugifyMixin,
TranslationHelperMixin,
)
from cms.models.pluginmodel import CMSPlugin
from cms.utils.i18n import get_current_language, get_default_language
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from parler.models import TranslatableModel, TranslatedFields
from aldryn_reversion.core import version_controlled_content
from .utils import get_additional_styles
# NOTE: We use LooseVersion and not StrictVersion because sometimes Aldryn uses
# patched build with version numbers of the form X.Y.Z.postN.
loose_version = LooseVersion(get_version())
if loose_version < LooseVersion('1.7.0'):
# Prior to 1.7 it is pretty straight forward
user_model = get_user_model()
if user_model not in default_revision_manager.get_registered_models():
default_revision_manager.register(user_model)
else:
# otherwise it is a pain, but thanks to solution of getting model from
# https://github.com/django-oscar/django-oscar/commit/c479a1
# we can do almost the same thing from the different side.
from django.apps import apps
from django.apps.config import MODELS_MODULE_NAME
from django.core.exceptions import AppRegistryNotReady
def get_model(app_label, model_name):
"""
Fetches a Django model using the app registry.
This doesn't require that an app with the given app label exists,
which makes it safe to call when the registry is being populated.
All other methods to access models might raise an exception about the
registry not being ready yet.
Raises LookupError if model isn't found.
"""
try:
return apps.get_model(app_label, model_name)
except AppRegistryNotReady:
if apps.apps_ready and not apps.models_ready:
# If this function is called while `apps.populate()` is
# loading models, ensure that the module that defines the
# target model has been imported and try looking the model up
# in the app registry. This effectively emulates
# `from path.to.app.models import Model` where we use
# `Model = get_model('app', 'Model')` instead.
app_config = apps.get_app_config(app_label)
# `app_config.import_models()` cannot be used here because it
# would interfere with `apps.populate()`.
import_module('%s.%s' % (app_config.name, MODELS_MODULE_NAME))
# In order to account for case-insensitivity of model_name,
# look up the model through a private API of the app registry.
return apps.get_registered_model(app_label, model_name)
else:
# This must be a different case (e.g. the model really doesn't
# exist). We just re-raise the exception.
raise
# now get the real user model
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
model_app_name, model_model = user_model.split('.')
user_model_object = get_model(model_app_name, model_model)
# and try to register, if we have a registration error - that means that
# it has been registered already
try:
default_revision_manager.register(user_model_object)
except RegistrationError:
pass
@version_controlled_content
@python_2_unicode_compatible
class Group(TranslationHelperMixin, TranslatedAutoSlugifyMixin,
TranslatableModel):
slug_source_field_name = 'name'
translations = TranslatedFields(
name=models.CharField(_('name'), max_length=255,
help_text=_("Provide this group's name.")),
description=HTMLField(_('description'), blank=True),
slug=models.SlugField(_('slug'), max_length=255, default='',
blank=True,
help_text=_("Leave blank to auto-generate a unique slug.")),
)
address = models.TextField(
verbose_name=_('address'), blank=True)
postal_code = models.CharField(
verbose_name=_('postal code'), max_length=20, blank=True)
city = models.CharField(
verbose_name=_('city'), max_length=255, blank=True)
phone = models.CharField(
verbose_name=_('phone'), null=True, blank=True, max_length=100)
fax = models.CharField(
verbose_name=_('fax'), null=True, blank=True, max_length=100)
email = models.EmailField(
verbose_name=_('email'), blank=True, default='')
website = models.URLField(
verbose_name=_('website'), null=True, blank=True)
@property
def company_name(self):
warnings.warn(
'"Group.company_name" has been refactored to "Group.name"',
DeprecationWarning
)
return self.safe_translation_getter('name')
@property
def company_description(self):
warnings.warn(
'"Group.company_description" has been refactored to '
'"Group.description"',
DeprecationWarning
)
return self.safe_translation_getter('description')
class Meta:
verbose_name = _('Group')
verbose_name_plural = _('Groups')
def __str__(self):
return self.safe_translation_getter(
'name', default=_('Group: {0}').format(self.pk))
def get_absolute_url(self, language=None):
if not language:
language = get_current_language() or get_default_language()
slug, language = self.known_translation_getter(
'slug', None, language_code=language)
if slug:
kwargs = {'slug': slug}
else:
kwargs = {'pk': self.pk}
with override(language):
return reverse('aldryn_people:group-detail', kwargs=kwargs)
@version_controlled_content(follow=['groups', 'user'])
@python_2_unicode_compatible
class Person(TranslationHelperMixin, TranslatedAutoSlugifyMixin,
TranslatableModel):
slug_source_field_name = 'name'
translations = TranslatedFields(
name=models.CharField(_('name'), max_length=255, blank=False,
default='', help_text=_("Provide this person's name.")),
slug=models.SlugField(_('unique slug'), max_length=255, blank=True,
default='',
help_text=_("Leave blank to auto-generate a unique slug.")),
function=models.CharField(_('role'),
max_length=255, blank=True, default=''),
description=HTMLField(_('description'),
blank=True, default='')
)
phone = models.CharField(
verbose_name=_('phone'), null=True, blank=True, max_length=100)
mobile = models.CharField(
verbose_name=_('mobile'), null=True, blank=True, max_length=100)
fax = models.CharField(
verbose_name=_('fax'), null=True, blank=True, max_length=100)
email = models.EmailField(
verbose_name=_("email"), blank=True, default='')
website = models.URLField(
verbose_name=_('website'), null=True, blank=True)
groups = SortedM2MModelField(
'aldryn_people.Group', default=None, blank=True, related_name='people',
help_text=_('Choose and order the groups for this person, the first '
'will be the "primary group".'))
visual = FilerImageField(
null=True, blank=True, default=None, on_delete=models.SET_NULL)
vcard_enabled = models.BooleanField(
verbose_name=_('enable vCard download'), default=True)
user = models.OneToOneField(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
null=True, blank=True, related_name='persons')
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('People')
def __str__(self):
pkstr = str(self.pk)
if six.PY2:
pkstr = six.u(pkstr)
name = self.safe_translation_getter(
'name',
default='',
any_language=True
).strip()
return name if len(name) > 0 else pkstr
@property
def primary_group(self):
"""Simply returns the first in `groups`, if any, else None."""
return self.groups.first()
@property
def comment(self):
return self.safe_translation_getter('description', '')
def get_absolute_url(self, language=None):
if not language:
language = get_current_language()
slug, language = self.known_translation_getter(
'slug', None, language_code=language)
if slug:
kwargs = {'slug': slug}
else:
kwargs = {'pk': self.pk}
with override(language):
# do not fail with 500 error so that if detail view can't be
# resolved we still can use plugins.
try:
url = reverse('aldryn_people:person-detail', kwargs=kwargs)
except NoReverseMatch:
url = ''
return url
def get_vcard_url(self, language=None):
if not language:
language = get_current_language()
slug = self.safe_translation_getter(
'slug', None, language_code=language, any_language=False)
if slug:
kwargs = {'slug': slug}
else:
kwargs = {'pk': self.pk}
with override(language):
return reverse('aldryn_people:download_vcard', kwargs=kwargs)
def get_vcard(self, request=None):
vcard = Vcard()
function = self.safe_translation_getter('function')
safe_name = self.safe_translation_getter(
'name', default="Person: {0}".format(self.pk))
vcard.add_line('FN', safe_name)
vcard.add_line('N', [None, safe_name, None, None, None])
if self.visual:
ext = self.visual.extension.upper()
try:
with open(self.visual.path, 'rb') as f:
data = force_text(base64.b64encode(f.read()))
vcard.add_line('PHOTO', data, TYPE=ext, ENCODING='b')
except IOError:
if request:
url = urlparse.urljoin(request.build_absolute_uri(),
self.visual.url),
vcard.add_line('PHOTO', url, TYPE=ext)
if self.email:
vcard.add_line('EMAIL', self.email)
if function:
vcard.add_line('TITLE', self.function)
if self.phone:
vcard.add_line('TEL', self.phone, TYPE='WORK')
if self.mobile:
vcard.add_line('TEL', self.mobile, TYPE='CELL')
if self.fax:
vcard.add_line('TEL', self.fax, TYPE='FAX')
if self.website:
vcard.add_line('URL', self.website)
if self.primary_group:
group_name = self.primary_group.safe_translation_getter(
'name', default="Group: {0}".format(self.primary_group.pk))
if group_name:
vcard.add_line('ORG', group_name)
if (self.primary_group.address or self.primary_group.city or
self.primary_group.postal_code):
vcard.add_line('ADR', (
None, None,
self.primary_group.address,
self.primary_group.city,
None,
self.primary_group.postal_code,
None,
), TYPE='WORK')
if self.primary_group.phone:
vcard.add_line('TEL', self.primary_group.phone, TYPE='WORK')
if self.primary_group.fax:
vcard.add_line('TEL', self.primary_group.fax, TYPE='FAX')
if self.primary_group.website:
vcard.add_line('URL', self.primary_group.website)
return str(vcard)
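    # Usage sketch (assumption: called from a Django view with a Person
    # instance at hand; HttpResponse is not imported in this module):
    #   response = HttpResponse(person.get_vcard(request),
    #                           content_type='text/vcard')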
@python_2_unicode_compatible
class BasePeoplePlugin(CMSPlugin):
STYLE_CHOICES = [
('standard', _('Standard')),
('feature', _('Feature'))
] + get_additional_styles()
style = models.CharField(
_('Style'), choices=STYLE_CHOICES,
default=STYLE_CHOICES[0][0], max_length=50)
people = SortedM2MModelField(
Person, blank=True,
help_text=_('Select and arrange specific people, or, leave blank to '
'select all.')
)
class Meta:
abstract = True
def copy_relations(self, oldinstance):
self.people = oldinstance.people.all()
def get_selected_people(self):
return self.people.select_related('visual')
def __str__(self):
return text_type(self.pk)
class PeoplePlugin(BasePeoplePlugin):
group_by_group = models.BooleanField(
verbose_name=_('group by group'),
default=True,
help_text=_('Group people by their group.')
)
show_ungrouped = models.BooleanField(
verbose_name=_('show ungrouped'),
default=False,
help_text=_('When using "group by group", show ungrouped people too.')
)
show_links = models.BooleanField(
verbose_name=_('Show links to Detail Page'), default=False)
show_vcard = models.BooleanField(
verbose_name=_('Show links to download vCard'), default=False)
class Meta:
abstract = False
| [
"[email protected]"
]
| |
fd0f71e1297eb6aea740278e25eb6fb47249e785 | 3ea45d6acd362a646e906eac31ab6d3ea019d727 | /qaeval/tests/metric_test.py | f55aac1947adabbf36a8c37e2b70b19c3a004a2f | [
"Apache-2.0"
]
| permissive | rajhans/qaeval | 9747dea5dd0a234cc3df7837d6cbc0406b5d1b03 | dd7273183dd1b2c9995115310ef041daa953ca81 | refs/heads/master | 2023-07-10T04:15:05.399369 | 2021-08-03T02:22:15 | 2021-08-03T02:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,814 | py | import json
import os
import pytest
import unittest
from typing import List
from qaeval import QAEval, FIXTURES_ROOT
@pytest.mark.skipif(
"GENERATION_MODEL" not in os.environ,
reason="`GENERATION_MODEL` environment variable not set",
)
@pytest.mark.skipif(
"ANSWERING_MODEL" not in os.environ,
reason="`ANSWERING_MODEL` environment variable not set",
)
class TestQAEval(unittest.TestCase):
def setUp(self) -> None:
self.summaries = []
self.references_list = []
with open(f"{FIXTURES_ROOT}/multiling2011.jsonl", "r") as f:
for line in f:
data = json.loads(line)
summary = data["summary"]["text"]
references = [reference["text"] for reference in data["references"]]
self.summaries.append(summary)
self.references_list.append(references)
def _check_output(self, metric: QAEval, expected_output: List) -> None:
actual_output = metric.score_batch(self.summaries, self.references_list)
assert len(expected_output) == len(actual_output)
for expected, actual in zip(expected_output, actual_output):
assert len(expected) == len(actual) == 1
expected = expected["qa-eval"]
actual = actual["qa-eval"]
assert len(expected) == len(actual)
for metric in expected.keys():
assert expected[metric] == pytest.approx(actual[metric], abs=1e-5)
def test_qaeval(self):
# This is a regression test, not necessarily a test for correctness
metric = QAEval(
generation_model_path=os.environ["GENERATION_MODEL"],
answering_model_dir=os.environ["ANSWERING_MODEL"],
)
expected_output = [
{
"qa-eval": {
"is_answered": 0.2171952736318408,
"em": 0.03078358208955224,
"f1": 0.05688114487088367,
}
},
{
"qa-eval": {
"is_answered": 0.2706778606965174,
"em": 0.08286691542288557,
"f1": 0.11367400349443259,
}
},
{
"qa-eval": {
"is_answered": 0.4552238805970149,
"em": 0.05223880597014925,
"f1": 0.10360696517412935,
}
},
{
"qa-eval": {
"is_answered": 0.2671408582089552,
"em": 0.04582555970149253,
"f1": 0.05402803689883914,
}
},
{
"qa-eval": {
"is_answered": 0.17126063232225966,
"em": 0.025276841598459315,
"f1": 0.04173576561636263,
}
},
{
"qa-eval": {
"is_answered": 0.3291829383548209,
"em": 0.029159756771697066,
"f1": 0.0543755246092705,
}
},
{
"qa-eval": {
"is_answered": 0.34836235489220563,
"em": 0.05223880597014925,
"f1": 0.09381412591922542,
}
},
{
"qa-eval": {
"is_answered": 0.4337987481945113,
"em": 0.04537794896485315,
"f1": 0.12145356515842792,
}
},
{
"qa-eval": {
"is_answered": 0.44427039821776665,
"em": 0.06434837092731831,
"f1": 0.10272833079850623,
}
},
{
"qa-eval": {
"is_answered": 0.40391255917571706,
"em": 0.09642160957950431,
"f1": 0.13482779720666102,
}
},
{
"qa-eval": {
"is_answered": 0.5345864661654135,
"em": 0.12349624060150374,
"f1": 0.16393273976257167,
}
},
{
"qa-eval": {
"is_answered": 0.5204365079365079,
"em": 0.12678571428571428,
"f1": 0.16151234567901235,
}
},
]
self._check_output(metric, expected_output)
@pytest.mark.skipif(
"LERC_MODEL" not in os.environ,
reason="`LERC_MODEL` environment variable not set",
)
@pytest.mark.skipif(
"LERC_PRETRAINED_MODEL" not in os.environ,
reason="`LERC_PRETRAINED_MODEL` environment variable not set",
)
def test_qaeval_with_lerc(self):
# This is a regression test, not necessarily a test for correctness
metric = QAEval(
generation_model_path=os.environ["GENERATION_MODEL"],
answering_model_dir=os.environ["ANSWERING_MODEL"],
use_lerc=True,
lerc_model_path=os.environ["LERC_MODEL"],
lerc_pretrained_model_path=os.environ["LERC_PRETRAINED_MODEL"],
)
expected_output = [
{
"qa-eval": {
"is_answered": 0.2171952736318408,
"em": 0.03078358208955224,
"f1": 0.05688114487088367,
"lerc": 0.5280342313984585,
}
},
{
"qa-eval": {
"is_answered": 0.2706778606965174,
"em": 0.08286691542288557,
"f1": 0.11367400349443259,
"lerc": 0.8588525844061404,
}
},
{
"qa-eval": {
"is_answered": 0.4552238805970149,
"em": 0.05223880597014925,
"f1": 0.10360696517412935,
"lerc": 1.2307390170310861,
}
},
{
"qa-eval": {
"is_answered": 0.2671408582089552,
"em": 0.04582555970149253,
"f1": 0.05402803689883914,
"lerc": 0.6782244059549116,
}
},
{
"qa-eval": {
"is_answered": 0.17126063232225966,
"em": 0.025276841598459315,
"f1": 0.04173576561636263,
"lerc": 0.40871678001285994,
}
},
{
"qa-eval": {
"is_answered": 0.3291829383548209,
"em": 0.029159756771697066,
"f1": 0.0543755246092705,
"lerc": 0.6477515654560587,
}
},
{
"qa-eval": {
"is_answered": 0.34836235489220563,
"em": 0.05223880597014925,
"f1": 0.09381412591922542,
"lerc": 0.947292007320556,
}
},
{
"qa-eval": {
"is_answered": 0.4337987481945113,
"em": 0.04537794896485315,
"f1": 0.12145356515842792,
"lerc": 1.2629075305115793,
}
},
{
"qa-eval": {
"is_answered": 0.44427039821776665,
"em": 0.06434837092731831,
"f1": 0.10272833079850623,
"lerc": 1.1977039740821571,
}
},
{
"qa-eval": {
"is_answered": 0.40391255917571706,
"em": 0.09642160957950431,
"f1": 0.13482779720666102,
"lerc": 1.2360802221434326,
}
},
{
"qa-eval": {
"is_answered": 0.5345864661654135,
"em": 0.12349624060150374,
"f1": 0.16393273976257167,
"lerc": 1.5575424717221045,
}
},
{
"qa-eval": {
"is_answered": 0.5204365079365079,
"em": 0.12678571428571428,
"f1": 0.16151234567901235,
"lerc": 1.4713040575976408,
}
},
]
self._check_output(metric, expected_output)
@pytest.mark.skipif(
"LERC_MODEL" not in os.environ,
reason="`LERC_MODEL` environment variable not set",
)
@pytest.mark.skipif(
"LERC_PRETRAINED_MODEL" not in os.environ,
reason="`LERC_PRETRAINED_MODEL` environment variable not set",
)
def test_return_qa_pairs(self):
metric = QAEval(
generation_model_path=os.environ["GENERATION_MODEL"],
answering_model_dir=os.environ["ANSWERING_MODEL"],
use_lerc=True,
lerc_model_path=os.environ["LERC_MODEL"],
lerc_pretrained_model_path=os.environ["LERC_PRETRAINED_MODEL"],
)
summaries = [
"Dan walked to the bakery this morning.",
"He bought some scones today",
]
references_list = [
["Dan went to buy scones earlier this morning."],
["Dan went to buy scones earlier this morning."],
]
results_list = metric.score_batch(summaries, references_list, return_qa_pairs=True)
assert len(results_list) == 2
metrics, qa_pairs_list = results_list[0]
assert metrics["qa-eval"]["is_answered"] == 1.0
assert metrics["qa-eval"]["em"] == 0.5
assert metrics["qa-eval"]["f1"] == 0.5
self.assertAlmostEqual(metrics["qa-eval"]["lerc"], 3.171376943588257, places=4)
assert len(qa_pairs_list) == 1
qa_pairs = qa_pairs_list[0]
assert len(qa_pairs) == 2
assert (
qa_pairs[0]["question"]["question"]
== "Who went to buy scones earlier this morning?"
)
assert qa_pairs[0]["prediction"]["prediction"] == "Dan"
assert qa_pairs[0]["prediction"]["start"] == 0
assert qa_pairs[0]["prediction"]["end"] == 3
assert qa_pairs[0]["prediction"]["is_answered"] == 1.0
assert qa_pairs[0]["prediction"]["em"] == 1.0
assert qa_pairs[0]["prediction"]["f1"] == 1.0
self.assertAlmostEqual(
qa_pairs[0]["prediction"]["lerc"], 5.035197734832764, places=4
)
assert (
qa_pairs[1]["question"]["question"]
== "What did Dan go to buy earlier this morning?"
)
assert qa_pairs[1]["prediction"]["prediction"] == "bakery"
assert qa_pairs[1]["prediction"]["start"] == 18
assert qa_pairs[1]["prediction"]["end"] == 24
assert qa_pairs[1]["prediction"]["is_answered"] == 1.0
assert qa_pairs[1]["prediction"]["em"] == 0.0
assert qa_pairs[1]["prediction"]["f1"] == 0.0
self.assertAlmostEqual(
qa_pairs[1]["prediction"]["lerc"], 1.30755615234375, places=4
)
metrics, qa_pairs_list = results_list[1]
assert metrics["qa-eval"]["is_answered"] == 0.5
assert metrics["qa-eval"]["em"] == 0.5
assert metrics["qa-eval"]["f1"] == 0.5
self.assertAlmostEqual(metrics["qa-eval"]["lerc"], 2.492440700531006, places=4)
assert len(qa_pairs_list) == 1
qa_pairs = qa_pairs_list[0]
assert len(qa_pairs) == 2
assert (
qa_pairs[0]["question"]["question"]
== "Who went to buy scones earlier this morning?"
)
assert qa_pairs[0]["prediction"]["prediction"] == "He"
assert qa_pairs[0]["prediction"]["start"] == 0
assert qa_pairs[0]["prediction"]["end"] == 2
assert qa_pairs[0]["prediction"]["is_answered"] == 0.0
assert qa_pairs[0]["prediction"]["em"] == 0.0
assert qa_pairs[0]["prediction"]["f1"] == 0.0
assert qa_pairs[0]["prediction"]["lerc"] == 0.0
assert (
qa_pairs[1]["question"]["question"]
== "What did Dan go to buy earlier this morning?"
)
assert qa_pairs[1]["prediction"]["prediction"] == "scones"
assert qa_pairs[1]["prediction"]["start"] == 15
assert qa_pairs[1]["prediction"]["end"] == 21
assert qa_pairs[1]["prediction"]["is_answered"] == 1.0
assert qa_pairs[1]["prediction"]["em"] == 1.0
assert qa_pairs[1]["prediction"]["f1"] == 1.0
self.assertAlmostEqual(
qa_pairs[1]["prediction"]["lerc"], 4.984881401062012, places=4
)
| [
"[email protected]"
]
| |
8a312b438fc15bf78d0eae6a7849064a7eaaf7e8 | f3997f566695a78d09fcab688db88499223dca17 | /coil_phase/det_coil_phase.py | c1f4d76d095a147236a45d3f938cbc97af0f839b | []
| no_license | melampyge/CollectiveFilament | 600d7a426d88a7f8f31702edb2b1fea7691372d2 | 7d2659bee85c955c680eda019cbff6e2b93ecff2 | refs/heads/master | 2020-07-23T05:58:55.383746 | 2017-06-25T14:55:14 | 2017-06-25T14:55:14 | 94,351,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | #!/usr/local/bin/python2.7
import matplotlib as mpl
mpl.use('Agg')
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import stats
#####################################################################
### define / read in some global variables
gamma = 2.0 # viscosity
kT = 1.0 # thermal energy
ifile = open('scale_params.data')
line = ifile.readline()
line = line.split()
L = float(line[-1]) # polymer length
line = ifile.readline()
line = line.split()
dt = float(line[-1]) # simulation timestep
#####################################################################
def read_coilicity():
""" read in the coilcity"""
t = []
c = []
ifile = open('coilicity.data')
ifile.readline()
ifile.readline()
for line in ifile:
line = line.split()
try:
t.append(float(line[0]))
c.append(float(line[2]))
except:
pass
ifile.close()
t = np.array(t)
c = np.array(c)
# transform time and coility units
ttrans = gamma*L**3/6./kT
t *= dt/ttrans
c *= L/2/np.pi
return t,c
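# Note on the rescaling above (interpretation, not stated in the source):
# ttrans = gamma*L**3/(6*kT) is the characteristic relaxation time of a
# filament of length L, so t is reported in units of ttrans, and c*L/(2*pi)
# expresses the coilicity relative to the filament length.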
#####################################################################
def read_cacf():
""" read in the coilicity autocorrelation function"""
tacf = []
cacf = []
ifile = open('coil2_acf.data', 'r')
ifile.readline()
for line in ifile:
line = line.split()
tacf.append(float(line[0]))
cacf.append(float(line[1]))
ifile.close()
tacf = np.array(tacf)
cacf = np.array(cacf)
# transform time units
ttrans = gamma*L**3/6./kT
tacf *= dt/ttrans
return tacf,cacf
#####################################################################
def compute_moments(c):
""" compute the coil moments"""
n = len(c)
cav = np.average(c)
cav_std = np.std(c)/np.sqrt(n)
csq = np.average(c**2)
csq_std = np.std(c**2)/np.sqrt(n)
curt = stats.kurtosis(c, fisher = False)
# compute mirrored statistics
cm = -np.copy(c)
cboth = np.append(c,cm)
curt2 = stats.kurtosis(cboth, fisher = False)
return cav, cav_std, csq, csq_std,curt,curt2
#####################################################################
def compute_thalf(tacf,cacf):
""" check where the autocorrelation function drops below 0.5"""
n = len(tacf)
thalf = -1
for i in range(n):
if cacf[i] < 0.5:
thalf = tacf[i]
break
plt.plot(tacf, cacf)
plt.savefig('coilicity_acf.png')
plt.close()
return thalf
#####################################################################
def main():
""" main function"""
# read in the coilicity
t,c = read_coilicity()
# read in the time autocorrelation function
tacf, cacf = read_cacf()
# compute the moments and standard deviations
cav, cav_std, csq, csq_std,curt,curt2 = compute_moments(c)
# compute the moments for only the second part of the array
n = len(c)
    cavh, cav_stdh, csqh, csq_stdh, curth, curt2h = compute_moments(c[n//2:])  # integer split, valid under Python 2 and 3
# compute the time where the acf drops below 0.5
thalf = compute_thalf(tacf,cacf)
# write results to file
ofile = open('coil_phase.data', 'w')
ofile.write('Information required to identify coil phase\n\n')
ofile.write('cav\tcav_std\tcsq\tcsq_std\tthalf\tcurt\tcurt2\n')
ofile.write(str(cav) + '\t' + str(cav_std) + '\t' + str(csq) + '\t' + str(csq_std) + '\t' + str(thalf) + '\t' + str(curt) + '\t' + str(curt2) + '\n')
ofile.close()
ofile = open('coil_phaseh.data', 'w')
ofile.write('Information required to identify coil phase\n\n')
ofile.write('cav\tcav_std\tcsq\tcsq_std\tthalf\tcurt\tcurt2\n')
ofile.write(str(cavh) + '\t' + str(cav_stdh) + '\t' + str(csqh) + '\t' + str(csq_stdh) + '\t' + str(thalf) + '\t' + str(curth) + '\t' + str(curt2h) + '\n')
ofile.close()
return
#####################################################################
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
d9a39023ff5913ca3b3d1a074f52ca0bb921f5aa | 22bb398d1d9af678e25ccf39350f90f109c74256 | /tests/test_utils/output/ifabsents.py | 03eb882c4e349ece90efcb33534eaf4c040188a6 | [
"CC0-1.0"
]
| permissive | rajshruti18/biolinkml | 45a0848512e00d0ce66ad17684f26909a3ad3953 | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | refs/heads/master | 2023-03-14T05:57:22.399803 | 2020-08-11T15:42:49 | 2020-08-11T15:42:49 | 287,549,421 | 0 | 0 | CC0-1.0 | 2020-08-14T14:16:53 | 2020-08-14T14:16:52 | null | UTF-8 | Python | false | false | 7,050 | py | # Auto generated from ifabsents.yaml by pythongen.py version: 0.4.0
# Generation date: 2020-08-04 09:40
# Schema: ifabsent
#
# id: http://example.org/tests/ifabsent
# description:
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from biolinkml.utils.slot import Slot
from biolinkml.utils.metamodelcore import empty_list, empty_dict, bnode
from biolinkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
if sys.version_info < (3, 7, 6):
from biolinkml.utils.dataclass_extensions_375 import dataclasses_init_fn_with_kwargs
else:
from biolinkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from biolinkml.utils.formatutils import camelcase, underscore, sfx
from rdflib import Namespace, URIRef
from biolinkml.utils.curienamespace import CurieNamespace
from biolinkml.utils.metamodelcore import Bool, ElementIdentifier, NCName, NodeIdentifier, URI, URIorCURIE, XSDDate, XSDDateTime, XSDTime
metamodel_version = "1.5.3"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
SHEX = CurieNamespace('shex', 'http://www.w3.org/ns/shex#')
SKOS = CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#')
TEST = CurieNamespace('test', 'http://example.org/test/')
XSD = CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#')
DEFAULT_ = TEST
# Types
class String(str):
""" A character string """
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "string"
type_model_uri = TEST.String
class Integer(int):
""" An integer """
type_class_uri = XSD.integer
type_class_curie = "xsd:integer"
type_name = "integer"
type_model_uri = TEST.Integer
class Boolean(Bool):
""" A binary (true or false) value """
type_class_uri = XSD.boolean
type_class_curie = "xsd:boolean"
type_name = "boolean"
type_model_uri = TEST.Boolean
class Float(float):
""" A real number that conforms to the xsd:float specification """
type_class_uri = XSD.float
type_class_curie = "xsd:float"
type_name = "float"
type_model_uri = TEST.Float
class Double(float):
""" A real number that conforms to the xsd:double specification """
type_class_uri = XSD.double
type_class_curie = "xsd:double"
type_name = "double"
type_model_uri = TEST.Double
class Time(XSDTime):
""" A time object represents a (local) time of day, independent of any particular day """
type_class_uri = XSD.dateTime
type_class_curie = "xsd:dateTime"
type_name = "time"
type_model_uri = TEST.Time
class Date(XSDDate):
""" a date (year, month and day) in an idealized calendar """
type_class_uri = XSD.date
type_class_curie = "xsd:date"
type_name = "date"
type_model_uri = TEST.Date
class Datetime(XSDDateTime):
""" The combination of a date and time """
type_class_uri = XSD.dateTime
type_class_curie = "xsd:dateTime"
type_name = "datetime"
type_model_uri = TEST.Datetime
class Uriorcurie(URIorCURIE):
""" a URI or a CURIE """
type_class_uri = XSD.anyURI
type_class_curie = "xsd:anyURI"
type_name = "uriorcurie"
type_model_uri = TEST.Uriorcurie
class Uri(URI):
""" a complete URI """
type_class_uri = XSD.anyURI
type_class_curie = "xsd:anyURI"
type_name = "uri"
type_model_uri = TEST.Uri
class Ncname(NCName):
""" Prefix part of CURIE """
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "ncname"
type_model_uri = TEST.Ncname
class Objectidentifier(ElementIdentifier):
""" A URI or CURIE that represents an object in the model. """
type_class_uri = SHEX.iri
type_class_curie = "shex:iri"
type_name = "objectidentifier"
type_model_uri = TEST.Objectidentifier
class Nodeidentifier(NodeIdentifier):
""" A URI, CURIE or BNODE that represents a node in a model. """
type_class_uri = SHEX.nonLiteral
type_class_curie = "shex:nonLiteral"
type_name = "nodeidentifier"
type_model_uri = TEST.Nodeidentifier
# Class references
@dataclass
class C1(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = TEST.C1
class_class_curie: ClassVar[str] = "test:C1"
class_name: ClassVar[str] = "c1"
class_model_uri: ClassVar[URIRef] = TEST.C1
s1: Optional[str] = True
s1p: Optional[str] = True
s2: Optional[str] = False
s2p: Optional[str] = False
slot_uri: Optional[str] = None
slot_curie: Optional[str] = None
class_uri: Optional[str] = None
class_curie: Optional[str] = None
bnode: Optional[str] = bnode()
txt: Optional[str] = "penguins\"doves"
int: Optional[str] = -1403
dfltrange: Optional[str] = None
dfltns: Optional[str] = None
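# Example (illustrative only; the values follow from the ifabsent defaults
# declared above):
#   c = C1()
#   assert c.s1 is True and c.s2 is False
#   assert c.txt == 'penguins"doves' and c.int == -1403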
# Slots
class slots:
pass
slots.s1 = Slot(uri=TEST.s1, name="s1", curie=TEST.curie('s1'),
model_uri=TEST.s1, domain=None, range=Optional[str])
slots.s1p = Slot(uri=TEST.s1p, name="s1p", curie=TEST.curie('s1p'),
model_uri=TEST.s1p, domain=None, range=Optional[str])
slots.s2 = Slot(uri=TEST.s2, name="s2", curie=TEST.curie('s2'),
model_uri=TEST.s2, domain=None, range=Optional[str])
slots.s2p = Slot(uri=TEST.s2p, name="s2p", curie=TEST.curie('s2p'),
model_uri=TEST.s2p, domain=None, range=Optional[str])
slots.slot_uri = Slot(uri=TEST.slot_uri, name="slot_uri", curie=TEST.curie('slot_uri'),
model_uri=TEST.slot_uri, domain=None, range=Optional[str])
slots.slot_curie = Slot(uri=TEST.slot_curie, name="slot_curie", curie=TEST.curie('slot_curie'),
model_uri=TEST.slot_curie, domain=None, range=Optional[str])
slots.class_uri = Slot(uri=TEST.class_uri, name="class_uri", curie=TEST.curie('class_uri'),
model_uri=TEST.class_uri, domain=None, range=Optional[str])
slots.class_curie = Slot(uri=TEST.class_curie, name="class_curie", curie=TEST.curie('class_curie'),
model_uri=TEST.class_curie, domain=None, range=Optional[str])
slots.bnode = Slot(uri=TEST.bnode, name="bnode", curie=TEST.curie('bnode'),
model_uri=TEST.bnode, domain=None, range=Optional[str])
slots.txt = Slot(uri=TEST.txt, name="txt", curie=TEST.curie('txt'),
model_uri=TEST.txt, domain=None, range=Optional[str])
slots.int = Slot(uri=TEST.int, name="int", curie=TEST.curie('int'),
model_uri=TEST.int, domain=None, range=Optional[str])
slots.dfltrange = Slot(uri=TEST.dfltrange, name="dfltrange", curie=TEST.curie('dfltrange'),
model_uri=TEST.dfltrange, domain=None, range=Optional[str])
slots.dfltns = Slot(uri=TEST.dfltns, name="dfltns", curie=TEST.curie('dfltns'),
                    model_uri=TEST.dfltns, domain=None, range=Optional[str])
| [
"[email protected]"
]
| |
f4145e6b8b24944fa2ee4b82009ad6b9a3c1facb | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/account_helpers/settings_core/SettingsCache.py | c8feafae4411fe1391ea440ca86c25272b1f67dd | []
| no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,564 | py | # 2017.08.29 21:43:24 Střední Evropa (letní čas)
# Embedded file name: scripts/client/account_helpers/settings_core/SettingsCache.py
from Event import Event
from adisp import async
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.shared.utils.requesters.IntSettingsRequester import IntSettingsRequester
from account_helpers.settings_core.settings_constants import VERSION
from skeletons.account_helpers.settings_core import ISettingsCache
class SettingsCache(ISettingsCache):
def __init__(self):
self.__intSettings = IntSettingsRequester()
self.__waitForSync = False
self.onSyncStarted = Event()
self.onSyncCompleted = Event()
def init(self):
g_clientUpdateManager.addCallbacks({'intUserSettings': self._onResync})
def fini(self):
self.onSyncStarted.clear()
self.onSyncCompleted.clear()
g_clientUpdateManager.removeObjectCallbacks(self)
@property
def waitForSync(self):
return self.__waitForSync
@property
def settings(self):
return self.__intSettings
def _onResync(self, *args):
self.__invalidateData()
@async
def update(self, callback = None):
self.__invalidateData(callback)
def getSectionSettings(self, section, defaultValue = 0):
return self.__intSettings.getSetting(section, defaultValue)
def setSectionSettings(self, section, value):
self.__intSettings.setSetting(section, value)
def setSettings(self, settings):
self.__intSettings.setSettings(settings)
def getSetting(self, key, defaultValue = 0):
return self.__intSettings.getSetting(key, defaultValue)
def getVersion(self, defaultValue = 0):
return self.__intSettings.getSetting(VERSION, defaultValue)
def setVersion(self, value):
self.__intSettings.setSetting(VERSION, value)
def __invalidateData(self, callback = lambda *args: None):
def cbWrapper(*args):
self.__waitForSync = False
self.onSyncCompleted()
callback(*args)
self.__waitForSync = True
self.onSyncStarted()
import BattleReplay
if BattleReplay.g_replayCtrl.isPlaying:
cbWrapper(dict())
return
self.__intSettings.request()(cbWrapper)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\account_helpers\settings_core\SettingsCache.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:43:24 Central Europe (summer time)
| [
"[email protected]"
]
| |
a008c9830e1db80a44abf7e5e9088150db092ed9 | 24a9bbc1c8000f080958570c513d1d249c3514fd | /models/resnet_50.py | 898b31c4c982d6933630de58afae99afeb9cad85 | []
| no_license | CoderHHX/DGRL_OPFE | e74640693152ce4256dc54d3c0e4703fd6fcef4d | 40d2f1a87714d7924e0931f8f7da6487acd1634d | refs/heads/master | 2020-09-17T09:22:34.589899 | 2018-09-29T14:54:53 | 2018-09-29T14:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,672 | py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet50': '/home/zhengxiawu/.torch/models/resnet50-19c8e357.pth',
}
# model_urls = {
# 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
# 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
# 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
# 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
# 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
# }
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, **kwargs):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avg_pool = nn.AvgPool2d(7, stride=1)
self.global_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.global_max_pool = nn.AdaptiveMaxPool2d((1, 1))
self.class_fc = nn.Linear(512 * block.expansion * 2, kwargs['num_class'])
#normalze the weight with
self.is_train = bool(kwargs['is_train'])
self.saliency = str(kwargs['saliency'])
self.pool_type = str(kwargs['pool_type'])
self.scale = int(kwargs['scale'])
        self.threshold = float(kwargs['threshold']) if 'threshold' in kwargs else 'none'  # dict.has_key() only exists in Python 2
        self.phase = str(kwargs['phase']) if 'phase' in kwargs else 'none'
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def extract_conv_feature(self,x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
#**kwargs form
#pool_type:default max_avg, can be set max, avg
#scale: int default 128
#scda:True or false, if use saliency
#is_train
x = self.extract_conv_feature(x)
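        # SCDA-style mask (interpretation): keep spatial positions whose
        # channel-summed activation exceeds its spatial mean, zero the rest.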
        if self.saliency == 'scda':
scda_x = torch.sum(x,1,keepdim=True)
mean_x = torch.mean(scda_x.view(scda_x.size(0),-1),1,True)
scda_x = scda_x - mean_x
scda_x = scda_x>0
scda_x = scda_x.float()
x = x * scda_x
elif self.saliency == 'oc_mask':
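            # Objectness mask (interpretation): channel-summed scores scaled by
            # twice their mean, plus a centered Gaussian spatial prior with
            # sigma = min(H, W) / 3, thresholded at self.threshold.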
object_tive_ness = torch.sum(x,1,keepdim=True)
            max_object_score = 2 * torch.mean(object_tive_ness.view(object_tive_ness.size(0),-1),1,True)
object_tive_ness = object_tive_ness / max_object_score
_,_, size_w, size_h = object_tive_ness.shape
prior_feature = np.indices((size_w,size_h))
prior_feature = prior_feature + 1
prior_feature = np.transpose(prior_feature, axes=(1, 2, 0))
prior_feature = prior_feature - (np.array((size_w,size_h)) / 2.)
sigma = size_h if size_h < size_w else size_w
sigma = sigma / 3.
prior_feature = np.exp(-1 * np.sum(prior_feature ** 2, axis=2) / (2 * (sigma ** 2)))
prior_feature = np.reshape(prior_feature,(1,1,size_w,size_h))
prior_feature_tensor = torch.Tensor(prior_feature).cuda()
            indicate_mat = object_tive_ness + prior_feature_tensor > self.threshold
indicate_mat = indicate_mat.float()
x = x * indicate_mat
if self.phase == 'extract_conv_feature':
return x
if self.pool_type == 'max_avg':
avg_x = self.global_avg_pool(x)
avg_x = avg_x.view(avg_x.size(0), -1)
avg_x = F.normalize(avg_x,p=2,dim=1)
max_x = self.global_max_pool(x)
max_x = max_x.view(max_x.size(0), -1)
max_x = F.normalize(max_x,p=2,dim=1)
x = torch.cat((avg_x,max_x),dim=1)
x = x * self.scale
# the last fc layer can be treat as distance compute
if self.is_train:
fc_weight_relu = self.relu(self.class_fc.weight)
self.class_fc.weight.data = fc_weight_relu
x = self.class_fc(x)
return x,fc_weight_relu
return x
def resnet_50(pretrained=False,**kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
pretrained_dict = torch.load(model_urls['resnet50'])
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
| [
"[email protected]"
]
| |
647e11cca2a8aa827a45288dc54739ade950f14d | f889bc01147869459c0a516382e7b95221295a7b | /swagger_client/models/customer_data_group_extension_interface.py | 3806a3f11578732561fab75c8a5c20d9992130f3 | []
| no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomerDataGroupExtensionInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
CustomerDataGroupExtensionInterface - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CustomerDataGroupExtensionInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
56775bad4d09bedb696e213784acf2be07894eb5 | ed5961c9a3ae027a37913047bd149296955a7497 | /metrics/Evaluator.py | 2a9a61abc94289acdd8c4ee1653a21000c630eb5 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"Apache-2.0"
]
| permissive | microsoft/NeuronBlocks | c3de6b1afc431521e70c04ce82b54e7a3292f3aa | 47e03e09589e86d16c609511bf875bd3e3ff3a3e | refs/heads/master | 2023-08-30T08:51:20.228137 | 2022-11-28T19:10:10 | 2022-11-28T19:10:10 | 181,388,576 | 1,308 | 195 | MIT | 2023-07-22T03:07:56 | 2019-04-15T01:01:24 | Python | UTF-8 | Python | false | false | 13,651 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from .conlleval import countChunks, evaluate, to_conll_format
from .slot_tagging_metrics import get_ner_BIOES, get_ner_BIO
from settings import TaggingSchemes
import numpy as np
import re
import string
from collections import Counter
class Evaluator(object):
def __init__(self, metrics, pos_label=1, first_metric=None, tagging_scheme=None, label_indices=None):
"""
Args:
metrics:
pos_label: the positive label for auc metric
first_metric:
tagging_scheme:
label_indices: label to index dictionary, for auc@average or auc@some_type metric
"""
self.__metrics = metrics
self.__pos_label = pos_label
if first_metric is None:
self.__first_metric = metrics[0]
else:
self.__first_metric = first_metric
self.__tagging_scheme = tagging_scheme
self.__label_indices = label_indices
self.has_auc_type_specific = False # if True, the recorder needs to record the pred score of all types
supported_metrics = self.get_supported_metrics()
for metric in metrics:
if not metric in supported_metrics:
if metric.find('@') != -1:
field, target = metric.split('@')
if field != 'auc' or (self.__label_indices and (not target in self.__label_indices) and target != 'average'):
raise Exception("The metric %s is not supported. Supported metrics are: %s" % (metric, supported_metrics))
else:
self.has_auc_type_specific = True
def evaluate(self, y_true, y_pred, y_pred_pos_score=None, y_pred_scores_all=None, formatting=False):
""" evalution
Args:
y_true:
y_pred:
y_pred_pos_score:
formatting:
Returns:
"""
result = dict()
for metric in self.__metrics:
if metric == 'auc':
result[metric] = getattr(self, metric)(y_true, y_pred_pos_score)
elif metric.startswith('auc@'):
field, target = metric.split('@')
if target == 'average':
results = []
for i in range(len(y_pred_scores_all[0])):
results.append(self.auc(y_true, np.array(y_pred_scores_all)[:, i]))
result[metric] = np.mean(results)
else:
result[metric] = self.auc(y_true, np.array(y_pred_scores_all)[:, self.__label_indices[target]])
else:
result[metric] = getattr(self, metric)(y_true, y_pred)
self.__last_result = result
if formatting is True:
ret = self.format_result(result)
else:
ret = result
return ret
def compare(self, current_result, previous_result, metric=None):
"""
Args:
current_result:
previous_result:
metric:
Returns:
current better than previous: 1
current worse than previous: -1
current equal to previous: 0
"""
if previous_result is None:
return 1
if metric is None:
metric = self.__first_metric
# by default, metrics are the bigger, the better
small_better_metrics = set(['MSE', 'RMSE'])
if not metric in small_better_metrics:
if current_result > previous_result:
return 1
elif current_result < previous_result:
return -1
else:
return 0
else:
if current_result > previous_result:
return -1
elif current_result < previous_result:
return 1
else:
return 0
def get_first_metric_result(self):
return self.__last_result[self.__first_metric]
def get_supported_metrics(self):
except_methods = ["evaluate", "format_result", "get_supported_metrics", "get_first_metric_result", "normalize_answer"]
supported_metrics = []
for name in dir(self):
if callable(getattr(self, name)) and name.startswith("_") is False and not name in except_methods:
supported_metrics.append(name)
return supported_metrics
def format_result(self, result):
return "; ".join(["%s: %.6f" % (metric, result[metric]) for metric in self.__metrics])
def auc(self, y_true, y_pred_pos_score):
assert y_pred_pos_score is not None, "Prediction confidence of positive label should not be None for auc metric!"
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred_pos_score, pos_label=self.__pos_label)
return metrics.auc(fpr, tpr)
def accuracy(self, y_true, y_pred):
return metrics.accuracy_score(y_true, y_pred)
    def seq_tag_f1(self, y_true, y_pred):
        '''
        :param y_true:
        :param y_pred:
        :return:
        '''
assert self.__tagging_scheme is not None, "Please define tagging scheme!"
sent_num = len(y_pred)
golden_full = []
predict_full = []
right_full = []
for idx in range(0, sent_num):
            golden_list = y_true[idx]
predict_list = y_pred[idx]
if self.__tagging_scheme == "BMES" or self.__tagging_scheme == "BIOES":
gold_matrix = get_ner_BIOES(golden_list)
pred_matrix = get_ner_BIOES(predict_list)
elif self.__tagging_scheme == "BIO":
gold_matrix = get_ner_BIO(golden_list)
pred_matrix = get_ner_BIO(predict_list)
else:
# raise Exception("DETECT UNKNOWN TAGGING SCHEMES! YOU CAN USE OUR SCRIPT TO CONVERT TAG SCHEME!")
raise Exception("DETECT UNKNOWN TAGGING SCHEMES!")
right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))
golden_full += gold_matrix
predict_full += pred_matrix
right_full += right_ner
right_num = len(right_full)
golden_num = len(golden_full)
predict_num = len(predict_full)
if predict_num == 0:
precision = -1
else:
precision = (right_num + 0.0) / predict_num
if golden_num == 0:
recall = -1
else:
recall = (right_num + 0.0) / golden_num
if (precision == -1) or (recall == -1) or (precision + recall) <= 0.:
f_measure = -1
else:
f_measure = 2 * precision * recall / (precision + recall)
return f_measure
    def seq_tag_accuracy(self, y_true, y_pred):
        '''
        :param y_true:
        :param y_pred:
        :return:
        '''
sent_num = len(y_pred)
right_tag = 0
all_tag = 0
for idx in range(0, sent_num):
            golden_list = y_true[idx]
predict_list = y_pred[idx]
for idy in range(len(golden_list)):
if golden_list[idy] == predict_list[idy]:
right_tag += 1
all_tag += len(golden_list)
accuracy = (right_tag + 0.0) / all_tag
return accuracy
def macro_f1(self, y_true, y_pred):
""" For classification task, calculate f1-score for each label, and find their unweighted mean. This does not take label imbalance into account.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.f1_score(y_true, y_pred, average='macro')
def macro_precision(self, y_true, y_pred):
""" Calculate precision for each label, and find their unweighted mean. This does not take label imbalance into account.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.precision_score(y_true, y_pred, average='macro')
def macro_recall(self, y_true, y_pred):
""" Calculate recall for each label, and find their unweighted mean. This does not take label imbalance into account.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.recall_score(y_true, y_pred, average='macro')
def micro_f1(self, y_true, y_pred):
""" For classification task, calculate f1-score globally by counting the total true positives, false negatives and false positives.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.f1_score(y_true, y_pred, average='micro')
def f1(self, y_true, y_pred):
""" For classification task, calculate f1-score Only report results for the class specified by pos_label. This is applicable only if targets (y_{true,pred}) are binary..
Args:
y_true:
y_pred:
Returns:
"""
return metrics.f1_score(y_true, y_pred)
def micro_precision(self, y_true, y_pred):
""" Calculate precision globally by counting the total true positives, false negatives and false positives.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.precision_score(y_true, y_pred, average='micro')
def micro_recall(self, y_true, y_pred):
""" Calculate recall globally by counting the total true positives, false negatives and false positives.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.recall_score(y_true, y_pred, average='micro')
def weighted_f1(self, y_true, y_pred):
""" Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters ‘macro’ to account for label imbalance; it can result in an F-score that is not between precision and recall.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.f1_score(y_true, y_pred, average='weighted')
def weighted_precision(self, y_true, y_pred):
""" Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters ‘macro’ to account for label imbalance; it can result in an F-score that is not between precision and recall.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.precision_score(y_true, y_pred, average='weighted')
def weighted_recall(self, y_true, y_pred):
""" Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters ‘macro’ to account for label imbalance; it can result in an F-score that is not between precision and recall.
Args:
y_true:
y_pred:
Returns:
"""
return metrics.recall_score(y_true, y_pred, average='weighted')
def MSE(self, y_true, y_pred):
""" mean square error
Args:
y_true: true score
y_pred: predict score
Returns:
"""
return mean_squared_error(y_true, y_pred)
def RMSE(self, y_true, y_pred):
""" root mean square error
Args:
y_true: true score
y_pred: predict score
Returns:
"""
return np.sqrt(mean_squared_error(y_true, y_pred))
def normalize_answer(self, s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def mrc_f1(self, y_true, y_pred):
'''
compute mrc task metric F1
        :param y_true: type list. ground truth answer text
:param y_pred: type list. length is same as y_true, model output answer text.
:return: mrc task F1 score
'''
f1 = total = 0
for single_true, single_pred in zip(y_true, y_pred):
total += 1
prediction_tokens = self.normalize_answer(single_pred).split()
ground_truth_tokens = self.normalize_answer(single_true).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
continue
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 += (2*precision*recall) / (precision+recall)
return 100.0 * f1 / total
def mrc_em(self, y_true, y_pred):
'''
compute mrc task metric EM
:param y_true:
:param y_pred:
:return: mrc task EM score
'''
em = total = 0
for single_true, single_pred in zip(y_true, y_pred):
total += 1
em += (self.normalize_answer(single_true) == self.normalize_answer(single_pred))
return 100.0 * em / total
if __name__ == '__main__':
evaluator = Evaluator(['auc', 'accuracy'])
print(evaluator.get_supported_metrics())
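    # Illustrative extension (assumed data, not from the original file):
    #   y_true, y_pred = [1, 0, 1, 1], [1, 0, 0, 1]
    #   y_score = [0.9, 0.2, 0.4, 0.8]
    #   print(evaluator.evaluate(y_true, y_pred, y_pred_pos_score=y_score,
    #                            formatting=True))  # e.g. "auc: ...; accuracy: ..."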
| [
"[email protected]"
]
| |
21daa653eb1721f4470a8e23b1c4e3f4ac9d37c7 | 1b86187256acfeca198c6683324a6ba37acc378c | /scripts/telocate/telocate_run.py | 5e4b9df1e139b29933a09e26718f2cfeef85f5ff | [
"BSD-2-Clause"
]
| permissive | paa49/mcclintock | 6359e5942913a98290dcfdd6e643f18de0eb0a61 | 10fc9c563911659f34656d06091e8b240c422490 | refs/heads/master | 2022-11-16T06:18:32.217877 | 2020-07-10T17:02:02 | 2020-07-10T17:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | import os
import sys
import subprocess
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import config.telocate.telocate_run as config
def main():
te_gff = snakemake.input.te_gff
sam = snakemake.input.sam
ref_fasta = snakemake.input.ref
median_insert_size_file = snakemake.input.median_insert_size
log = snakemake.params.log
mccutils.log("te-locate","running TE-Locate", log=log)
with open(log,"a") as l:
l.write("TE GFF: "+te_gff+"\n")
l.write("SAM: "+sam+"\n")
l.write("reference fasta: "+ref_fasta+"\n")
telocate = snakemake.params.run_script
out_dir = snakemake.params.out_dir
sam_dir = out_dir+"/sam/"
mccutils.mkdir(sam_dir)
te_locate_sam = sam_dir+"te-locate.sam"
if os.path.exists(te_locate_sam):
os.remove(te_locate_sam)
os.symlink(sam, te_locate_sam)
os.chdir(os.path.dirname(telocate))
median_insert_size = mccutils.get_median_insert_size(median_insert_size_file)
distance = (median_insert_size * config.MIN_DISTANCE)
command = ["perl", telocate, str(config.MAX_MEM), sam_dir, te_gff, ref_fasta, out_dir, str(distance), str(config.MIN_SUPPORT_READS), str(config.MIN_SUPPORT_INDIVIDUALS)]
mccutils.run_command(command, log=log)
mccutils.run_command(["cp", out_dir+"_"+str(distance)+"_reads3_acc1.info", out_dir+"te-locate-raw.info"])
mccutils.log("te-locate", "TE-Locate complete")
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
11826ecb99f3fd727a22ab7dbcd2f75000e497b0 | 9b54e3d58447e917a238b85891020c392c4ac601 | /acmicpc/15921/15921.py | 2ab1ca2cb457c55845667d86a94d84560f016d91 | [
"MIT"
]
| permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | # 연습기록들의 평균값 / 연습 기록들 중 하나를 균일한 확률로 뽑을 때의 기댓값
n = int(input())
if n == 0:
print('divide by zero')
else:
print('1.00')
| [
"[email protected]"
]
| |
97526d54617bd8d4a7ba932dd17601495af62fa6 | b1b77bb1ed47586f96d8f2554a65bcbd0c7162cc | /SPOTIFY/crtauth/crtauth/msgpack_protocol.py | cc458fe3d3c4303eb316a3f064fa1e41a1a39e25 | [
"Apache-2.0"
]
| permissive | DanHefrman/stuff | b3624d7089909972ee806211666374a261c02d08 | b98a5c80cfe7041d8908dcfd4230cf065c17f3f6 | refs/heads/master | 2023-07-10T09:47:04.780112 | 2021-08-13T09:55:17 | 2021-08-13T09:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | # Copyright (c) 2014-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import io
import msgpack
from crtauth import exceptions
from crtauth.constant_time_compare import constant_time_compare
PROTOCOL_VERSION = 1
HMAC_HASH_ALGORITHM = hashlib.sha256
HMAC_SIZE = HMAC_HASH_ALGORITHM().digest_size
class TypeInfo(object):
"""
TypeInfo instances contains extra information about the type of a field
"""
def __init__(self, data_type, size=None, binary=False):
self._data_type = data_type
self._size = size
self._packer = msgpack.Packer(use_bin_type=binary)
def validate(self, data, name):
if not isinstance(data, self._data_type):
raise ValueError("Value for field %s should have been of %s"
% (name, self._data_type))
def pack(self, value, stream):
stream.write(self._packer.pack(value))
class MessageBase(object):
"""
Base class with common functionality for Message and AuthenticatedMessage
"""
__fields__ = None
__magic__ = None
def __init__(self, **kw):
if len(kw) != len(self.__fields__):
raise RuntimeError("Wrong number of constructor parameters, "
"expected %d got %d",
len(self.__fields__), len(kw))
for key, _ in self.__fields__:
val = kw.get(key, None)
if val is None:
raise RuntimeError(
"Missing required argument '%s'" % key)
setattr(self, key, val)
def _do_serialize(self):
if self.__magic__ is None or self.__fields__ is None:
raise RuntimeError(
"Serialization can only be performed on classes implementing "
"__fields__ and __magic__")
buf = io.BytesIO()
msgpack.pack(PROTOCOL_VERSION, buf)
msgpack.pack(self.__magic__, buf)
for name, type_info in self.__fields__:
value = getattr(self, name)
type_info.validate(value, name)
type_info.pack(value, buf)
return buf
@classmethod
def _do_deserialize(cls, serialized):
stream = io.BytesIO(serialized)
unpacker = msgpack.Unpacker(stream)
version = unpacker.unpack()
if version != PROTOCOL_VERSION:
raise exceptions.ProtocolError(
"Wrong version, expected %d got %d" % (PROTOCOL_VERSION,
version))
magic = unpacker.unpack()
if magic != cls.__magic__:
raise exceptions.ProtocolError(
"Wrong magic, expected %d got %d" % (cls.__magic__, magic))
kw = dict()
for name, type_info in cls.__fields__:
kw[name] = unpacker.unpack()
return cls(**kw), unpacker
@classmethod
def deserialize(cls, serialized):
return cls._do_deserialize(serialized)[0]
class Message(MessageBase):
"""
Base class for messages not authenticated with a HMAC code
"""
def serialize(self):
return self._do_serialize().getvalue()
class AuthenticatedMessage(MessageBase):
"""
Base class for messages authenticated with a HMAC code
"""
def serialize(self, hmac_secret):
"""
Serialises this instance into the serialization format and appends
a SHA256 HMAC at the end computed using the provided hmac_secret
"""
buf = self._do_serialize()
offset = buf.tell()
buf.seek(0)
mac = hmac.new(hmac_secret, buf.read(), HMAC_HASH_ALGORITHM)
buf.seek(offset)
buf.write(msgpack.Packer(use_bin_type=True).pack(mac.digest()))
return buf.getvalue()
@classmethod
def deserialize_authenticated(cls, serialized, hmac_secret):
"""
Deserialises instances of this class, validating the HMAC appended
at the end using the provided hmac_secret
"""
instance, unpacker = cls._do_deserialize(serialized)
# the extra 2 bytes taken off is the serialization overhead of byte
# strings shorter than 256 bytes.
calculated_mac = hmac.new(hmac_secret, serialized[:-HMAC_SIZE-2],
HMAC_HASH_ALGORITHM).digest()
stored_mac = unpacker.unpack()
if not constant_time_compare(calculated_mac, stored_mac):
# TODO better exception, perhaps?
raise exceptions.BadResponse("Invalid authentication code")
return instance
class Challenge(AuthenticatedMessage):
"""
A challenge.
"""
__magic__ = ord('c')
__fields__ = (
("unique_data", TypeInfo(str, 20, binary=True)),
("valid_from", TypeInfo(int)),
("valid_to", TypeInfo(int)),
("fingerprint", TypeInfo(str, 6, binary=True)),
("server_name", TypeInfo(str)),
("username", TypeInfo(str))
)
class Response(Message):
"""
A response (a copy of the challenge plus a signature)
"""
__magic__ = ord('r')
__fields__ = (
("challenge", TypeInfo(str, binary=True)),
("signature", TypeInfo(str, binary=True)),
)
class Token(AuthenticatedMessage):
"""
Represents a token used to authenticate the user
"""
__magic__ = ord("t")
__fields__ = (
("valid_from", TypeInfo(int)),
("valid_to", TypeInfo(int)),
("username", TypeInfo(str))
)
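# Illustrative round trip (not part of the original module; all values made up):
#   secret = "server-hmac-secret"
#   c = Challenge(unique_data="A" * 20, valid_from=0, valid_to=60,
#                 fingerprint="abcdef", server_name="example.com",
#                 username="alice")
#   wire = c.serialize(secret)                        # msgpack payload + HMAC
#   again = Challenge.deserialize_authenticated(wire, secret)  # verifies the HMAC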
| [
"[email protected]"
]
| |
005031d4c1b14983cc629f1a19fc91dbd91a81a9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02725/s568490525.py | 2affcb786486d1c8e87015b5691b632584056eb6 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | K,N=map(int,input().split())
A=list(map(int,input().split()))
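# The N houses sit on a circle of circumference K; B holds the gaps between
# neighbours. Visiting every house while skipping the single largest gap
# costs K - max(B).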
B=[0]*N
for i in range(N-1):
B[i]=A[i+1]-A[i]
B[N-1]=K-A[N-1]+A[0]
print(K-max(B)) | [
"[email protected]"
]
| |
1cbaa703c33b556e2bd7081dd2bb80906cb5e97f | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/MSYS/python/Tools/scripts/pyvenv.py | f84f4c78c804cdf82c673b71cf914dfb40eba39c | [
"Apache-2.0",
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python3
if __name__ == '__main__':
import sys
import pathlib
executable = pathlib.Path(sys.executable or 'python3').name
    print('WARNING: the pyvenv script is deprecated in favour of '
f'`{executable} -m venv`', file=sys.stderr)
rc = 1
try:
import venv
venv.main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
| [
"[email protected]"
]
| |
165628f55fbde0ee362db96cde96f48396556eb5 | fae2430e2e7717704f9c454f75ec1cd17e0831a9 | /tf_quant_finance/math/integration/integration_test.py | 803aa92c54cf378a36efe302e9b332d8ccac46f8 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
]
| permissive | gsamarakoon/tf-quant-finance | ae00f12ab5f8bbf85c515a53379db234bd619802 | 7873ea202ec50059014836b950881239e7d154fa | refs/heads/master | 2020-09-06T06:42:49.670227 | 2019-11-07T16:20:44 | 2019-11-07T16:21:10 | 220,354,269 | 1 | 0 | Apache-2.0 | 2019-11-08T00:28:13 | 2019-11-08T00:28:12 | null | UTF-8 | Python | false | false | 6,555 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for numeric integration methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from scipy import special
import tensorflow as tf
import tf_quant_finance as tff
tff_int = tff.math.integration
IntegrationTestCase = collections.namedtuple('IntegrationTestCase', [
'func',
'lower',
'upper',
'antiderivative',
])
# pylint:disable=g-long-lambda
BASIC_TEST_CASES = [
IntegrationTestCase(
func=lambda x: tf.exp(2 * x + 1),
lower=1.0,
upper=3.0,
antiderivative=lambda x: np.exp(2 * x + 1) / 2,
),
IntegrationTestCase(
func=lambda x: x**5,
lower=-10.0,
upper=100.0,
antiderivative=lambda x: x**6 / 6,
),
IntegrationTestCase(
func=lambda x: (x**3 + x**2 - 4 * x + 1) / (x**2 + 1)**2,
lower=0.0,
upper=10.0,
antiderivative=lambda x: sum([
2.5 / (x**2 + 1),
0.5 * np.log(x**2 + 1),
np.arctan(x),
]),
),
IntegrationTestCase(
func=lambda x: (tf.sinh(2 * x) + 3 * tf.sinh(x)) /
(tf.cosh(x)**2 + 2 * tf.cosh(0.5 * x)**2),
lower=2.0,
upper=4.0,
antiderivative=lambda x: sum([
np.log(np.cosh(x)**2 + np.cosh(x) + 1),
(4 / np.sqrt(3)) * np.arctan((1 + 2 * np.cosh(x)) / np.sqrt(3.0)),
]),
),
IntegrationTestCase(
func=lambda x: tf.exp(2 * x) * tf.math.sqrt(tf.exp(x) + tf.exp(2 * x)),
lower=2.0,
upper=4.0,
antiderivative=lambda x: sum([
np.sqrt((np.exp(x) + np.exp(2 * x))**3) / 3,
-(1 + 2 * np.exp(x)) * np.sqrt(np.exp(x) + np.exp(2 * x)) / 8,
np.log(np.sqrt(1 + np.exp(x)) + np.exp(0.5 * x)) / 8,
]),
),
IntegrationTestCase(
func=lambda x: tf.exp(-x**2),
lower=0.0,
upper=1.0,
antiderivative=lambda x: 0.5 * np.sqrt(np.pi) * special.erf(x),
),
]
TEST_CASE_RAPID_CHANGE = IntegrationTestCase(
func=lambda x: 1.0 / tf.sqrt(x + 1e-6),
lower=0.0,
upper=1.0,
antiderivative=lambda x: 2.0 * np.sqrt(x + 1e-6),
)
class IntegrationTest(tf.test.TestCase):
def _test_batches_and_types(self, integrate_function, args):
"""Checks handling batches and dtypes."""
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
a = [[0.0, 0.0], [0.0, 0.0]]
b = [[np.pi / 2, np.pi], [1.5 * np.pi, 2 * np.pi]]
a = [a, a]
b = [b, b]
k = tf.constant([[[[1.0]]], [[[2.0]]]])
func = lambda x: tf.cast(k, dtype=x.dtype) * tf.sin(x)
ans = [[[1.0, 2.0], [1.0, 0.0]], [[2.0, 4.0], [2.0, 0.0]]]
results = []
for dtype in dtypes:
lower = tf.constant(a, dtype=dtype)
upper = tf.constant(b, dtype=dtype)
results.append(integrate_function(func, lower, upper, **args))
results = self.evaluate(results)
for i in range(len(results)):
assert results[i].dtype == dtypes[i]
assert np.allclose(results[i], ans, atol=1e-3)
def _test_accuracy(self, integrate_function, args, test_case, max_rel_error):
func = test_case.func
lower = tf.constant(test_case.lower, dtype=tf.float64)
upper = tf.constant(test_case.upper, dtype=tf.float64)
exact = test_case.antiderivative(
test_case.upper) - test_case.antiderivative(test_case.lower)
approx = integrate_function(func, lower, upper, **args)
approx = self.evaluate(approx)
assert np.abs(approx - exact) <= np.abs(exact) * max_rel_error
def _test_gradient(self, integrate_function, args):
"""Checks that integration result can be differentiated."""
# We consider I(a) = int_0^1 cos(ax) dx.
# Then dI/da = (a*cos(a) - sin(a))/a^2.
def integral(a):
return integrate_function(
lambda x: tf.cos(a * x), 0.0, 1.0, dtype=tf.float64, **args)
a = tf.constant(0.5, dtype=tf.float64)
di_da = tff.math.fwd_gradient(integral, a)
true_di_da = lambda a: (a * np.cos(a) - np.sin(a)) / (a**2)
self.assertAllClose(self.evaluate(di_da), true_di_da(0.5))
def test_integrate_batches_and_types(self):
self._test_batches_and_types(tff_int.integrate, {})
for method in tff_int.IntegrationMethod:
self._test_batches_and_types(tff_int.integrate, {'method': method})
def test_integrate_accuracy(self):
for test_case in BASIC_TEST_CASES:
self._test_accuracy(tff_int.integrate, {}, test_case, 1e-8)
for method in tff_int.IntegrationMethod:
self._test_accuracy(tff_int.integrate, {'method': method}, test_case,
1e-8)
def test_integrate_gradient(self):
for method in tff_int.IntegrationMethod:
self._test_gradient(tff_int.integrate, {'method': method})
def test_integrate_int_limits(self):
for method in tff_int.IntegrationMethod:
result = tff_int.integrate(tf.sin, 0, 1, method=method, dtype=tf.float64)
result = self.evaluate(result)
self.assertAllClose(0.459697694, result)
def test_simpson_batches_and_types(self):
self._test_batches_and_types(tff_int.simpson, {})
def test_simpson_accuracy(self):
for test_case in BASIC_TEST_CASES:
self._test_accuracy(tff_int.simpson, {}, test_case,
1e-8)
def test_simpson_rapid_change(self):
self._test_accuracy(tff_int.simpson,
{'num_points': 1001}, TEST_CASE_RAPID_CHANGE, 2e-1)
self._test_accuracy(tff_int.simpson,
{'num_points': 10001}, TEST_CASE_RAPID_CHANGE, 3e-2)
self._test_accuracy(tff_int.simpson,
{'num_points': 100001}, TEST_CASE_RAPID_CHANGE, 5e-4)
self._test_accuracy(tff_int.simpson,
{'num_points': 1000001}, TEST_CASE_RAPID_CHANGE, 3e-6)
def test_simpson_gradient(self):
self._test_gradient(tff_int.simpson, {})
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
]
| |
b8cd5d4e2ecee18362ea956d197bd10a0d9c3445 | 856a1d6c6737ee3d42888831d7da3142ec11d75a | /cpt2wgt.py | 0a2649be8d41ac38c2172f78249af3222274b850 | [
"MIT"
]
| permissive | kevincao91/Tools | 81d499dcac04987724142d6c7e77fa2629707f3c | 9de429d066b4c601afd32ba09360995297736d2f | refs/heads/master | 2023-05-12T07:41:26.736274 | 2023-05-05T08:29:49 | 2023-05-05T08:29:49 | 225,775,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | import pickle
import os, sys
cptPath=sys.argv[1]
wgtPath=cptPath
with open(cptPath,'rb') as f:
data = pickle.load(f,encoding='latin1')
keys = data['blobs'].keys()
# needs = ['conv','res','fpn',]
not_needs = ['fc1000','momentum']
output_dic={'blobs':{}}
print('filtered out:')
for key in keys:
keep = True
# for need in needs:
# if key.startswith(need):
# keep=True
for not_need in not_needs:
if not_need in key:
keep=False
break
if keep:
# if 'score' in key:
# print(key)
output_dic['blobs'][key] = data['blobs'][key]
#print(key)
else:
print(' - '+key)
#print(output_dic['blobs'].keys())
with open(wgtPath,'wb') as f:
pickle.dump(output_dic,f,protocol=0)
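# Example invocation (illustrative; the path is a placeholder):
#   python cpt2wgt.py model_final.pkl
# The checkpoint is rewritten in place with fc1000/momentum blobs removed.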
| [
"[email protected]"
]
| |
1891b8efa206d3ac3a27653452945bb1c3676750 | 84ebacfa7c91348f1275f3945f7ee3567b91d458 | /MusicObjectDetector/image_color_inverter.py | 31eb7267ffda5824a9d056a90cab11ed330bdde6 | [
"MIT",
"Apache-2.0"
]
| permissive | apacha/MusicObjectDetector-TF | 4f40f639ac1240150dcbf5e489569f80878d2008 | d32cf96575c995f4d5b634e4dbb876845e3bcd2a | refs/heads/master | 2022-11-01T03:33:31.589657 | 2022-10-09T20:26:52 | 2022-10-09T20:26:52 | 112,597,906 | 83 | 32 | Apache-2.0 | 2022-10-09T20:26:53 | 2017-11-30T10:25:08 | Python | UTF-8 | Python | false | false | 1,621 | py | import argparse
import os
from glob import glob
from PIL import Image, ImageOps
from tqdm import tqdm
class ImageColorInverter:
""" Class for inverting white-on-black images to black-on-white images """
def __init__(self) -> None:
super().__init__()
def invert_images(self, image_directory: str, image_file_ending: str):
"""
        Converts the white-on-black images in a directory to black-on-white images, in place
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--image_directory",
type=str,
default="../data/fornes_raw",
help="The directory, where a dataset can be found, that needs to be inverted, e.g. the original Fornés dataset")
parser.add_argument("--image_file_ending", type=str, default="*.bmp", )
flags, unparsed = parser.parse_known_args()
image_inverter = ImageColorInverter()
image_inverter.invert_images(flags.image_directory, flags.image_file_ending)
| [
"[email protected]"
]
| |
837cfc52b7ee3e3a72063c4fa5e18a1c4da0b41c | e97ddcfba5fe4b002fe86b960f1c696aa87c10c0 | /tests/test_ec2/test_vm_import.py | ef3e5e1a5702deac471b36f73ecf21d46df530d6 | [
"Apache-2.0"
]
| permissive | flozano/moto | 5488bc0849f9c767c7ed1af35f358e3a1043c6dd | dacf9afd2d3346ce2a23dfa6a2621fe69aba0b03 | refs/heads/master | 2021-01-21T00:56:53.527482 | 2013-07-17T15:31:31 | 2013-07-17T15:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | import boto
import sure # flake8: noqa
from moto import mock_ec2
@mock_ec2
def test_vm_import():
pass
| [
"[email protected]"
]
| |
a6170160382a184522bd7e3775027053bd5cd7db | 364ec3089ac2dcdab887518ac1b816f1c0d2858e | /Fase10/Desafios/Desafio_032.py | b67d2c478d6671e29f0745ef3f89062e5b767c47 | []
| no_license | loc-dev/CursoEmVideo-Python | 20488925da782677b9849944f9bbfd0d862e2b8f | d1eafebbfa88ca70ea0681e45edce6924a9c26d5 | refs/heads/master | 2022-12-05T22:35:23.056806 | 2020-08-26T20:20:00 | 2020-08-26T20:20:00 | 255,226,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # Fase 10 - Condições ( Parte 1 )
# Desafio 32
# Faça um programa que leia três números
# e mostre qual é maior e qual é o menor.
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
n3 = int(input('Enter the third number: '))
# Compare each number against both others to find the extremes.
if n1 >= n2 and n1 >= n3:
    largest = n1
elif n2 >= n1 and n2 >= n3:
    largest = n2
else:
    largest = n3
if n1 <= n2 and n1 <= n3:
    smallest = n1
elif n2 <= n1 and n2 <= n3:
    smallest = n2
else:
    smallest = n3
print('The largest number is {}'.format(largest))
print('The smallest number is {}'.format(smallest))
| [
"[email protected]"
]
| |
ee2b759f34b601d9e0a5b19d7e40fdfbbb995092 | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/autodiff/__init__.py | 9aff8afae8ce5d4673c70fa11a102c5053d8bfc3 | []
| no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:460dbd09fb995b71b073b0f30f23ef0db5586ce34f01294ee54ca7d366f974a1
size 413
| [
"[email protected]"
]
| |
820b9f97995a650d7f84aa1abfefe03c929c296e | 4ff8676136167cdd81d7a983272102fff86360e8 | /python/404. 左叶子之和.py | 3f7495600a4a204120f687388c5274a274da8162 | []
| no_license | geniuscynic/leetcode | 0ec256af2377d19fee22ce736462a7e95e3f4e67 | 379a8f27f8213951ee8be41bd56598036995d267 | refs/heads/master | 2023-07-19T07:22:20.001770 | 2021-09-07T14:50:40 | 2021-09-07T14:50:40 | 297,277,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | import sys
from collections import defaultdict
from collections import Counter
from collections import deque
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def helper(self, root: TreeNode, parentNode):
if not root:
return 0
if not root.left and not root.right and parentNode.left and parentNode.left == root:
return root.val
return self.helper(root.left, root) + self.helper(root.right, root)
def sumOfLeftLeaves(self, root: TreeNode) -> int:
return self.helper(root, root)
def isleaf(self, root: TreeNode):
if not root.left and not root.right:
return True
return False
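    # Descriptive note: the variant below adds a left child's value when that
    # child is a leaf and recurses into any non-leaf child; right-hand leaves
    # are deliberately skipped.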
def sumOfLeftLeaves_1(self, root: TreeNode) -> int:
if not root:
return 0
#if not root.left and not root.right:
#return root.val
res = 0
if root.left:
res += root.left.val if self.isleaf(root.left) else self.sumOfLeftLeaves_1(root.left)
if root.right and not self.isleaf(root.right):
res += self.sumOfLeftLeaves_1(root.right)
return res
def convertToTree():
ls =deque([-6,8,-4,8,-5,-1,None,-9,9,8,8,None,None,-5,6,None,None,None,-4,None,4,None,None,8,8,None,None,None,5,None,None,None,None,None,-9])
temp = TreeNode(ls.popleft())
res = deque()
res.append(temp)
while ls:
left = ls.popleft()
right = ls.popleft()
node = res.popleft()
#print(node.val, left, right)
if left != None:
node.left = TreeNode(left)
res.append(node.left)
if right != None:
node.right = TreeNode(right)
res.append(node.right)
return temp
if __name__ == "__main__":
solution = Solution()
    nums1 = convertToTree()
m = TreeNode(2)
nums2 = TreeNode(4)
n = 3
result = solution.sumOfLeftLeaves_1(nums1)
#print(solution.ls)
print(result) | [
"[email protected]"
]
| |
b77a8520217da5787c934d1d79e6e0b831e46e6d | 039c5b793ace774bb815f4061a273ff098efd475 | /service/venv/bin/easy_install-3.5 | b8d6d4e653a8969421dd255380f763334d7b2df5 | []
| no_license | zzyzx4/soft | b7872a1c1e2dc91912f22aaaf96f2cedaf1423c1 | 264c399ddef2b55efd8a1a8b796320f72c6dec7c | refs/heads/master | 2022-12-16T20:50:45.512689 | 2019-07-01T11:38:12 | 2019-07-01T11:38:12 | 184,214,960 | 0 | 0 | null | 2022-12-08T05:07:18 | 2019-04-30T07:38:24 | null | UTF-8 | Python | false | false | 446 | 5 | #!/home/user/PycharmProjects/service/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.5')()
)
| [
"[email protected]"
]
| |
a6fdcba7f9380ceaab8bb1fef39cbc7f2713c220 | 8ac36a81c150432a989ac20c622d41f3e0d88625 | /recruitment/recruitment/doctype/type/type.py | 12aca933d5ba6247e8a6ade2221095ece4b498af | [
"MIT"
]
| permissive | asoral/recruitment | ed85fd4ef2fa7f16ec0098cb80dd67e792fc3ead | bcfdfd9ffe6b493cc79565b0bc1055bee6299645 | refs/heads/master | 2021-01-04T10:33:08.573635 | 2020-01-09T13:15:12 | 2020-01-09T13:15:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Type(Document):
pass
| [
"[email protected]"
]
| |
23f246e57df6bb3cbf79901d0d81e4121278878e | 4ce0f35c6aa01f5041a11979a8b5662d8ad08962 | /learning_machine/brain.py | 27f04210e42fe2f21713eb36f846e0542c11fc30 | []
| no_license | lanlanzky/tt | f125785b00b51774c9033492117305dfba19fb8f | 4666af6a250a48200f5af9ef9692da53bbfcd79d | refs/heads/master | 2016-09-06T02:19:18.492453 | 2014-09-01T13:26:55 | 2014-09-01T13:26:55 | 23,542,631 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | #coding=utf8
from numpy import *
from django.http import HttpResponse
from django.shortcuts import render_to_response
from stock.models import New_stock
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
# Min-max normalization helper
def normal(record):
return [ "%.5f" % round(float((i-min(record)))/(max(record)-min(record)),4) for i in record]
# Denormalization: map values back to the original range
def backnormal(backdata,outdata):
large=max(backdata)
small=min(backdata)
bizhi=large-small
for i in range(len(outdata)):
for j in range(len(outdata[1])):
outdata[i][j]=outdata[i][j]*bizhi+small
return outdata
# Normalize the experiment data
def newalldate(alldate, n):
newalldate=[]
allopen=[]
allhigh=[]
alllow=[]
allclose=[]
allvolumn=[]
alladjclose=[]
for date in alldate:
allopen.append(date.open)
allhigh.append(date.high)
alllow.append(date.low)
allclose.append(date.close)
allvolumn.append(date.volume)
alladjclose.append(date.adjclose)
newallopen=normal([ float(i) for i in allopen])
newallhigh=normal([ float(i) for i in allhigh])
newalllow=normal([ float(i) for i in alllow])
newallclose=normal([ float(i) for i in allclose])
newallvolume=normal([ float(i) for i in allvolumn])
newalladjclose=normal([ float(i) for i in alladjclose])
    for i in range(n):
new=[]
new.append(newallopen[i])
new.append(newallhigh[i])
new.append(newalllow[i])
new.append(newallclose[i])
new.append(newallvolume[i])
new.append(newalladjclose[i])
newalldate.append(new)
return newalldate
# Use the neural network to predict the maximum
# Use the neural network to predict the minimum
| [
"[email protected]"
]
| |
15a49b8d9a456333def2e0f3c0135a0c1957b1bc | 529833339de2d1f78ec79d4bbe7e6f174fd66779 | /alignments/select.py | 0387674d35b0808a4fa574902bfe1447bb5ef0f1 | []
| no_license | standage/EmexAssemblyMay2016 | 01f98c69b481e9c7670d35a82c62628b53747927 | 7a22a17fa0ff6b28262b5da5e906a9554862bcac | refs/heads/master | 2021-01-18T10:44:31.587771 | 2016-05-20T16:20:44 | 2016-05-20T16:20:44 | 59,306,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
from __future__ import print_function

import sys
def parse(data):
name, seq = None, []
for line in data:
line = line.rstrip()
if line.startswith('>'):
if name:
yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
if __name__ == '__main__':
for defline, seq in parse(sys.stdin):
if len(seq) > 250000:
print(defline)
print(seq)
| [
"[email protected]"
]
| |
c73e7e538899e34cc1dd877afb850c40d7e3a7f6 | e8fa46e0e5318c229a49b2803910e12e4d29884e | /interviewbit/Hashing/2-SumBook.py | 8bd2842ecf7da92e021b911c43628a7a180a5bea | []
| no_license | igorbragaia/algorithms | e6bc71b0612a65b2650c259aa2cdec593b9f6c53 | 0b4204c5a11d736c7299bd8c485e325eed630a19 | refs/heads/master | 2021-12-12T13:49:13.226739 | 2021-11-27T02:51:23 | 2021-11-27T02:51:23 | 106,027,078 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | class Solution:
# @param A : tuple of integers
# @param B : integer
# @return a list of integers
def twoSum(self, A, B):
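        # Descriptive note: new_hash maps each value to all of its indices;
        # for every i we look up complements B - A[i] at a later index and
        # collect 1-based (index2, index1) pairs, then sorting picks the pair
        # with the smallest second index (ties broken by the first).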
lista = []
new_hash = {}
for i in range(len(A)):
if A[i] not in new_hash:
new_hash[A[i]] = [i]
else:
new_hash[A[i]].append(i)
for i in range(len(A)):
if B - A[i] in new_hash:
temp = [x for x in new_hash[B - A[i]] if x > i]
if len(temp) > 0:
lista.append((min(temp) + 1, i +1))
lista = sorted(lista)
        if lista:
return (lista[0][1], lista[0][0])
return lista | [
"[email protected]"
]
| |
6e84d40880efd0710dc18b037665f1bc62c15700 | f523e7bdd7f616267b82a7f00f2b7cae132dc6b9 | /dicodile/utils/plot_config.py | 402d82f7377e42f69177aa8ef32b9abae42dcd3c | [
"BSD-3-Clause"
]
| permissive | tomMoral/dicodile | 2d7da76be7d32fb05502cbb358fcda0018e5c00c | 5a64fbe456f3a117275c45ee1f10c60d6e133915 | refs/heads/main | 2023-05-25T11:58:05.596455 | 2023-05-19T14:35:04 | 2023-05-19T14:35:04 | 167,703,861 | 17 | 8 | BSD-3-Clause | 2023-05-19T14:35:06 | 2019-01-26T15:26:24 | Python | UTF-8 | Python | false | false | 742 | py |
STYLES = {
'lgcd': {
'color': 'C1',
'linestyle': 'o-',
'hatch': '//',
'label': 'LGCD',
'label_p': 'DiCoDiLe$_Z$'
},
'greedy': {
'color': 'C0',
'linestyle': 's-',
'hatch': None,
'label': 'Greedy',
'label_p': 'Dicod'
},
'cyclic': {
'color': 'C2',
'linestyle': '^-',
'hatch': None,
'label': 'Cyclic',
'label_p': 'Cyclic'
},
}
def get_style(name, *keys, parallel=False):
all_style = STYLES[name]
style = {
'label': all_style['label_p'] if parallel else all_style['label'],
'color': all_style['color']
}
for k in keys:
style[k] = all_style[k]
return style
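if __name__ == "__main__":
    # Illustrative usage (this demo block is not in the original module):
    # fetch the LGCD style with its hatch pattern, labelled for a parallel run.
    print(get_style('lgcd', 'hatch', parallel=True))
    # -> {'label': 'DiCoDiLe$_Z$', 'color': 'C1', 'hatch': '//'}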
| [
"[email protected]"
]
| |
075948fe95a02e474ee41679d278ff9a4a2253ec | 6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01 | /mark_big_words.py | acf70f3d66db359248c9b464095c9d13919c5604 | []
| no_license | waiteb15/py3intro | 325dafaaa642052280d6c050eacf8b406b40e01d | 68b30f147e7408220490a46d3e595acd60513e9e | refs/heads/master | 2020-03-27T10:50:25.928836 | 2019-02-28T21:47:11 | 2019-02-28T21:47:11 | 146,448,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | #!/usr/bin/env python
import re
input_file_name = 'DATA/parrot.txt'
output_file_name = 'bigwords.txt'
pattern = r'\w{8,}'
def doit(m):
return f"**{m.group(0)}**"
with open(input_file_name) as parrot_in:
with open(output_file_name, 'w') as bigwords_out:
text = parrot_in.read()
new_text = re.sub(pattern, doit, text)
bigwords_out.write(new_text)
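# e.g. an 8+ character word such as "Norwegian" is rewritten as "**Norwegian**".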
| [
"[email protected]"
]
| |
93bff11c5085d5d9b492a51224c4a331395ffe4b | f6c6e0ebc18b7b1a28c23367f62c960e86194c88 | /pythonmisc/qimage2ndarray/qt_driver.py | 483cd287af1976d6c665860fa84d3304ece7273d | []
| no_license | TheGrim1/python_work | 9316d6fbb71a4be9bd901f104e939949dfd91174 | 5b34277aed4c06b62276644160e0aa97a4260233 | refs/heads/master | 2021-01-11T13:54:54.366575 | 2019-03-12T12:38:39 | 2019-03-12T12:38:39 | 94,876,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,663 | py | # Copyright 2014-2014 Hans Meine <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a wrapper around three different Qt python bindings.
It will dynamically decide which one to use:
* First, the environment variable QT_DRIVER is checked
(may be one of 'PyQt5', 'PyQt4', 'PySide', 'PythonQt').
* If unset, previously imported binding modules are detected (in sys.modules).
* If no bindings are loaded, the environment variable QT_API is checked
(used by ETS and ipython, may be 'pyside' or 'pyqt').
In order to have compatible behavior between the different bindings,
PyQt4 (if used) is configured as follows::
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
Furthermore, there is a 'getprop' function that solves the following
problem: PythonQt exports Qt properties as Python properties *and*
gives the precedence over getters with the same name. Instead of
calling getters with parentheses (which must not be used in PythonQt,
but are required in PyQt and PySide), one may e.g. write
`getprop(widget.width)`.
"""
import sys, os
def getprop_PythonQt(prop):
"""getprop(property_or_getter)
Used on getters that have the same name as a corresponding
property. For PythonQt, this version will just return the
argument, which is assumed to be (the value of) a python property
through which PythonQt exposes Qt properties."""
return prop
def getprop_other(getter):
"""getprop(property_or_getter)
Used on getters that have the same name as a corresponding
property. For Qt bindings other than PythonQt, this version will
return the result of calling the argument, which is assumed to be
a Qt getter function. (With PythonQt, properties override getters
and no calling must be done.)"""
return getter()
class QtDriver(object):
DRIVERS = ('PyQt5', 'PyQt4', 'PySide', 'PythonQt')
DEFAULT = 'PyQt4'
@classmethod
def detect_qt(cls):
for drv in cls.DRIVERS:
if drv in sys.modules:
return drv
if '_PythonQt' in sys.modules:
return 'PythonQt'
return None
def name(self):
return self._drv
def getprop(self):
return getprop_PythonQt if self._drv == 'PythonQt' else getprop_other
def __init__(self, drv = os.environ.get('QT_DRIVER')):
"""Supports QT_API (used by ETS and ipython)"""
if drv is None:
drv = self.detect_qt()
if drv is None:
drv = os.environ.get('QT_API')
if drv is None:
drv = self.DEFAULT
drv = {'pyside' : 'PySide', 'pyqt' : 'PyQt4', 'pyqt5' : 'PyQt5'}.get(drv, drv) # map ETS syntax
assert drv in self.DRIVERS
self._drv = drv
@staticmethod
def _initPyQt4():
"""initialize PyQt4 to be compatible with PySide"""
if 'PyQt4.QtCore' in sys.modules:
# too late to configure API
pass
else:
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
@staticmethod
def requireCompatibleAPI():
"""If PyQt4's API should be configured to be compatible with PySide's
(i.e. QString and QVariant should not be explicitly exported,
cf. documentation of sip.setapi()), call this function to check that
the PyQt4 was properly imported. (It will always be configured this
way by this module, but it could have been imported before we got a
hand on doing so.)
"""
if 'PyQt4.QtCore' in sys.modules:
import sip
for api in ('QVariant', 'QString'):
if sip.getapi(api) != 2:
raise RuntimeError('%s API already set to V%d, but should be 2' % (api, sip.getapi(api)))
def importMod(self, mod):
if self._drv == 'PyQt4':
self._initPyQt4()
qt = __import__('%s.%s' % (self._drv, mod))
return getattr(qt, mod)
def __getattr__(self, name):
if name.startswith('Qt'):
return self.importMod(name)
return super(QtDriver, self).__getattr__(name)
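# Illustrative usage (not part of the original module):
#   qt = QtDriver()            # binding chosen via QT_DRIVER/QT_API/sys.modules
#   QtCore = qt.QtCore         # lazily imports e.g. PyQt4.QtCore
#   getprop = qt.getprop()     # returns the accessor suited to the binding
#   # getprop(widget.width) works for both PythonQt properties and Qt getters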
| [
"[email protected]"
]
| |
4f7c48e8a99a701068ab27a5eeb99bafc6831225 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/312/usersdata/287/75300/submittedfiles/esferas.py | 970420237374c04f7c3159b9f92c411be16ac468 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # -*- coding: utf-8 -*-
A=int(input('Digite o peso da esfera A: '))
B=int(input('Digite o peso da esfera B: '))
C=int(input('Digite o peso da esfera C: '))
D=int(input('Digite o peso da esfera D: '))
if A==B+C+D and B+C==D and B==C:
print('S')
else:
print('N')
| [
"[email protected]"
]
| |
f79a69fdebbb591c2349e3a4bd097ab47249f04a | 0f77deeffc1526d3befcb777ba4faebe2059e0bb | /lstm.py | 27295908eec7e8550fc16f3602f31bf6a21b5d7c | []
| no_license | akshay1997/TRUMP-TWITTER-BOT | 6d082a45ca939ce2f41c9cba8cd6198dadb54428 | 01f781fe2f7eeb71f11d932906b39b26776eafec | refs/heads/master | 2021-01-01T16:44:19.137215 | 2017-07-21T04:59:01 | 2017-07-21T04:59:01 | 97,906,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | import numpy
#3563 = 2850+713 1426 2139 2850
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
lines = []
with open('realDonaldTrump.txt', 'r') as filename:
for line in filename:
lines.append(line)
lines1 = lines[0:713]
lines2 = lines[713:1426]
lines3 = lines[1426:2139]
lines4 = lines[2139:2850]
lines5 = lines[2850:]
raw_text = ''.join(lines1).lower()  # join the selected lines into one string before lowercasing
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
n_chars = len(raw_text)
n_vocab = len(chars)
print "Total Characters: ", n_chars
print "Total Vocab: ", n_vocab
seq_length = 20
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print "Total Patterns: ", n_patterns
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
filepath="weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, nb_epoch=1, batch_size=64, callbacks=callbacks_list)
| [
"[email protected]"
]
| |
fda13f1054a9ca0c6d68fb3f5fb4a24adb1d5213 | 74482894c61156c13902044b4d39917df8ed9551 | /cryptoapis/model/list_transactions_by_block_hash_response_item_blockchain_specific_litecoin_vin.py | ad098de141271999edecf068c597aae890279310 | [
"MIT"
]
| permissive | xan187/Crypto_APIs_2.0_SDK_Python | bb8898556ba014cc7a4dd31b10e24bec23b74a19 | a56c75df54ef037b39be1315ed6e54de35bed55b | refs/heads/main | 2023-06-22T15:45:08.273635 | 2021-07-21T03:41:05 | 2021-07-21T03:41:05 | 387,982,780 | 1 | 0 | NOASSERTION | 2021-07-21T03:35:29 | 2021-07-21T03:35:29 | null | UTF-8 | Python | false | false | 8,985 | py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_litecoin_script_sig import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoinScriptSig
globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoinScriptSig'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoinScriptSig
class ListTransactionsByBlockHashResponseItemBlockchainSpecificLitecoinVin(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'addresses': ([str],), # noqa: E501
'script_sig': (GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoinScriptSig,), # noqa: E501
'sequence': (str,), # noqa: E501
'txid': (str,), # noqa: E501
'txinwitness': ([str],), # noqa: E501
'value': (str,), # noqa: E501
'vout': (int,), # noqa: E501
'coinbase': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'addresses': 'addresses', # noqa: E501
'script_sig': 'scriptSig', # noqa: E501
'sequence': 'sequence', # noqa: E501
'txid': 'txid', # noqa: E501
'txinwitness': 'txinwitness', # noqa: E501
'value': 'value', # noqa: E501
'vout': 'vout', # noqa: E501
'coinbase': 'coinbase', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, addresses, script_sig, sequence, txid, txinwitness, value, vout, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHashResponseItemBlockchainSpecificLitecoinVin - a model defined in OpenAPI
Args:
addresses ([str]):
script_sig (GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoinScriptSig):
sequence (str): Represents the script sequence number.
txid (str): Represents the reference transaction identifier.
txinwitness ([str]):
value (str): Represents the sent/received amount.
vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
coinbase (str): Represents the coinbase hex.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.script_sig = script_sig
self.sequence = sequence
self.txid = txid
self.txinwitness = txinwitness
self.value = value
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
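        # Construction sketch (hypothetical values; the positional arguments
        # mirror the docstring above, script_sig being a pre-built ScriptSig model):
        #   vin = ListTransactionsByBlockHashResponseItemBlockchainSpecificLitecoinVin(
        #       addresses=['ltc1q...'], script_sig=script_sig, sequence='4294967295',
        #       txid='4b66461...', txinwitness=[], value='0.1', vout=0)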
| [
"[email protected]"
]
| |
1cbaac8e46e28a8e424d041eb4c906491546cbea | 936dee544c471013bd1788b441042e22c3522633 | /deploy_tools/fabfile.py | ba1a955ef64a555f54ad8ba7f9d51b656ec4899d | []
| no_license | k5766273/test | bdaa808e1f10112a3f751a499e5890350d9ff733 | b947d5d2b69b510bb17df1f66b9b03c821f141c9 | refs/heads/master | 2023-05-19T20:11:34.849263 | 2021-06-06T01:51:10 | 2021-06-06T01:51:10 | 356,159,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 2,325 | py | from fabric.contrib.files import append, exists, sed
from fabric.api import env, local, run
import random
REPO_URL = 'https://github.com/k5766273/test'
#env.use_ssh_config = True
def deploy():
site_folder = f'/home/ubuntu/sites/{env.host}'
source_folder = site_folder + '/suplerlists'
_create_directory_structure_if_necessary(site_folder)
_get_latest_source(source_folder)
_update_settings(source_folder, env.host)
_update_virtualenv(source_folder)
_update_static_files(source_folder)
_update_database(source_folder)
def _create_directory_structure_if_necessary(site_folder):
for subfolder in ('database', 'static', 'virtualenv', 'suplerlists'):
run(f'mkdir -p {site_folder}/{subfolder}')
def _get_latest_source(source_folder):
if exists(source_folder + '/.git'):
run(f'cd {source_folder} && git fetch')
else:
run(f'git clone {REPO_URL} {source_folder}')
current_commit = local("git log -n 1 --format=%H", capture=True)
run(f'cd {source_folder} && git reset --hard {current_commit}')
def _update_settings(source_folder, site_name):
settings_path = source_folder + '/suplerlists/settings.py'
sed(settings_path, "DEBUG = True", "DEBUG = False")
sed(settings_path,'ALLOWED_HOSTS =.+$',f'ALLOWED_HOSTS = ["{site_name}"]' )
secret_key_file = source_folder + '/suplerlists/secret_key.py'
if not exists(secret_key_file):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
append(secret_key_file, f'SECRET_KEY = "{key}"')
append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def _update_virtualenv(source_folder):
virtualenv_folder = source_folder + '/../virtualenv'
if not exists(virtualenv_folder + '/bin/pip'):
run(f'python3.6 -m venv {virtualenv_folder}')
run(f'{virtualenv_folder}/bin/pip install -r {source_folder}/requirements.txt')
def _update_static_files(source_folder):
run(
f'cd {source_folder}'
' && ../virtualenv/bin/python manage.py collectstatic --noinput'
)
def _update_database(source_folder):
run(
f'cd {source_folder}'
' && ../virtualenv/bin/python manage.py migrate --noinput'
)
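# Invocation sketch (assumes the Fabric 1.x CLI; the host below is illustrative):
#   fab deploy:host=ubuntu@your-server.example.com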
| [
"[email protected]"
]
| |
688f4172210d8b670a6bc922eef372027e2123bf | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/stp/rtstpifpol.py | c74118f5914bc4cd48e40179c05611a98540fed7 | []
| no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,225 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtStpIfPol(Mo):
"""
A target relation to the spanning-tree protocol interface policy.
"""
meta = TargetRelationMeta("cobra.model.stp.RtStpIfPol", "cobra.model.infra.AccGrp")
meta.moClassName = "stpRtStpIfPol"
meta.rnFormat = "rtinfraStpIfPol-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Abstraction of Leaf Access Policy Group"
meta.writeAccessMask = 0x4100000000001
meta.readAccessMask = 0x4100000000011
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.stp.IfPol")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.pol.NFromRef")
meta.rnPrefixes = [
('rtinfraStpIfPol-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 13244, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4387
prop.defaultValueStr = "infraAccGrp"
prop._addConstant("infraAccBndlGrp", None, 4406)
prop._addConstant("infraAccBndlPolGrp", None, 6102)
prop._addConstant("infraAccGrp", None, 4387)
prop._addConstant("infraAccPortGrp", None, 4409)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 13243, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("stpIfPolToPortGroups", "Portgroups", "cobra.model.vmm.EpPD"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("stpIfPolToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("L2IfPolToEthIf", "Interface", "cobra.model.l1.EthIf"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
a22bbe274b90794f62ec8b4f2d459c7a5e30f250 | 8e7c006a81ebbbc60c6750dbb562ebb071a1d8aa | /base/05_basic_convnet.py | 310ef0b149be15f8da82789d0b20368fdd76131c | []
| no_license | xueyangfu/tensorflow-learning | 8b65dbc0e3a437ed2a14b4987c8fe7848ed2a6c4 | ec477ac02ae5c2506819a7f8c147e3774baa3a4a | refs/heads/master | 2021-05-15T16:32:05.194502 | 2017-01-30T12:24:46 | 2017-01-30T12:24:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # -*- coding: utf-8 -*-
"""Simple tutorial following the TensorFlow example of a Convolutional Network.
Parag K. Mital, Jan. 2016"""
# %% Imports
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
from libs.utils import *
import matplotlib.pyplot as plt
# %% Setup input to the network and true output label. These are
# simply placeholders which we'll fill in later.
mnist = input_data.read_data_sets('../datas/mnist/', one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
# %% We'll setup the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 5
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])
# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])
# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
tf.nn.conv2d(input=x_tensor,
filter=W_conv1,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
tf.nn.conv2d(input=h_conv1,
filter=W_conv2,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv2)
# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 7 * 7 * n_filters_2])
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([7 * 7 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)
# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization the
# variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll train in minibatches and report accuracy:
batch_size = 100
n_epochs = 5
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={
x: batch_xs, y: batch_ys, keep_prob: 0.5})
print(sess.run(accuracy,
feed_dict={
x: mnist.validation.images,
y: mnist.validation.labels,
keep_prob: 1.0
}))
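# %% Optionally evaluate on the held-out test split as well (a sketch that
# reuses the same pattern as the per-epoch validation check above):
print(sess.run(accuracy,
               feed_dict={
                   x: mnist.test.images,
                   y: mnist.test.labels,
                   keep_prob: 1.0
               }))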
# %% Let's take a look at the kernels we've learned
W = sess.run(W_conv1)
plt.imshow(montage(W / np.max(W)), cmap='coolwarm') | [
"[email protected]"
]
| |
4be5eaa27be78cccdfc542abce5822a37f988c94 | d2884c24be412a3f04b31e042bc7ee99cda37a96 | /爬虫10/爬虫/chian/chian/middlewares.py | 1acd74b0f58b9e0227b5ef2353a5b25c7e5b7e60 | []
| no_license | a1403893559/xigong | 95c2b24b3e365bff275758901b7315c3255a8f05 | b07f5c26e8bcc7a56bacb4202d266ad56daa182b | refs/heads/master | 2021-10-09T03:18:32.123278 | 2018-12-20T13:03:42 | 2018-12-20T13:03:42 | 116,777,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,595 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ChianSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ChianDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
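    # To activate these classes, list them in the project settings (sketch,
    # assuming the default 'chian' project layout):
    #   SPIDER_MIDDLEWARES = {'chian.middlewares.ChianSpiderMiddleware': 543}
    #   DOWNLOADER_MIDDLEWARES = {'chian.middlewares.ChianDownloaderMiddleware': 543}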
| [
"[email protected]"
]
| |
7582910eaa393cc51825e953f774977c6c676280 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/T/tmayor/test-3.py | df783a0fb6ae2cb6a043ea259a571331baf5e15d | []
| no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,422 | py | # lxml is a complete library for parsing xml and html files. http://codespeak.net/lxml/
# The interface is not totally intuitive, but it is very effective to use,
# especially with cssselect.
import lxml.etree
import lxml.html
print help(lxml.html.parse)
# create an example case
samplehtml = """<html><body>
<h1>hi</h1>
<p class="cccc">something <strong>good</strong>
<p>Another paragraph</p>
<ul class="LLL">
<li class="1">first</li>
<li class="2">second</li>
<li class="1" id="nimble">third <b>jjj</b></li>junk
</ul>
</body></html>"""
#root = lxml.html.fromstring(samplehtml) # an lxml.etree.Element object
# To load directly from a url, use
root = lxml.html.parse('http://www.guardian.co.uk/news/gallery/2010/oct/12/1').getroot()
# Whenever you have an lxml element, you can convert it back to a string like so:
#print lxml.etree.tostring(root)
# Use cssselect to select elements by their css code
#print root.cssselect("li.initially-off") # returns 2 elements
#print root.cssselect("ul #nimble") # returns 1 element
#print root.cssselect(".LLL li") # returns 3 elements
# extracting text from a single element
linimble = root.cssselect("li.initially-off")[0]
#help(linimble) # prints the documentation for the object
print lxml.etree.tostring(linimble) # note how this includes trailing text 'junk'
#print linimble.text # just the text between the tag
#print linimble.tail # the trailing text
#print list(linimble) # prints the <b> object
# This recovers all the code inside the object, including any text markups like <b>
#print linimble.text + "".join(map(lxml.etree.tostring, list(linimble)))
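# text_content() is a one-call alternative that gathers all descendant text
# (it is available on lxml.html elements):
#print linimble.text_content()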
| [
"[email protected]"
]
| |
7bf833a1e59a609e71791510c143929fed4b9090 | de479d4a8af0e070b2bcae4186b15a8eb74971fb | /cn/iceknc/study/h_python_web_server/__init__.py | 4c443bf5e4053a05fd09d03cb7e0b34b24ee6f4b | []
| no_license | iceknc/python_study_note | 1d8f6e38be57e4dc41a661c0a84d6ee223c5a878 | 730a35890b77ecca3d267fc875a68e96febdaa85 | refs/heads/master | 2020-05-19T18:44:55.957392 | 2019-09-27T01:15:54 | 2019-09-27T01:15:54 | 185,160,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # -*- coding: utf-8 -*-
# @Author: 徐志鹏
# @Date : 2019/5/15
# @Desc :
def main():
pass
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
eee8bf0ebe7dad15987813d9178a9e6fc7e754d2 | 33602d2bf63bb038f29f22383c912a06045d7e00 | /v15_pong_supervised/utils.py | d000d2b3eec684a2fc88e0a73247edc928d1c2f5 | []
| no_license | evanthebouncy/nnprog | 26af89726a915d7d3f78131c4f8733cdceb6100e | 576ea87469df2135bf133325d22c23ec4b196a92 | refs/heads/master | 2020-07-06T00:32:23.949875 | 2017-12-06T20:50:18 | 2017-12-06T20:50:18 | 66,980,038 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,127 | py | import numpy as np
import gym
import random
import pickle
import gym.envs.atari
import draw
import sys  # used by same_line_print below
# Preprocesses the given image:
# (1) remove the scoreboard
# (2) make it monochromatic
# (3) make the background black
#
# obs: Image
# return: Image
# Image = np.array([n_rows, n_cols])
def preprocess(obs):
obs = obs[34:194]
obs = obs[::2,::2,0]
obs[obs == 144] = 0
return obs.astype(np.float)
def lose_color(processed_obs):
    return np.clip(processed_obs, 0, 1)
# Assumes that the pixels of the given value in the given image
# exactly form a rectangle (or else there are no pixels of that color).
# Returns the rectangle if it exists, or else None.
#
# val: int
# obs: Image
# return: None | Rectangle
# Image = np.array([n_rows, n_cols])
def _get_rectangle(obs, val):
min_val = np.argmax(obs.ravel() == val)
max_val = len(obs.ravel()) - np.argmax(np.flip(obs.ravel(), 0) == val) - 1
x_pos = min_val % obs.shape[1]
y_pos = min_val / obs.shape[1]
x_len = (max_val % obs.shape[1]) - x_pos + 1
y_len = (max_val / obs.shape[1]) - y_pos + 1
return None if x_pos == 0 and y_pos == 0 and x_len == obs.shape[1] and y_len == obs.shape[0] else np.array([x_pos + x_len/2, y_pos + y_len/2])
# Retrieves the rectangle representing our paddle.
def get_our_paddle(obs):
obs = preprocess(obs)
return _get_rectangle(obs, 92)
# Retrieves the rectangle representing the ball.
def get_ball(obs):
obs = preprocess(obs)
return _get_rectangle(obs, 236)
def same_line_print(message):
sys.stdout.write("\r" + message)
sys.stdout.flush()
def render_state(env, state):
env.reset()
env.restore_full_state(state)
def get_up_down_signal(ob1, ob2):
default_val = np.array([1.0, 0.0, 0.0])
if ob2 is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(ob2)
ball = get_ball(ob2)
if ball is None or paddle is None:
return default_val
return np.array([0.0, 1.0, 0.0]) if paddle[1] >= ball[1] else np.array([0.0, 0.0, 1.0])
def get_simple_signal(ob1, ob2):
def _state1(ob1, ob2):
default_val = np.array([1.0, 0.0, 0.0])
if ob1 is None or ob2 is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(ob2)
ball = get_ball(ob2)
if ball is None or paddle is None or ball[0] < 50:
return default_val
return np.array([0.0, 1.0, 0.0]) if paddle[1] >= ball[1] else np.array([0.0, 0.0, 1.0])
def _state2(ob1, ob2):
default_val = np.array([1.0, 0.0])
if ob1 is None or ob2 is None:
return default_val
paddle = get_our_paddle(ob2)
if 38 <= paddle[1] <= 42: return np.array([0.0, 1.0])
return default_val
return np.concatenate([_state1(ob1,ob2), _state2(ob1,ob2)])
def get_signal(obs, prev_obs, prev_move):
default_val = np.array([0.0 for i in range(9)])
if obs is None or prev_obs is None:
return default_val
# obs = preprocess(obs)
paddle = get_our_paddle(obs)
ball = get_ball(obs)
prev_ball = get_ball(prev_obs)
prev_paddle = get_our_paddle(prev_obs)
if ball is None or paddle is None or prev_ball is None or prev_paddle is None:
return default_val
# print "some stuff "
# print "prev ball ", prev_ball
# print "ball ", ball
# print "paddle ", paddle
# older
paddle = paddle[1:] / 80.0
prev_paddle = prev_paddle[1:] / 80.0
diff = ball - prev_ball
# print "diff ", diff
diff = diff / float(np.max(abs(diff))) if np.max(abs(diff)) > 0 else np.array([0.0, 0.0])
ball = ball / 80.0
prev_move = np.array([1.0, 0.0] if prev_move == 2 else [0.0, 1.0])
care = 1.0 if ball[0] >= 60.0 / 80.0 and ball[0] <= 71.0 / 80.0 else 0.0
# print "ball ", ball
signal = np.concatenate([paddle, prev_paddle, ball, diff, prev_move, [care]])
signal = signal * care
# newer
# print ball, prev_ball
# diff = ball - prev_ball
# print "diff ", diff
# a = 1.0 if paddle[1] > ball[1] else -1.0
# b = diff[0]
# signal = np.array([a,b, 0.0, 0.0, 0.0, 0.0])
return signal
def get_signal_full_image(obs, prev_obs):
if obs is None or prev_obs is None:
return None
obs = lose_color(preprocess(obs))
prev_obs = lose_color(preprocess(prev_obs))
# obs_diff = obs - prev_obs
# draw.draw(obs_diff, "obs.png")
return obs, prev_obs
# generate a pong trace, the actor takes in the last 2 states as inputs
def generate_pong_trace(env, start_state, agent, n=200, do_render=True):
env.reset()
env.restore_full_state(start_state)
trace = []
all_obs = [None, None]
all_actions = [2]
for i in range(n):
action = agent.act((all_obs[-2], all_obs[-1],all_actions[-1]), show_prob = do_render)
obs, reward, done, comments = env.step(action)
if do_render:
env.render()
trace.append(((all_obs[-2], all_obs[-1],all_actions[-1]), action, reward))
all_obs.append(obs)
all_actions.append(action)
if done: break
return trace
def get_random_state(env, start_state):
env.reset()
env.restore_full_state(start_state)
for i in range(random.randint(100, 500)):
_, a, b, c = env.step(random.choice([2,3]))
state = env.clone_full_state()
return state
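# Usage sketch (assumes an Atari Pong env exposing clone/restore_full_state,
# plus an agent object with the .act() interface used above):
#   env = gym.make('Pong-v0').unwrapped
#   env.reset()
#   start_state = env.clone_full_state()
#   trace = generate_pong_trace(env, start_state, agent, n=200, do_render=False)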
| [
"[email protected]"
]
| |
b43a0ea6840af2ffc73bc3ec3411ff7e6682262b | 5ba3115523fb052d32db827e09443248ec5f6629 | /algorithm/PycharmProjects/week2/셀렉션 알고리즘.py | ea0f1342ff0818baef2e0b87e8900f027e4bd11d | []
| no_license | oliviaspark0825/TIL | 841095003ae794e14bd8c7e8c883826667c25f37 | 8bc66836f9a1eea5f42e9e1172f81f005abc042d | refs/heads/master | 2023-01-10T22:14:15.341489 | 2019-08-22T09:09:52 | 2019-08-22T09:09:52 | 162,099,057 | 0 | 0 | null | 2023-01-04T07:52:28 | 2018-12-17T08:32:43 | Jupyter Notebook | UTF-8 | Python | false | false | 645 | py | # select k times: find the smallest remaining element and swap it into place; selection keeps the number of swaps small
#
# def select(list, k):
#     for i in range(0, k):
#         minIndex = i
#         for j in range(i+1, len(list)):
#             if list[minIndex] > list[j]:
#                 minIndex = j
#         list[i], list[minIndex] = list[minIndex], list[i]
#     return list[k - 1]
def selectionSort(a):
    for i in range(0, len(a) -1): # i runs from 0 to n-1
min = i
for j in range(i+1, len(a)):
if a[min] > a[j]:
min = j
a[i], a[min] = a[min], a[i]
data = [64, 25, 10, 22, 11]
selectionSort(data)
print(data)
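# Runnable sketch of the k-th selection idea in the comment above
# (k passes of selection put the k smallest values into place):
def select_kth_smallest(a, k):
    for i in range(k):
        min_index = i
        for j in range(i + 1, len(a)):
            if a[min_index] > a[j]:
                min_index = j
        a[i], a[min_index] = a[min_index], a[i]
    return a[k - 1]

print(select_kth_smallest([64, 25, 10, 22, 11], 2))  # -> 11, the 2nd smallest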
| [
"[email protected]"
]
| |
2dc4b913336525af52c6bd856739646b091e1ebd | af4b5830b2a23d1f3d126297c7eb057bb3f8e42f | /pymatflow/cp2k/base/pw_dft.py | fd0c25c249ac97dad547a452ce45111dd806c4d6 | [
"MIT"
]
| permissive | mukhtarbayerouniversity/pymatflow | de2b2d573ceed68c1dd3c149c538588394029137 | 9ab61e56659519cd6c83d5bd32da1262f44da065 | refs/heads/master | 2023-02-13T01:50:32.993401 | 2021-01-13T15:19:36 | 2021-01-13T15:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import numpy as np
import sys
import os
import shutil
from pymatflow.cp2k.base.pw_dft_control import cp2k_pw_dft_control
from pymatflow.cp2k.base.pw_dft_iterative_solver import cp2k_pw_dft_iterative_solver
from pymatflow.cp2k.base.pw_dft_mixer import cp2k_pw_dft_mixer
from pymatflow.cp2k.base.pw_dft_parameters import cp2k_pw_dft_parameters
"""
usage:
    helper classes for building the &PW_DFT section of a CP2K input file
"""
# ============================================
# CP2K / PW_DFT
#=============================================
class cp2k_pw_dft:
"""
"""
def __init__(self):
"""
"""
self.params = {
}
self.status = False
self.control = cp2k_pw_dft_control()
self.iterative_solver = cp2k_pw_dft_iterative_solver()
self.mixer = cp2k_pw_dft_mixer()
self.parameters = cp2k_pw_dft_parameters()
# basic setting
self.control.status = True
self.iterative_solver.status = True
self.mixer.status = True
self.parameters.status = True
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t&PW_DFT\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t%s %s\n" % (item, self.params[item]))
if self.control.status == True:
self.control.to_input(fout)
if self.iterative_solver.status == True:
self.iterative_solver.to_input(fout)
if self.mixer.status == True:
self.mixer.to_input(fout)
if self.parameters.status == True:
self.parameters.to_input(fout)
fout.write("\t&END PW_DFT\n")
def set_params(self, params):
for item in params:
if len(item.split("-")) == 2:
self.params[item.split("-")[-1]] = params[item]
elif item.split("-")[1] == "CONTROL":
self.control.set_params({item: params[item]})
elif item.split("-")[1] == "ITERATIVE_SOLVER":
self.iterative_solver.set_params({item: params[item]})
elif item.split("-")[1] == "MIXER":
self.mixer.set_params({item: params[item]})
elif item.split("-")[1] == "PARAMETERS":
self.parameters.set_params({item: params[item]})
else:
pass
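# Minimal smoke test (added sketch): emit a default &PW_DFT section so the
# serialization in to_input() can be inspected without a full job setup.
if __name__ == "__main__":
    section = cp2k_pw_dft()
    section.to_input(sys.stdout)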
| [
"[email protected]"
]
| |
875b4f27d8e18a7e9ec795653c633190e9d41156 | a7c35b3b94d768a5a6e0fbe87722192d7c41452d | /pyNastran/op2/tables/geom/edom.py | a63e42075f3e62732d486627dbbea5b4e3343c31 | []
| no_license | jpdeslich/pyNastran | 0ed891298647563f05fa9002b5207d9997464fcf | a09a9e40f8403633bc9d4dc89eb1c29c51ceb3fa | refs/heads/master | 2022-12-18T21:54:04.787376 | 2020-09-28T18:54:00 | 2020-09-28T18:54:00 | 299,397,732 | 0 | 0 | null | 2020-09-28T18:29:57 | 2020-09-28T18:29:57 | null | UTF-8 | Python | false | false | 71,587 | py | """
defines readers for BDF objects in the OP2 EDOM/EDOMS table
"""
from struct import Struct
from typing import Tuple, List, Union
import numpy as np
from pyNastran.op2.tables.geom.geom_common import GeomCommon
from pyNastran.op2.op2_interface.op2_reader import mapfmt, reshape_bytes_block, reshape_bytes_block_size
from .utils import get_minus1_start_end
#if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.cards.optimization import DVPREL1, DVPREL2, DVMREL2
DSCREEN_INT_TO_RTYPE = {
3 : 'LAMA',
4 : 'EIGN',
5 : 'DISP',
6 : 'STRESS',
}
DSCREEN_RTYPE_TO_INT = {value: key for key, value in DSCREEN_INT_TO_RTYPE.items()}
class EDOM(GeomCommon):
"""defines methods for reading op2 properties"""
def _read_edom4_4(self, data, ndata):
"""
reads the EDOM table
SOL 200 design optimization and sensitivity analysis bulk entries.
"""
return self._read_geom_4(self._edom_map, data, ndata)
def __init__(self):
GeomCommon.__init__(self)
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_altmdtku4.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_altd200x7.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_mdtku1.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_mcso14.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_ds105.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_altcc574.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_adjoint.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_mcso18.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_cqr4optstdis.op2
# F:\work\pyNastran\pyNastran\master2\pyNastran\bdf\test\nx_spike\out_d200ce12.op2
        #: Optimization Table (I think this is NX-specific)
self._edom_map = {
# are these 3 really EDOM?
#MAT1DOM(103,1,9944)
#MAT10DOM(2801,28,9945)
#MODTRAK(6006,60,477)
(103, 1, 9944) : ['MAT1DOM', self._read_mat1dom],
(304, 3, 276) : ['DSCONS', self._read_dscons],
(404, 4, 277) : ['DVAR', self._read_dvar],
(504, 5, 246) : ['DVSET', self._read_dvset],
(4106, 41, 362) : ['DCONSTR', self._read_dconstr],
#DDVAL(7000,70,563)
#DRESP3(6700,67,433)
#(504, 5, 246) : ['???', self._read_fake],
#(504, 5, 246) : ['???', self._read_fake],
#(504, 5, 246) : ['???', self._read_fake],
#(504, 5, 246) : ['???', self._read_fake],
#(504, 5, 246) : ['???', self._read_fake],
(3106, 31, 352) : ['DESVAR', self._read_desvar],
(3206, 32, 353) : ['DLINK', self._read_dlink],
(3306, 33, 354) : ['DVPREL1', self._read_dvprel1],
(3406, 34, 355) : ['DVPREL2', self._read_dvprel2],
#DOPTPRM(4306,43,364)
(3706, 37, 358) : ['DTABLE', self._read_dtable],
#(3806, 38, 359) : ['DRESP1', self._read_dresp1],
(3806, 38, 359) : ['DRESP1', self._read_fake],
(3906, 39, 360) : ['DRESP2', self._read_fake],
(4206, 42, 363) : ['DSCREEN', self._read_dscreen],
(4306, 43, 364) : ['DOPTPRM', self._read_doptprm],
(4406, 44, 372) : ['DVGRID', self._read_dvgrid],
#DVSHAP(5006,50,470)
(5106, 51, 471) : ['DCONADD', self._read_fake],
#DVBSHAP(5806,58,474)
#DVGEOM(5906,59,356)
(6006, 60, 477) : ['MODTRAK', self._read_fake],
#DRESP3(6700,67,433)
(6100, 61, 429) : ['DVCREL1', self._read_fake],
(6200, 62, 430) : ['DVCREL2', self._read_fake],
(6300, 63, 431) : ['DVMREL1', self._read_dvmrel1],
(6400, 64, 432) : ['DVMREL2', self._read_dvmrel2],
(7000, 70, 563) : ['DCONSTR/DDVAL?', self._read_fake],
}
def _read_dconstr(self, data: bytes, n: int) -> int:
"""
Record – DCONSTR(4106,41,362)
Design constraints.
Word Name Type Description
1 DCID I Design constraint set identification number
2 RID I DRESPi entry identification number
3 LALLOW RS Lower bound on the response quantity. Undefined if
LTID is nonzero
4 UALLOW RS Upper bound on the response quantity. Undefined if
UTID is nonzero
5 LOWFQ RS Low end of frequency range in Hz
6 HIGHFQ RS High end of frequency range in Hz
7 LTID I Identification number of TABLEDi entry giving lower
bound on the response quantity as a function of
frequency or 0 if not specified
8 UTID I Identification number of TABLEDi entry giving upper
bound on the response quantity as a function of
frequency or 0 if not specified
data = (50, 2, 0.0016, 0.0018, 0.0, 1.0e+20, 0, 0)
"""
ntotal = 32 * self.factor # 8 * 4
struct1 = Struct(mapfmt(self._endian + b'ii 4f ii', self.size))
ndatai = len(data) - n
ncards = ndatai // ntotal
for unused_icard in range(ncards):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
oid, dresp_id, lallow, uallow, lowfq, highfq, ltid, utid = out
#print(oid, dresp_id, lallow, uallow, lowfq, highfq, ltid, utid)
lid = ltid if ltid != 0 else lallow
uid = utid if utid != 0 else uallow
dconstr = self.add_dconstr(oid, dresp_id, lid=lid, uid=uid,
lowfq=lowfq, highfq=highfq)
dconstr.validate()
str(dconstr)
#print(dconstr)
n += ntotal
return n
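    # Worked example of the 8-word DCONSTR record above (a sketch assuming
    # little-endian, size=4, mirroring struct1's 'ii 4f ii' layout):
    #   >>> from struct import pack, unpack
    #   >>> edata = pack('<ii4fii', 50, 2, 0.0016, 0.0018, 0.0, 1.0e+20, 0, 0)
    #   >>> unpack('<ii4fii', edata)[:4]
    #   (50, 2, 0.0016..., 0.0018...)   # DCID=50, DRESP id=2, lower/upper bounds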
def _read_dscreen(self, data: bytes, n: int) -> int:
"""
DSCREEN(4206, 42, 363)
Design constraint screening data.
Word Name Type Description
1 RTYPE I Response type for which the screening criteria apply
2 TRS RS Truncation threshold
3 NSTR I Maximum number of constraints to be retained per region per load case
data = (5, -0.70, 10)
"""
ntotal = 12 * self.factor # 3*4
struct1 = Struct(mapfmt(self._endian + b'ifi', self.size))
ndatai = len(data) - n
ncards = ndatai // ntotal
msg = ''
for unused_icard in range(ncards):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
rtype_int, trs, nstr = out
n += ntotal
if rtype_int in DSCREEN_INT_TO_RTYPE:
rtype = DSCREEN_INT_TO_RTYPE[rtype_int]
elif rtype_int == 7: # STRAIN/FORCE/EQUA?
# C:\MSC.Software\simcenter_nastran_2019.2\tpl_post1\mereiglc.op2
#DSCREEN,DISP,-1000.0,20
#DSCREEN,STRESS,-1000.0,20
#DSCREEN,STRAIN,-1000.0,20
#DSCREEN,FORCE,-1000.0,20
#DSCREEN,EQUA,-1000.0,20
#DSCREEN,EIGN,-1000.0
#DSCREEN,LAMA,-1000.0
rtype = 'STRAIN?'
msg += f'rtype_int={rtype_int}? trs={trs} nstr={nstr}\n'
continue
elif rtype_int == 8: # STRAIN/FORCE/EQUA?
rtype = 'FORCE?'
msg += f'rtype_int={rtype_int}? trs={trs} nstr={nstr}\n'
continue
elif rtype_int == 91: # STRAIN/FORCE/EQUA?
rtype = 'EQUA?'
#DSCREEN,FORCE,-1000.0,20
#DSCREEN,EQUA,-1000.0,20
msg += f'rtype_int={rtype_int}? trs={trs} nstr={nstr}\n'
continue
else:
rtype = "?"
msg += f'rtype_int={rtype_int}? trs={trs} nstr={nstr}\n'
continue
#raise NotImplementedError(f'rtype_int={rtype_int}? trs={trs} nstr={nstr}')
dscreen = self.add_dscreen(rtype, trs=trs, nstr=nstr)
dscreen.validate()
str(dscreen)
#print(dscreen.rstrip())
if msg:
msg2 = 'Error reading DSCREEN\n' + msg
self.log.error(msg2)
#raise RuntimeError(msg2)
return n
def _read_doptprm(self, data: bytes, n: int) -> int:
"""
Record – DOPTPRM(4306,43,364)
Design optimization parameters.
Word Name Type Description
1 APRCOD I Approximation method
2 IPRINT I Print control during approximate optimization phase with DOT
3 DESMAX I Maximum number of design cycles
4 METHOD I DOT optimization method
5 DELP RS Fractional change allowed in each property during any
optimization design cycle
6 DPMIN RS Minimum move limit imposed
7 PTOL RS Maximum tolerance on differences allowed between the
property values on property entries and the property
values calculated from the design variable values on
the DESVAR entry
8 CONV1 RS Relative objective function convergence criterion
9 CONV2 RS Absolute objective function convergence criterion
10 GMAX RS Maximum constraint violation allowed at the
converged optimum
11 DELX RS Fractional change allowed in each design variable
during any optimization cycle
12 DXMIN RS Minimum absolute limit on design variable move
13 DELB RS Relative finite difference move parameter
14 GSCAL RS Constraint normalization factor
15 CONVDV RS Relative convergence criterion on design variables
16 CONVPR RS Relative convergence criterion on properties
17 P1 I Design cycles in which output is printed
18 P2 I Items to be printed at the design cycles defined
by P1
19 CT RS Constraint tolerance
20 CTMIN RS Constraint violation threshold
21 DABOBJ RS DOT absolute objective function convergence criterion
22 DELOBJ RS DOT relative objective function convergence criterion
23 DOBJ1 RS DOT 1–D search absolute objective limit
24 DOBJ2 RS DOT 1–D search relative objective limit
25 DX1 RS DOT 1–D search absolute DV limit
26 DX2 RS DOT 1–D search relative DV limit
27 ISCAL I Design variables are rescaled every ISCAL iterations
28 ITMAX I Maximum DOT MFD iterations per cycle
29 ITRMOP I Maximum consecutive DOT MFD iterations at convergence
30 IWRITE I File number for DOT optimizer printout
31 IGMAX I Active constraint counter
32 JTMAX I Maximum DOT SLP iterations per cycle
33 ITRMST I Maximum consecutive DOT SLP iterations at convergence
34 JPRINT I SLP subproblem print within DOT
35 IPRNT1 I Print scaling factors for design variable vector within DOT
36 IPRNT2 I DOT 1–D search or miscellaneous information print
37 JWRITE I File number on which iteration history is written within DOT
38 STPSCL RS Scale factor for shape finite difference step sizes
applied to all shape design variables
39 FSDMAX I Number of FSD cycles to be performed
40 FSDALP RS Relaxation parameter applied in FSD
41 DISCOD I Discrete processing method code
42 DISBEG I Design cycle ID for discrete variable processing initiation
43 PLVIOL I Flag for handling property limit violation
44 P2RSET I ID of a SET1 entry listing constrained responses to
be printed if retained
45 EDVOUT RS Fraction of DVEREL1 DESVARs to be output in f06 file
46 MXCRTRSP I Flag to handle CSV output
"""
#if self.size == 4:
ntotal = 184 * self.factor # 46 * 4
struct1 = Struct(mapfmt(self._endian + b'4i 12f 2i 8f 11i f i f 4i f i', self.size))
ndatai = len(data) - n
ncards = ndatai // ntotal
for unused_icard in range(ncards):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
(aprcod, iprint, desmax, method, # ints
delp, dpmin, ptol, conv1, conv2, gmax, delx, dxmin, delb, gscal, convdv, convpr, # floats
p1, p2, # ints
ct, ctmin, dabobj, delobj, dobj1, dobj2, dx1, dx2, # floats
iscal, itmax, itrmop, iwrite, igmax, jtmax, itrmst, jprint, iprnt1, iprnt2, jwrite, # ints
stpscl, # float
fsdmax, # int
fsdalp, # float
discod, disbeg, plviol, p2rset, # ints
edvout, # float
mxcrtrsp) = out # int
params = {
# ints
'APRCOD' : aprcod,
'IPRINT' : iprint,
'DESMAX' : desmax,
'METHOD' : method,
# floats
'DELP' : delp,
'DPMIN' : dpmin,
'PTOL' : ptol,
'CONV1' : conv1,
'CONV2' : conv2,
'GMAX' : gmax,
'DELX' : delx,
'DXMIN' : dxmin,
'DELB' : delb,
'GSCAL' : gscal,
'CONVDV' : convdv,
'CONVPR' : convpr,
# ints
'P1' : p1,
'P2' : p2,
# floats
'CT' : ct,
'CTMIN' : ctmin,
'DABOBJ' : dabobj,
'DELOBJ' : delobj,
'DOBJ1' : dobj1,
'DOBJ2' : dobj2,
'DX1' : dx1,
'DX2' : dx2,
# ints
'ISCAL' : iscal,
'ITMAX' : itmax,
'ITRMOP' : itrmop,
'IWRITE' : iwrite,
'IGMAX' : igmax,
'JTMAX' : jtmax,
'ITRMST' : itrmst,
'JPRINT' : jprint,
'IPRNT1' : iprnt1,
'IPRNT2' : iprnt2,
'JWRITE' : jwrite,
'STPSCL' : stpscl, # float
'FSDMAX' : fsdmax,
'FSDALP' : fsdalp, # float
'DISCOD' : discod,
'DISBEG' : disbeg,
'PLVIOL' : plviol,
'P2RSET' : p2rset,
'EDVOUT' : edvout, # float
'MXCRTRSP' : mxcrtrsp,
}
doptprm = self.add_doptprm(params)
for key, default_value in doptprm.defaults.items():
if default_value is None:
continue
if key not in params:
continue
value_actual = params[key]
assert isinstance(default_value, type(value_actual)), f'key={key!r} value={default_value!r} value_actual={value_actual!r}'
if isinstance(value_actual, int) and value_actual == default_value:
del doptprm.params[key]
elif isinstance(default_value, float) and np.allclose(value_actual, default_value):
del doptprm.params[key]
str(doptprm)
n += ntotal
self.card_count['DOPTPRM'] = ncards
return n
def _read_dtable(self, data: bytes, n: int) -> int:
"""
Record – DTABLE(3706,37,358)
Table constants.
Word Name Type Description
1 LABLi(2) CHAR4 Label for the constant
3 VALUi RS Value of the constant
Words 1 thru 3 repeat until -1 occurs
"""
if self.size == 4:
struct1 = Struct(self._endian + b'8s f')
else:
            raise NotImplementedError(f'size={self.size} is not supported for DTABLE')
ints = np.frombuffer(data[n:], self.idtype8).copy()
#floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
ncards = 0
size = self.size
ntotal = 12 * self.factor # 3*4
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
default_values = {}
nfields = (i1 - i0) // 3
for unused_i in range(nfields):
edata = data[n:n+ntotal]
key_bytes, value = struct1.unpack(edata)
if size == 4:
key = key_bytes.decode('latin1').rstrip()
default_values[key] = value
n += ntotal
assert n <= len(data), n
dtable = self.add_dtable(default_values)
str(dtable)
n += size
ncards += 1
self.card_count['DTABLE'] = ncards
return n
def _read_mat1dom(self, data: bytes, n: int) -> int:
"""
If one or more properties from a MAT1 entry are used as design
variables, the MAT1DOM record is written to the EDOM data block.
This is different than the MAT1 record in the MPT data block.
Word Name Type Description
1 MID I MAT1 identification number
2 FTE I Format code for Young’s modulus
3 FTG I Format code for shear modulus
4 FTNU I Format code for Poisson’s ratio
(2, 2, 0, 2)
"""
assert len(data) == 28, len(data)
return len(data)
def _read_dvgrid(self, data: bytes, n: int) -> int:
"""
Design variable to grid point relation.
Word Name Type Description
1 DVID I DESVAR entry identification number
2 GID I Grid point or geometric point identification number
3 CID I Coordinate system identification number
4 COEFF RS Multiplier of the vector defined by N(3)
5 N1 RS Component of the vector measured in the coordinate system defined by CID
6 N2 RS Component of the vector measured in the coordinate system defined by CID
7 N3 RS Component of the vector measured in the coordinate system defined by CID
"""
ntotal = 28 * self.factor # 7*4
struct1 = Struct(mapfmt(self._endian + b'3i 4f', self.size))
ncards = (len(data) - n) // ntotal
for unused_i in range(ncards):
edata = data[n:n + ntotal]
dvgrid_id, nid, cid, coeff, *dxyz = struct1.unpack(edata)
assert len(dxyz) == 3, dxyz
dvgrid = self.add_dvgrid(dvgrid_id, nid, dxyz,
cid=cid, coeff=coeff)
dvgrid.write_card_16()
n += ntotal
return n
def _read_dvprel1(self, data: bytes, n: int) -> int:
"""
Word Name Type Description
1 ID I Unique identification number
2 TYPE(2) CHAR4 Name of a property entry
4 PID I Property entry identification number
5 FID I FID number input. Otherwise, either 0 if property
name is input, or frequency (RS) if entry is for
frequency dependent property. (See Words 9 and 10)
6 PMIN RS Minimum value allowed for this property
7 PMAX RS Maximum value allowed for this property
8 C0 RS Constant term of relation
9 PNAME1 CHAR4 First word of property name, if any, or blanks if
FID number is nonzero in Word 5
10 PNAME2 CHAR4 Second word of property name, if any. Otherwise,
either blanks if FID number is nonzero in Word 5,
or frequency (RS) if entry is for frequency
dependent property. (See Word 5)
11 DVIDi I DESVAR entry identification number
12 COEFi RS Coefficient of linear relation
Words 11 and 12 repeat until -1 occurs
"""
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
iminus1 = np.where(ints == -1)[0]
ncards = 0
istart = [0] + list(iminus1[:-1] + 1)
iend = iminus1
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
dvprel_id = ints[i0]
type_bytes = data[n+size:n+3*size]
property_name_bytes = data[n+8*size:n+10*size]
prop_type = reshape_bytes_block_size(type_bytes, size=size)
pid, fid = ints[i0+3:i0+5]
pmin, pmax, c0 = floats[i0+5:i0+8]
if fid == 0:
fid = reshape_bytes_block_size(property_name_bytes, size=size)
# fid = fidi
#print(dvprel_id, prop_type, pid, fid, (pmin, pmax, c0))
desvar_ids = ints[i0+10:i1:2]
coeffs = floats[i0+11:i1:2]
# 2 TYPE(2) CHAR4 Name of a property entry
# 4 PID I Property entry identification number
# 5 FID I FID number input. Otherwise, either 0 if property
# name is input, or frequency (RS) if entry is for
# frequency dependent property. (See Words 9 and 10)
# 6 PMIN RS Minimum value allowed for this property
# 7 PMAX RS Maximum value allowed for this property
# 8 C0 RS Constant term of relation
dvprel = DVPREL1(dvprel_id, prop_type, pid, fid,
desvar_ids, coeffs,
p_min=pmin, p_max=pmax, c0=c0,
validate=True)
if dvprel_id in self.dvprels:
dvprel_old = self.dvprels[dvprel_id]
if dvprel == dvprel_old:
pass
else:
self._add_dvprel_object(dvprel)
ncards += 1
dvprel.write_card_16()
n += (i1 - i0 + 1) * size
self.card_count['DVPREL1'] = ncards
return n
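    # One DVPREL1 record as parsed above, with ints/floats overlaid on the
    # same words (sketch):
    #   [id, 'PROD    ' (2 words), pid, fid, pmin, pmax, c0,
    #    pname1, pname2, dvid1, coef1, dvid2, coef2, ..., -1]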
def _read_dvmrel1(self, data: bytes, n: int) -> int:
"""
Design variable to material relation.
Word Name Type Description
1 ID I Unique identification number
2 TYPE(2) CHAR4 Name of a material property entry
4 MID I Material identification number
5 FID I Entry is 0
6 MPMIN RS Minimum value allowed for this property
7 MPMAX RS Maximum value allowed for this property
8 C0 RS Constant term of relation
9 MPNAME(2) CHAR4 Name of material property
11 DVIDi I DESVAR entry identification number
12 COEFi RS Coefficient of linear relation
Words 11 and 12 repeat until -1 occurs
"""
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
size = self.size
for (i0, i1) in zip(istart, iend):
#self.show_data(data[n+i0*size:n+i1*size], types='ifs')
assert ints[i1] == -1, ints[i1]
dvmrel_id = ints[i0]
mat_type_bytes = data[n+size:n+3*size]
mid = ints[i0+3]
mp_name_bytes = data[n+8*size:n+10*size]
mat_type = reshape_bytes_block_size(mat_type_bytes, size=size)
mp_name = reshape_bytes_block_size(mp_name_bytes, size=size)
pid, fid = ints[i0+3:i0+5]
mp_min, mp_max, c0 = floats[i0+5:i0+8]
assert fid == 0, (dvmrel_id, mid, mat_type_bytes, mp_name, pid, fid)
#print(dvmrel_id, mid, mat_type_bytes, mp_name, pid, fid)
desvar_ids = ints[i0+10:i1:2]
coeffs = floats[i0+11:i1:2]
dvmrel = self.add_dvmrel1(dvmrel_id, mat_type, mid, mp_name,
desvar_ids, coeffs,
mp_min=mp_min, mp_max=mp_max, c0=c0,
validate=True)
dvmrel.write_card_16()
n += (i1 - i0 + 1) * size
return n
def _read_dvprel2(self, data: bytes, n: int) -> int:
"""
Record – DVPREL2(3406,34,355)
Design variable to property relation based on a user-supplied equation.
Word Name Type Description
1 ID I Unique identification number
2 TYPE(2) CHAR4 Name of a property entry
4 PID I Property entry identification number
5 FID I FID number input. Otherwise, either 0 if property
name is input, or frequency (RS) if entry is for
frequency dependent property. (See Words 9 and 10)
6 PMIN RS Minimum value allowed for this property
7 PMAX RS Maximum value allowed for this property
8 EQID I DEQATN entry identification number
9 PNAME1 CHAR4 First word of property name, if any, or blank if
FID number is nonzero (Word 5)
10 PNAME2 CHAR4 Second word of property name, if any. Otherwise,
either blanks if FID number is nonzero (See Word 5),
or frequency (RS) if entry is for frequency
dependent property. (See Word 5)
11 FLAG I DESVAR/DTABLE
FLAG = 1000 DESVAR
12 DVIDi I A DESVAR entry identification number
Word 12 repeats until -1000
FLAG = 2000 DTABLE
12 LABLi(2) CHAR4 Label for a constant on the DTABLE entry
Words 12 and 13 repeat until -2000
End flag when -1 occurs
data = (2, PROD, 101, 4, -1.0e+35, 1.0e+20, 2, '', 1000, 2, -1000,
2000, L1, -2000)
"""
#return self._read_dvxrel2(data, n, DVPREL2)
n0 = n
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
size = self.size
for (i0, i1) in zip(istart, iend):
#self.show_data(data[n+i0*size:n+i1*size], types='ifs')
assert ints[i1] == -1, ints[i1]
dvprel_id = ints[i0]
prop_type_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]
pid, fid = ints[i0+3:i0+5]
p_min, p_max = floats[i0+5:i0+7]
deqation = ints[i0+7]
#data[n0+iflag*size:n0+(iflag+1)*size])
prop_name_bytes = data[n0+(i0+8)*size:n0+(i0+10)*size]
if size == 4:
prop_type = prop_type_bytes.decode('latin1').rstrip()
prop_name = prop_name_bytes.decode('latin1').rstrip()
else:
                raise NotImplementedError(f'size={size} is not supported for DVPREL2')
if prop_name_bytes == b' ':
assert fid != 0
pname_fid = fid
else:
assert fid == 0, f'fid={fid} prop_name_bytes={prop_name_bytes}'
pname_fid = prop_name
#print(dvprel_id, prop_type, pid, pname_fid, deqation)
iend, dvids, labels = _read_dvxrel2_flag(data, n0, i0, i1, size, ints)
#print(dvids, labels)
dvprel = self.add_dvprel2(dvprel_id, prop_type, pid,
pname_fid, deqation,
dvids=dvids,
labels=labels,
p_min=p_min, p_max=p_max,
validate=True)
dvprel.validate()
#print(dvprel)
#print('--------------------')
dvprel.write_card_16()
n += (i1 - i0 + 1) * size
return n
def _read_dvmrel2(self, data: bytes, n: int) -> int:
"""
Record – DVMREL2(6400,64,432)
Design variable to material relation based on a user-supplied equation.
Word Name Type Description
1 ID I Unique identification number
2 TYPE(2) CHAR4 Name of a material property entry
4 MID I Material identification number
5 FID I Entry is 0
6 MPMIN RS Minimum value allowed for this property
7 MPMAX RS Maximum value allowed for this property
8 EQID I DEQATN entry identification number
9 MPNAME(2) CHAR4 Name of material property
11 FLAG I DESVAR/DTABLE
FLAG = 1000 DESVAR
12 DVIDi I A DESVAR entry identification number
Word 12 repeats until -1000
FLAG = 2000 DTABLE
12 LABLi(2) CHAR4 Label for a constant on the DTABLE entry
Words 12 and 13 repeat until -2000
End flag when -1 occurs
"""
cls = DVMREL2
#return self._read_dvxrel2(data, n, DVMREL2)
#def _read_dvxrel2(self, data: bytes, n: int, cls) -> int:
n0 = n
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
iminus1 = np.where(ints == -1)[0]
istart = [0] + list(iminus1[:-1] + 1)
iend = iminus1
size = self.size
for (i0, i1) in zip(istart, iend):
#self.show_data(data[n+i0*size:n+i1*size], types='ifs')
assert ints[i1] == -1, ints[i1]
dvmrel_id = ints[i0]
mat_type_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]
mid, fid = ints[i0+3:i0+5]
mp_min, mp_max = floats[i0+5:i0+7]
deqation = ints[i0+7]
#data[n0+iflag*size:n0+(iflag+1)*size])
mp_name_bytes = data[n0+(i0+8)*size:n0+(i0+10)*size]
if size == 4:
mat_type = mat_type_bytes.decode('latin1').rstrip()
mp_name = mp_name_bytes.decode('latin1').rstrip()
else:
                raise NotImplementedError(f'size={size} is not supported for DVMREL2')
if mp_name_bytes == b' ':
assert fid != 0
mpname_fid = fid
else:
assert fid == 0, f'fid={fid} mp_name_bytes={mp_name_bytes}'
#print(dvmrel_id, mat_type, (mid, fid), (mp_min, mp_max), deqation, mp_name, flag)
iend, dvids, labels = _read_dvxrel2_flag(data, n0, i0, i1, size, ints)
#labels = labels.
#print(dvids, labels)
card_name = cls.type
if card_name == 'DVPREL2':
pid = mid
dvprel_id = dvmrel_id
prop_type = mat_type
pname_fid = mp_name
dvxrel = self.add_dvprel2(dvprel_id, prop_type, pid,
pname_fid, deqation,
dvids=dvids,
labels=labels,
p_min=mp_min, p_max=mp_max,
validate=True)
elif card_name == 'DVMREL2':
dvxrel = self.add_dvmrel2(dvmrel_id, mat_type, mid, mp_name,
deqation,
dvids=dvids,
labels=labels,
mp_min=mp_min, mp_max=mp_max,
validate=True)
dvxrel.validate()
#print(dvxrel)
#print('--------------------')
dvxrel.write_card_16()
n += (i1 - i0 + 1) * size
return n
def _read_dresp1(self, data: bytes, n: int) -> int:
"""
Word Name Type Description
1 ID I Unique entry identifier
2 LABEL(2) CHAR4 User-defined label
4 FLAG I Flag indicating response type
FLAG = 1 WEIGHT
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute (-10 for DWEIGHT which is the topology optimization design weight
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 2 VOLUME
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 3 LAMA
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 4 EIGN
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 5 DISP
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 6 STRESS
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 7 STRAIN
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 8 FORCE
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 9 CFAILURE
5 PTYPE(2) CHAR4 Element flag (ELEM) or composite property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or composite property IDs
Word 10 repeats until -1 occurs
FLAG = 10 CSTRESS
5 PTYPE(2) CHAR4 Element flag (ELEM) or composite property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or composite property IDs
Word 10 repeats until -1 occurs
FLAG = 11 CSTRAIN
5 PTYPE(2) CHAR4 Element flag (ELEM) or composite property entry
name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or composite property IDs
Word 10 repeats until -1 occurs
FLAG = 12 FREQ
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 13 SPCFORCE
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 14 ESE
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 15 CEIG
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Response attribute
10 MONE I Entry is -1
FLAG = 17 Compliance
5 UNDEF(2) None
7 UNDEF I Reserved for SEID for compliance DRESP1
8 UNDEF(2) None
10 MONE I Entry is -1
FLAG = 19 ERP
5 UNDEF(2) None
7 REGION I Region identifier
8 ATTA I Response attribute
9 ATTB I Frequency or real code for character input, or -1=spawn)
10 ATTi I Panel SET3 IDs
Word 10 repeats until -1 occurs
FLAG = 20 FRDISP
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 21 FRVELO
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 22 FRACCL
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 23 FRSPCF
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 24 FRSTRE
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 25 FRFORC
5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
Word 10 repeats until -1 occurs
FLAG = 26 RMSDISP
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Random ID
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 27 RMSVELO
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Random ID
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 28 RMSACCL
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB I Random ID
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 29 PSDDISP
5 UNDEF None
6 PTYPE I Random ID
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 30 PSDVELO
5 UNDEF None
6 PTYPE I Random ID
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 31 PSDACCL
5 UNDEF None
6 PTYPE I Random ID
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 60 TDISP
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Time value; -1 (integer) spawn for all time steps
in set; -1.10000E+08 for SUM; -1.20000E+08 for
AVG; -1.30000E+08 for SSQ; -1.40000E+08 for
RSS; -1.50000E+08 for MAX; -1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 61 TVELO
5 UNDEF(2) None
7 REGION I Region identifier for constraint screening
8 ATTA I Response attribute
9 ATTB RS Time value; -1 (integer) spawn for all time steps
in set; -1.10000E+08 for SUM; -1.20000E+08 for
AVG; -1.30000E+08 for SSQ; -1.40000E+08 for
RSS; -1.50000E+08 for MAX; -1.60000E+08 for MIN
10 ATTi I Grid point IDs
Word 10 repeats until -1 occurs
FLAG = 62 TACCL
"""
flag_to_resp = {
1 : 'WEIGHT',
2 : 'VOLUME',
3 : 'LAMA',
4 : 'EIGN',
5 : 'DISP',
6 : 'STRESS',
7 : 'STRAIN',
8 : 'FORCE',
9 : 'CFAILURE',
10 : 'CSTRESS',
11 : 'CSTRAIN',
12 : 'FREQ',
13 : 'SPCFORCE',
14 : 'ESE',
15 : 'CEIG',
17 : 'Compliance',
19 : 'ERP',
            20 : 'FRDISP',
            21 : 'FRVELO',
            22 : 'FRACCL',
            23 : 'FRSPCF',
            24 : 'FRSTRE',
            25 : 'FRFORC',
            26 : 'RMSDISP',
            27 : 'RMSVELO',
            28 : 'RMSACCL',
            29 : 'PSDDISP',
            30 : 'PSDVELO',
            31 : 'PSDACCL',
60 : 'TDISP',
61 : 'TVELO',
62 : 'TACCL',
}
#self.show_data(data[n:], types='qds')
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
#if self.size == 4:
#struct1 = Struct(self._endian + b'i 8s i')
#strs = np.frombuffer(data[n:], dtype='|S4')
#else:
#struct1 = Struct(self._endian + b'q 16s q')
#strs = np.frombuffer(data[n:], dtype='|S8')
#6i
#ntotal1 = 16 * self.factor # 4*4
size = self.size
def _pick_attbi_attbf(attbi: int, attbf: float) -> Union[float, str]:
"""
9 ATTB RS Frequency value; -1 (integer) spawn for all
frequencies in set; -1.10000E+08 for SUM;
-1.20000E+08 for AVG; -1.30000E+08 for SSQ;
-1.40000E+08 for RSS; -1.50000E+08 for MAX;
-1.60000E+08 for MIN
"""
if attbi == -1:
attb = 'ALL'
else:
attb = attbf
                # the aggregate codes (SUM/AVG/SSQ/RSS/MAX/MIN) are all
                # <= -1.1e+8 and are not supported here yet
                assert attb > -1.0e+8, attb
return attb
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
#print(i0, i1)
dresp_id = ints[i0]
label_bytes = data[n+size:n+3*size]
label = reshape_bytes_block_size(label_bytes, size=size)
flag = ints[i0+3]
response_type = flag_to_resp[flag]
#print(dresp_id, flag, label)
if flag == 1:
# WEIGHT
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute (-10 for DWEIGHT which is the topology optimization design weight
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
region, atta, attb = ints[i0+6:i0+9]
property_type = None
#response_type = 'WEIGHT'
assert atta == 33, atta
assert attb == -9999, attb
atta = None
attb = None
atti = None
elif flag == 2:
# FLAG = 2 VOLUME
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
property_type = None
region, atta, attb = ints[i0+6:i0+9]
atti = None
attb = None
elif flag == 3:
# FLAG = 3 LAMA
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
                #print(response_type, ints[i0+6:i1], floats[i0+6:i1])
region, atta, attb = ints[i0+6:i0+9]
property_type = None
#response_type = 'EIGN'
assert atta == 1, atta
assert attb == 0, attb
atti = None
elif flag == 4:
# FLAG = 4 EIGN
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
region, atta, attb = ints[i0+6:i0+9]
property_type = None
#response_type = 'EIGN'
#assert atta == 1, atta
assert attb == 0, attb
atti = None
#atta = None
#attb = None
elif flag == 5:
# DISP
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 ATTi I Grid point IDs
# Word 10 repeats until -1 occurs
property_type = None
                #response_type = 'DISP'
region, atta, attb = ints[i0+6:i0+9]
atti = ints[i0+9:i1].tolist()
elif flag in [6, 7, 11]:
# FLAG = 6 STRESS
# 5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
# Word 10 repeats until -1 occurs
property_type_bytes = data[n+4*size:n+6*size]
property_type = reshape_bytes_block_size(property_type_bytes, size=size)
region, atta, attb = ints[i0+6:i0+9]
atti = ints[i0+9:i1].tolist()
#print('ptype =', property_type)
#print('region =', region)
#print(atta, attb, atti)
#response_type = 'STRESS'
#elif flag == 7:
# FLAG = 7 STRAIN
# 5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
# Word 10 repeats until -1 occurs
#elif flag == 11:
# FLAG = 11 CSTRAIN
# 5 PTYPE(2) CHAR4 Element flag (ELEM) or composite property entry name
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 ATTi I Element numbers (if Word 5 is ELEM) or composite property IDs
# Word 10 repeats until -1 occurs
elif flag == 12:
# FLAG = 12 FREQ
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
property_type = None
region, atta, attb = ints[i0+6:i0+9]
atti = None
elif flag == 15:
# FLAG = 15 CEIG
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB I Response attribute
# 10 MONE I Entry is -1
#print(ints[i0+6:i1])
#print(floats[i0+6:i1])
property_type = None
region, atta, attb = ints[i0+6:i0+9]
atti = None
elif flag == 19:
# FLAG = 19 ERP
# 5 UNDEF(2) None
# 7 REGION I Region identifier
# 8 ATTA I Response attribute
                # 9 ATTB I Frequency or real code for character input, or -1 (spawn)
# 10 ATTi I Panel SET3 IDs
# Word 10 repeats until -1 occurs
property_type = None
region, atta, attb = ints[i0+6:i0+9]
atti = ints[i0+9:i1].tolist()
            elif flag in [20, 21, 23]:
                property_type = None
                # FLAG = 20/21/23 FRDISP/FRVELO/FRSPCF (identical record layout)
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB RS Frequency value; -1 (integer) spawn for all
# frequencies in set; -1.10000E+08 for SUM;
# -1.20000E+08 for AVG; -1.30000E+08 for SSQ;
# -1.40000E+08 for RSS; -1.50000E+08 for MAX;
# -1.60000E+08 for MIN
# 10 ATTi I Grid point IDs
# Word 10 repeats until -1 occurs
#print(ints[i0+5:i1])
#print(floats[i0+5:i1])
#print(ints[i0+6:i1])
#print(floats[i0+6:i1])
region, atta, attbi = ints[i0+6:i0+9]
attbf = floats[i0+8]
atti = ints[i0+9:i1].tolist()
attb = _pick_attbi_attbf(attbi, attbf)
#print(region, atta, attb, atti)
elif flag == 22:
# FLAG = 22 FRACCL
# 5 UNDEF(2) None
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB RS Frequency value; -1 (integer) spawn for all
# frequencies in set; -1.10000E+08 for SUM;
# -1.20000E+08 for AVG; -1.30000E+08 for SSQ;
# -1.40000E+08 for RSS; -1.50000E+08 for MAX;
# -1.60000E+08 for MIN
# 10 ATTi I Grid point IDs
# Word 10 repeats until -1 occurs
property_type = None
region, atta, attbi = ints[i0+6:i0+9]
attbf = floats[i0+8]
attb = _pick_attbi_attbf(attbi, attbf)
atti = ints[i0+9:i1].tolist()
elif flag in [24, 25]:
# FLAG = 24 FRSTRE
# 5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB RS Frequency value; -1 (integer) spawn for all
# frequencies in set; -1.10000E+08 for SUM;
# -1.20000E+08 for AVG; -1.30000E+08 for SSQ;
# -1.40000E+08 for RSS; -1.50000E+08 for MAX;
# -1.60000E+08 for MIN
# 10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
# Word 10 repeats until -1 occurs
property_type_bytes = data[n+4*size:n+6*size]
property_type = reshape_bytes_block_size(property_type_bytes, size=size)
region, atta, attbi = ints[i0+6:i0+9]
attbf = floats[i0+8]
attb = _pick_attbi_attbf(attbi, attbf)
atti = ints[i0+9:i1].tolist()
#print(property_type, region, atta, attb, atti)
#elif flag == 25:
# FRDISP
# 5 PTYPE(2) CHAR4 Element flag (ELEM) or property entry name
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB RS Frequency value; -1 (integer) spawn for all
# frequencies in set; -1.10000E+08 for SUM;
# -1.20000E+08 for AVG; -1.30000E+08 for SSQ;
# -1.40000E+08 for RSS; -1.50000E+08 for MAX;
# -1.60000E+08 for MIN
# 10 ATTi I Element numbers (if Word 5 is ELEM) or property IDs
# Word 10 repeats until -1 occurs
elif flag == 29:
#FLAG = 29 PSDDISP
# 5 UNDEF None
# 6 PTYPE I Random ID
# 7 REGION I Region identifier for constraint screening
# 8 ATTA I Response attribute
# 9 ATTB RS Frequency value; -1 (integer) spawn for all
# frequencies in set; -1.10000E+08 for SUM;
# -1.20000E+08 for AVG; -1.30000E+08 for SSQ;
# -1.40000E+08 for RSS; -1.50000E+08 for MAX;
# -1.60000E+08 for MIN
# 10 ATTi I Grid point IDs
# Word 10 repeats until -1 occurs
property_type, region, atta, attbi = ints[i0+5:i0+9]
                #print(ints[i0+4:i1+5])
                #print(floats[i0+4:i1+5])
attbf = floats[i0+8]
attb = _pick_attbi_attbf(attbi, attbf)
atti = ints[i0+9:i1].tolist()
else:
raise NotImplementedError(flag)
            #print(response_type)
if atta == 0:
atta = None
if attb == 0:
attb = None
if atta is not None:
atta = int(atta)
            #print(dresp_id, label,
            #      response_type, property_type, region,
            #      atta, attb, atti)
dresp1 = self.add_dresp1(dresp_id, label,
response_type, property_type, region,
atta, attb, atti, validate=True)
dresp1.write_card_16()
n += (i1 - i0 + 1) * self.size
del dresp_id, label, response_type, property_type, region, atta, attb, atti
return n
def _read_dvset(self, data: bytes, n: int) -> int:
"""
DVSET 13013 PSHELL 4 .02 1.0 13013
DVSET 13016 PSHELL 4 .02 1.0 13016
(11013, 902, 9, 4, 2, 1.0, 1.0, 11013, -1)
(11014, 902, 9, 4, 2, 1.0, 1.0, 11014, -1)
(13013, 2302, 23, 4, 2, 0.02, 1.0, 13013, -1)
(13016, 2302, 23, 4, 2, 0.02, 1.0, 13016, -1)
MSC 2018.2
Word Name Type Description
1 VID I
2 TYPE(2) I
4 FIELD I
5 I
=1
6 PREF I
7 ALPHA I
=2
6 PREF RS
7 ALPHA RS
End
8 PID I
Word 8 repeats until End of Record
data = (
41, 902, 9, 4, 2, 1.0, 1.0, 21, -1,
42, 302, 3, 5, 2, 1.0, 1.0, 22, -1,
43, 902, 9, 4, 2, 1.0, 1.0, 23, -1,
44, 902, 9, 4, 2, 1.0, 1.0, 24, -1,
45, 302, 3, 5, 2, 1.0, 1.0, 25, -1,
46, 902, 9, 4, 2, 1.0, 1.0, 26, -1,
47, 52, 20, 4, 2, 1.0, 1.0, 27, -1,
48, 5402, 54, -7, 2, 1.0, 1.0, 28, -1,
48, 5402, 54, -167, 2, 1.0, 1.0, 28, -1,
49, 5402, 54, -7, 2, 1.0, 1.0, 29, -1,
49, 5402, 54, -167, 2, 1.0, 1.0, 29, -1,
50, 52, 20, 4, 2, 1.0, 1.0, 30, -1,
99, 52, 20, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1,
99, 5402, 54, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1,
99, 902, 9, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1,
100, 402, 4, 3, 3, 538976288, 1.0, 100, -1, ??? PMASS,None,None
100, 402, 4, 3, 3, 1.3563156426940112e-19, 1.0, 100, -1, ???
410, 902, 9, 4, 2, 1.0, -1.0, 21, -1,
430, 902, 9, 4, 2, 1.0, -1.0, 23, -1,
440, 902, 9, 4, 2, 1.0, -1.0, 24, -1,
460, 902, 9, 4, 2, 1.0, -1.0, 26, -1,
470, 52, 20, 4, 2, 1.0, -1.0, 27, -1,
480, 5402, 54, -7, 2, 1.0, -1.0, 28, -1,
480, 5402, 54, -167, 2, 1.0, -1.0, 28, -1,
490, 5402, 54, -7, 2, 1.0, -1.0, 29, -1,
490, 5402, 54, -167, 2, 1.0, -1.0, 29, -1,
500, 52, 20, 4, 2, 1.0, -1.0, 30, -1,
999, 52, 20, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1,
)
999, 302, 3, 3, 3, 538976288, 1.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, ???
999, 302, 3, 3, 3, 1.3563156426940112e-19, 1.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, ???
999, 5402, 54, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1,
999, 902, 9, 3, 1, 91, 0/0.0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1)
"""
#self.show_data(data[12:50], types='ifs')
n0 = n
size = self.size
        #structi = Struct(self._endian + b'iii ii ff ii')  # unused; the numpy views below decode the records
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
#edata = data[n:n + ntotal]
#out = structi.unpack(edata)
#print(out)
dvset_id, dvset_ptype1, dvset_ptype2, field, flag = ints[i0:i0+5]
if flag == 1:
pref, alpha = ints[i0+5:i0+7]
elif flag == 2:
pref, alpha = floats[i0+5:i0+7]
elif flag == 3:
#print(dvset_id, dvset_ptype1, dvset_ptype2, field, flag)
#print(' ? =', ints[i0+5:i0+7], floats[i0+5:i0+7], data[n0+(i0+4)*size:n0+(i0+7)*size])
#pref, alpha = '???', '???'
pref = None
alpha = None
#flag3
else:
print(dvset_id, dvset_ptype1, dvset_ptype2, field, flag)
raise NotImplementedError(flag)
pids = ints[i0+7:i1].tolist()
#assert field in [3, 4], field
dvset_ptype = (dvset_ptype1, dvset_ptype2)
if dvset_ptype == (902, 9):
ptype = 'PROD'
elif dvset_ptype == (5402, 54):
ptype = 'PBEAM'
elif dvset_ptype == (302, 3):
ptype = 'PELAS'
elif dvset_ptype == (52, 20):
ptype = 'PBAR'
elif dvset_ptype == (402, 4):
ptype = 'PMASS'
elif dvset_ptype == (2302, 23):
ptype = 'PSHELL'
elif dvset_ptype == (1002, 10):
ptype = 'PSHEAR'
else:
raise NotImplementedError(f'DVSET={dvset_id} dvset_ptype={dvset_ptype}')
#print(dvset_id, (ptype, field), flag, (pref, alpha), pids)
self.add_dvset(dvset_id, ptype, field, pref, pids, alpha=alpha)
n += (i1 - i0 + 1) * size
#self.log.info(f'skipping {self.card_name} in {self.table_name}; ndata={len(data)-12}')
return n
def _read_dvar(self, data: bytes, n: int) -> int:
"""
DVAR 13013 SPARPNL .01 13013
data = (404, 4, 277,
11013, 'SPARPNL ', 0.01, 11013, -1)
"""
ntotal = 24
ndatai = len(data) - n
ncards = ndatai // ntotal
assert ndatai % ntotal == 0
structi = Struct(self._endian + b'i 8s fii')
for unused_i in range(ncards):
edata = data[n:n + ntotal]
#self.show_data(edata, types='ifs')
#(11013, b'SPRCAPS ', 0.01, 11013, -1)
#(11014, b'SPRCAPS ', 0.01, 11014, -1)
#(11015, b'SPRCAPS ', 0.01, 11015, -1)
#(11016, b'SPRCAPS ', 0.01, 11016, -1)
out = structi.unpack(edata)
bid, label_bytes, deltab, vid, minus1 = out
assert minus1 == -1, out
assert isinstance(deltab, float), deltab
label = label_bytes.decode('latin1').rstrip()
vids = [vid]
self.add_dvar(bid, label, vids, deltab=deltab)
n += ntotal
return n
def _read_dscons(self, data: bytes, n: int) -> int:
"""DSCONS
DSCONS 110131 SPRCAPS STRESS 11013 2 25000. MAX
"""
ndatai = len(data) - n
ntotal = 32
ncards = ndatai // ntotal
assert ndatai % ntotal == 0
structi = Struct(self._endian + b'i 8s i 2i fi')
constraint_map = {
1 : 'DISP',
2 : 'STRESS',
3 : 'FORCE',
4 : 'LAMA',
5 : 'FREQ',
}
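        # worked example: (110131, b'SPRCAPS ', 2, 11013, 2, 25000.0, 0)
        # -> DSCONS 110131 SPRCAPS STRESS eid=11013 comp=2 limit=25000. MAX
        # (min_max: 0 -> MAX, 1 -> MIN)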
for unused_i in range(ncards):
edata = data[n:n + ntotal]
#self.show_data(edata, types='ifs')
#(110131, b'SPRCAPS ', 2, 11013, 2, 25000.0, 0)
#(110132, b'SPRCAPS ', 2, 11013, 2, -25000.0, 1)
#(110141, b'SPRCAPS ', 2, 11014, 2, 25000.0, 0)
#(110142, b'SPRCAPS ', 2, 11014, 2, -25000.0, 1)
#(110151, b'SPRCAPS ', 2, 11015, 2, 25000.0, 0)
#(110152, b'SPRCAPS ', 2, 11015, 2, -25000.0, 1)
#(110161, b'SPRCAPS ', 2, 11016, 2, 25000.0, 0)
out = structi.unpack(edata)
dscid, label_bytes, constraint_int, nid_eid, comp, limit, min_max = out
label = label_bytes.decode('latin1').rstrip()
try:
constraint_type = constraint_map[constraint_int]
except KeyError:
                raise NotImplementedError(f'constraint_int={constraint_int} out={out}')
assert min_max in [0, 1], min_max
#print(dscid, label, constraint_type, nid_eid, comp, limit, min_max)
if min_max == 0:
opt = 'MAX'
elif min_max == 1:
opt = 'MIN'
layer_id = 1
self.add_dscons(dscid, label, constraint_type, nid_eid, comp,
limit=limit, opt=opt, layer_id=layer_id)
n += ntotal
return n
def _read_dlink(self, data: bytes, n: int) -> int:
"""
DLINK(3206,32,353)
Word Name Type Description
1 ID I
2 DVID I
3 C0 RS
4 CMULT RS
5 INDV I
6 C RS
Words 5 through 6 repeat until End of Record
ints = (1, 2, 0, 1.0, 1, 1.0, -1)
floats = (1, 2, 0.0, 1.0, 1, 1.0, nan)
"""
ints = np.frombuffer(data[n:], self.idtype8).copy()
floats = np.frombuffer(data[n:], self.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
dlink_id, dependent_desvar = ints[i0:i0+2]
c0, cmult = floats[i0+2:i0+4]
independent_desvars = ints[i0+4:i1:2]
coeffs = floats[i0+5:i1:2]
#print(dlink_id, dependent_desvar, c0, cmult)
#print(independent_desvars, coeffs)
assert len(independent_desvars) == len(coeffs)
assert len(independent_desvars) > 0, independent_desvars
dlink = self.add_dlink(dlink_id, dependent_desvar,
independent_desvars,
coeffs,
c0=c0, cmult=cmult)
#print(dlink)
str(dlink)
n += (i1 - i0 + 1) * self.size
return n
def _read_desvar(self, data: bytes, n: int) -> int:
"""
(3106, 31, 352)
Word Name Type Description
1 ID I Unique design variable identification number
2 LABEL(2) CHAR4 User-supplied name for printing purposes
4 XINIT RS Initial value
5 XLB RS Lower bound
6 XUB RS Upper bound
7 DELXV RS Fractional change allowed for the design variable
during approximate optimization
8 DDVAL I ID of a DDVAL entry that provides a set of allowable
discrete values
"""
if self.size == 4:
ntotal = 32 # 8*4
structi = Struct(self._endian + b'i8s ffff i')
else:
ntotal = 64
structi = Struct(self._endian + b'q16s dddd q')
ncards = (len(data) - n) // ntotal
for unused_i in range(ncards):
edata = data[n:n + ntotal]
desvar_id, blabel, xinit, xlb, xub, delx, ddval = structi.unpack(edata)
            label = blabel.decode('ascii').rstrip()  # labels are space-padded
if delx == 0:
delx = None
if ddval == 0:
ddval = None
if desvar_id not in self.desvars:
desvar = self.add_desvar(desvar_id, label, xinit, xlb=xlb, xub=xub,
delx=delx, ddval=ddval, comment='')
else:
                # duplicate DESVAR id: build the card under a dummy float key
                # (1.0 cannot collide with integer ids), then rekey it below
desvar_temp = self.add_desvar(1.0, label, xinit, xlb=xlb, xub=xub,
delx=delx, ddval=ddval, comment='')
del self.desvars[1.0]
desvar_temp.desvar_id = desvar_id
assert desvar_temp == self.desvars[desvar_id]
n += ntotal
#print(desvar)
self.card_count['DESVAR'] = ncards
return n
def _read_dvxrel2_flag(data: bytes, n0: int,
                       i0: int, i1: int,
                       size: int,
                       ints: np.ndarray) -> Tuple[int, List[int], List[str]]:
"""reads the DVxREL2 flag table"""
flag = ints[i0+10]
#print(ints[i0+11:])
#print(floats[i0+11:])
assert flag in [1000, 2000], flag
iflag = i0 + 10
dvids = []
labels = []
flags_found = []
while flag != -1:
flags_found.append(flag)
#print(f'i0={i0} iflag={iflag} i1={i1}')
flag2 = ints[iflag]
assert flag == flag2
flag_test, = Struct(b'i').unpack(data[n0+iflag*size:n0+(iflag+1)*size])
        assert flag == flag_test, f'flag={flag} flag_test={flag_test}; n0={n0}'
if flag == 1000:
assert ints[iflag] == 1000, ints[iflag]
#print(' ', ints[iflag:i1])
iend = np.where(ints[iflag+1:i1] == -1000)[0][0] + (iflag+1)
dvids = ints[iflag+1:iend].tolist()
assert ints[iend] == -1000, ints[iflag+1:i1]
elif flag == 2000:
#print(' ', ints[iflag:i1])
iend = np.where(ints[iflag+1:i1] == -2000)[0][0] + (iflag+1)
assert ints[iflag] == 2000, ints[iflag]
assert ints[iend] == -2000, ints[iflag+1:i1]
labels_bytes = data[n0+(iflag+1)*size:n0+iend*size]
labels_bytes2 = data[n0+(iflag+1)*size:n0+(iend+1)*size]
#print('labels_bytes =', labels_bytes)
            nbytes = len(labels_bytes)
            # each label is 2 words: 8 bytes when size=4, 16 when size=8
            nlabels = nbytes // (2 * size)
            assert nbytes % (2 * size) == 0
assert nlabels > 0, nlabels
for ilabel in range(nlabels):
#print(ilabel*size*2, (ilabel+2)*size)
                labels_bytesi = labels_bytes[ilabel*size*2:(ilabel + 1)*size*2]
label = labels_bytesi.decode('latin1').rstrip()
assert 1 <= len(str(label)) <= 8, f'label={label}; labels_bytesi={labels_bytesi} labels_bytes={labels_bytes2}'
labels.append(label)
#print(labels)
else:
raise RuntimeError(flag)
iflag = iend + 1
flag = ints[iflag]
#print(f'\nflag={flag}')
assert len(flags_found) in [1, 2], flags_found
return iend, dvids, labels
| [ "[email protected]" ] | |
60c7d7d9db98b0d667465b77b3854c83adfa511e | d20d1d6a88dc8140673d79475554bde5e3145e68 | /tdsr-20200101/alibabacloud_tdsr20200101/models.py | fd33d1e3881b080e9bb3901270a402d449998280 | [ "Apache-2.0" ] | permissive | realVegetable/alibabacloud-python-sdk | e455c36478aad993d6b5b1dd570ff3d155447148 | 12b3e90a9722bacf7b9004b4bd1e3337724672de | refs/heads/master | 2023-06-02T07:49:20.900358 | 2021-06-18T03:33:30 | 2021-06-18T03:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271070 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import List, Dict, Any
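# Note: every model below follows the Tea SDK convention: to_map() serializes
# the model into a plain dict keyed by the wire field names, and from_map()
# is its inverse, so instances round-trip through plain dicts.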
class GetSingleConnDataRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # sub-scene ID
        self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetSingleConnDataResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
map_id: str = None,
type: str = None,
):
# ID
self.id = id
        # association ID
        self.map_id = map_id
        # outer: external association; inner: internal association; stair: staircase association
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.map_id is not None:
result['MapId'] = self.map_id
if self.type is not None:
result['Type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('MapId') is not None:
self.map_id = m.get('MapId')
if m.get('Type') is not None:
self.type = m.get('Type')
return self
class GetSingleConnDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
version: str = None,
list: List[GetSingleConnDataResponseBodyList] = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # version
        self.version = version
        # association records
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.version is not None:
result['Version'] = self.version
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Version') is not None:
self.version = m.get('Version')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = GetSingleConnDataResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class GetSingleConnDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetSingleConnDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetSingleConnDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetTaskStatusRequest(TeaModel):
def __init__(
self,
task_id: str = None,
):
        # task ID
        self.task_id = task_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.task_id is not None:
result['TaskId'] = self.task_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
return self
class GetTaskStatusResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: str = None,
type: str = None,
error_code: str = None,
error_msg: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # not started: init; in progress: processing; failed: failure; finished: succeed; canceled: canceled
        self.status = status
        # wall-line prediction: wall_line; image cutting: cut_image; reconstruction: build; right-angle optimization: right_angle_optimization; other: other
        self.type = type
        # error code when the task failed
        self.error_code = error_code
        # error message when the task failed
        self.error_msg = error_msg
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
if self.type is not None:
result['Type'] = self.type
if self.error_code is not None:
result['ErrorCode'] = self.error_code
if self.error_msg is not None:
result['ErrorMsg'] = self.error_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('ErrorCode') is not None:
self.error_code = m.get('ErrorCode')
if m.get('ErrorMsg') is not None:
self.error_msg = m.get('ErrorMsg')
return self
class GetTaskStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetTaskStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTaskStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetSceneDataRequest(TeaModel):
def __init__(
self,
token: str = None,
):
self.token = token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.token is not None:
result['Token'] = self.token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Token') is not None:
self.token = m.get('Token')
return self
class GetSceneDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: Dict[str, Any] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetSceneDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetSceneDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetSceneDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class LinkImageRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
file_name: str = None,
camera_height: int = None,
):
        # sub-scene ID
        self.sub_scene_id = sub_scene_id
        # image or video file name, e.g. xxx.jpg
        self.file_name = file_name
        # camera height in cm
        self.camera_height = camera_height
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
if self.file_name is not None:
result['FileName'] = self.file_name
if self.camera_height is not None:
result['CameraHeight'] = self.camera_height
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
if m.get('FileName') is not None:
self.file_name = m.get('FileName')
if m.get('CameraHeight') is not None:
self.camera_height = m.get('CameraHeight')
return self
class LinkImageResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
resource_id: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # image/video ID
        self.resource_id = resource_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.resource_id is not None:
result['ResourceId'] = self.resource_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('ResourceId') is not None:
self.resource_id = m.get('ResourceId')
return self
class LinkImageResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: LinkImageResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = LinkImageResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class AddSceneRequest(TeaModel):
def __init__(
self,
type: str = None,
name: str = None,
project_id: str = None,
):
        # scene type. 3D model: MODEL_3D; panorama image: PIC; panorama video: VIDEO
        self.type = type
        # scene name
        self.name = name
        # project ID
        self.project_id = project_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.type is not None:
result['Type'] = self.type
if self.name is not None:
result['Name'] = self.name
if self.project_id is not None:
result['ProjectId'] = self.project_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
return self
class AddSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # scene ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class AddSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateConnDataRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
conn_data: str = None,
):
        # scene ID
        self.scene_id = scene_id
        # association (connection) data
        self.conn_data = conn_data
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.conn_data is not None:
result['ConnData'] = self.conn_data
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('ConnData') is not None:
self.conn_data = m.get('ConnData')
return self
class UpdateConnDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class UpdateConnDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateConnDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateConnDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class BucketIsExistRequest(TeaModel):
def __init__(
self,
bucket_name: str = None,
):
        # bucket name
        self.bucket_name = bucket_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.bucket_name is not None:
result['BucketName'] = self.bucket_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('BucketName') is not None:
self.bucket_name = m.get('BucketName')
return self
class BucketIsExistResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
is_exist: bool = None,
):
# Id of the request
self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # whether the bucket exists
        self.is_exist = is_exist
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.is_exist is not None:
result['IsExist'] = self.is_exist
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('IsExist') is not None:
self.is_exist = m.get('IsExist')
return self
class BucketIsExistResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BucketIsExistResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BucketIsExistResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class RectifyImageRequest(TeaModel):
def __init__(
self,
url: str = None,
camera_height: int = None,
):
        # image URL
        self.url = url
        # camera height in cm
        self.camera_height = camera_height
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.url is not None:
result['Url'] = self.url
if self.camera_height is not None:
result['CameraHeight'] = self.camera_height
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('CameraHeight') is not None:
self.camera_height = m.get('CameraHeight')
return self
class RectifyImageResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
task_id: str = None,
sub_scene_id: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # task ID
        self.task_id = task_id
        # sub-scene ID
        self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.task_id is not None:
result['TaskId'] = self.task_id
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class RectifyImageResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: RectifyImageResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = RectifyImageResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class LabelBuildRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
):
        # scene ID
        self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class LabelBuildResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
task_id: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # reconstruction task ID
        self.task_id = task_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.task_id is not None:
result['TaskId'] = self.task_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
return self
class LabelBuildResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: LabelBuildResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = LabelBuildResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DropSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
):
        # main scene ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DropSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class DropSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DropSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DropSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class OptimizeRightAngleRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # sub-scene ID
        self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class OptimizeRightAngleResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
task_id: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
        # task ID
        self.task_id = task_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.task_id is not None:
result['TaskId'] = self.task_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
return self
class OptimizeRightAngleResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: OptimizeRightAngleResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = OptimizeRightAngleResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class AddRelativePositionRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
relative_position: str = None,
):
        # scene ID
        self.scene_id = scene_id
        # relative position data
        self.relative_position = relative_position
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.relative_position is not None:
result['RelativePosition'] = self.relative_position
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('RelativePosition') is not None:
self.relative_position = m.get('RelativePosition')
return self
class AddRelativePositionResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # request ID, echoing the requestId input parameter
        self.request_id = request_id
        # return code
        self.code = code
        # whether the request succeeded
        self.success = success
        # error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class AddRelativePositionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddRelativePositionResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddRelativePositionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DetailSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
):
        # scene ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DetailSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
name: str = None,
type: str = None,
sub_scene_num: int = None,
source_num: int = None,
published: bool = None,
gmt_create: int = None,
gmt_modified: int = None,
preview_token: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Main scene ID
        self.id = id
        # Scene name
        self.name = name
        # Scene type
        self.type = type
        # Number of sub-scenes
        self.sub_scene_num = sub_scene_num
        # Number of resources
        self.source_num = source_num
        # Whether published. true: published; false: not published
        self.published = published
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
        # Preview token
        self.preview_token = preview_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.type is not None:
result['Type'] = self.type
if self.sub_scene_num is not None:
result['SubSceneNum'] = self.sub_scene_num
if self.source_num is not None:
result['SourceNum'] = self.source_num
if self.published is not None:
result['Published'] = self.published
if self.gmt_create is not None:
result['GmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['GmtModified'] = self.gmt_modified
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('SubSceneNum') is not None:
self.sub_scene_num = m.get('SubSceneNum')
if m.get('SourceNum') is not None:
self.source_num = m.get('SourceNum')
if m.get('Published') is not None:
self.published = m.get('Published')
if m.get('GmtCreate') is not None:
self.gmt_create = m.get('GmtCreate')
if m.get('GmtModified') is not None:
self.gmt_modified = m.get('GmtModified')
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
return self
class DetailSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DetailSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DetailSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CreateSceneRequest(TeaModel):
def __init__(
self,
name: str = None,
project_id: str = None,
):
self.name = name
self.project_id = project_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['Name'] = self.name
if self.project_id is not None:
result['ProjectId'] = self.project_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
return self
class CreateSceneResponseBody(TeaModel):
def __init__(
self,
scene_id: int = None,
request_id: str = None,
preview_token: str = None,
err_message: str = None,
success: bool = None,
):
self.scene_id = scene_id
self.request_id = request_id
self.preview_token = preview_token
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class CreateSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CreateSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CreateSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
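def _example_parse_create_scene_response_body():
    # Illustrative sketch only (not part of the generated SDK): a raw response
    # map (e.g. decoded JSON from the service) is turned into a typed body via
    # from_map(). The field values are made-up placeholders.
    raw = {
        'SceneId': 42,
        'RequestId': 'req-abc',
        'PreviewToken': 'token-xyz',
        'Success': True,
    }
    body = CreateSceneResponseBody().from_map(raw)
    if body.success:
        return body.scene_id, body.preview_token
    raise RuntimeError(body.err_message or 'CreateScene failed')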
class DeleteFileRequest(TeaModel):
def __init__(
self,
param_file: str = None,
sub_scene_uuid: str = None,
):
self.param_file = param_file
self.sub_scene_uuid = sub_scene_uuid
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.param_file is not None:
result['ParamFile'] = self.param_file
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ParamFile') is not None:
self.param_file = m.get('ParamFile')
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
return self
class DeleteFileResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class DeleteFileResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DeleteFileResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DeleteFileResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CheckResourceRequest(TeaModel):
def __init__(
self,
country: str = None,
interrupt: bool = None,
invoker: str = None,
pk: str = None,
bid: str = None,
hid: int = None,
task_identifier: str = None,
task_extra_data: str = None,
gmt_wakeup: str = None,
success: bool = None,
message: str = None,
level: int = None,
url: str = None,
prompt: str = None,
):
self.country = country
self.interrupt = interrupt
self.invoker = invoker
self.pk = pk
self.bid = bid
self.hid = hid
self.task_identifier = task_identifier
self.task_extra_data = task_extra_data
self.gmt_wakeup = gmt_wakeup
self.success = success
self.message = message
self.level = level
self.url = url
self.prompt = prompt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.country is not None:
result['Country'] = self.country
if self.interrupt is not None:
result['Interrupt'] = self.interrupt
if self.invoker is not None:
result['Invoker'] = self.invoker
if self.pk is not None:
result['Pk'] = self.pk
if self.bid is not None:
result['Bid'] = self.bid
if self.hid is not None:
result['Hid'] = self.hid
if self.task_identifier is not None:
result['TaskIdentifier'] = self.task_identifier
if self.task_extra_data is not None:
result['TaskExtraData'] = self.task_extra_data
if self.gmt_wakeup is not None:
result['GmtWakeup'] = self.gmt_wakeup
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.level is not None:
result['Level'] = self.level
if self.url is not None:
result['Url'] = self.url
if self.prompt is not None:
result['Prompt'] = self.prompt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Country') is not None:
self.country = m.get('Country')
if m.get('Interrupt') is not None:
self.interrupt = m.get('Interrupt')
if m.get('Invoker') is not None:
self.invoker = m.get('Invoker')
if m.get('Pk') is not None:
self.pk = m.get('Pk')
if m.get('Bid') is not None:
self.bid = m.get('Bid')
if m.get('Hid') is not None:
self.hid = m.get('Hid')
if m.get('TaskIdentifier') is not None:
self.task_identifier = m.get('TaskIdentifier')
if m.get('TaskExtraData') is not None:
self.task_extra_data = m.get('TaskExtraData')
if m.get('GmtWakeup') is not None:
self.gmt_wakeup = m.get('GmtWakeup')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Level') is not None:
self.level = m.get('Level')
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('Prompt') is not None:
self.prompt = m.get('Prompt')
return self
class CheckResourceResponseBody(TeaModel):
def __init__(
self,
gmt_wakeup: str = None,
hid: int = None,
message: str = None,
task_identifier: str = None,
request_id: str = None,
success: bool = None,
url: str = None,
interrupt: bool = None,
invoker: str = None,
task_extra_data: str = None,
country: str = None,
prompt: str = None,
level: int = None,
pk: str = None,
bid: str = None,
):
self.gmt_wakeup = gmt_wakeup
self.hid = hid
self.message = message
self.task_identifier = task_identifier
self.request_id = request_id
self.success = success
self.url = url
self.interrupt = interrupt
self.invoker = invoker
self.task_extra_data = task_extra_data
self.country = country
self.prompt = prompt
self.level = level
self.pk = pk
self.bid = bid
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.gmt_wakeup is not None:
result['GmtWakeup'] = self.gmt_wakeup
if self.hid is not None:
result['Hid'] = self.hid
if self.message is not None:
result['Message'] = self.message
if self.task_identifier is not None:
result['TaskIdentifier'] = self.task_identifier
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.success is not None:
result['Success'] = self.success
if self.url is not None:
result['Url'] = self.url
if self.interrupt is not None:
result['Interrupt'] = self.interrupt
if self.invoker is not None:
result['Invoker'] = self.invoker
if self.task_extra_data is not None:
result['TaskExtraData'] = self.task_extra_data
if self.country is not None:
result['Country'] = self.country
if self.prompt is not None:
result['Prompt'] = self.prompt
if self.level is not None:
result['Level'] = self.level
if self.pk is not None:
result['Pk'] = self.pk
if self.bid is not None:
result['Bid'] = self.bid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('GmtWakeup') is not None:
self.gmt_wakeup = m.get('GmtWakeup')
if m.get('Hid') is not None:
self.hid = m.get('Hid')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskIdentifier') is not None:
self.task_identifier = m.get('TaskIdentifier')
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('Interrupt') is not None:
self.interrupt = m.get('Interrupt')
if m.get('Invoker') is not None:
self.invoker = m.get('Invoker')
if m.get('TaskExtraData') is not None:
self.task_extra_data = m.get('TaskExtraData')
if m.get('Country') is not None:
self.country = m.get('Country')
if m.get('Prompt') is not None:
self.prompt = m.get('Prompt')
if m.get('Level') is not None:
self.level = m.get('Level')
if m.get('Pk') is not None:
self.pk = m.get('Pk')
if m.get('Bid') is not None:
self.bid = m.get('Bid')
return self
class CheckResourceResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CheckResourceResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CheckResourceResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListSceneRequest(TeaModel):
def __init__(
self,
name: str = None,
project_id: str = None,
page_num: int = None,
page_size: int = None,
):
        # Main scene name
        self.name = name
        # ID of the project the scene belongs to
        self.project_id = project_id
        # Current page number
        self.page_num = page_num
        # Page size
        self.page_size = page_size
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['Name'] = self.name
if self.project_id is not None:
result['ProjectId'] = self.project_id
if self.page_num is not None:
result['PageNum'] = self.page_num
if self.page_size is not None:
result['PageSize'] = self.page_size
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
if m.get('PageNum') is not None:
self.page_num = m.get('PageNum')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
return self
class ListSceneResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
type: str = None,
sub_scene_num: int = None,
source_num: int = None,
published: bool = None,
gmt_create: int = None,
gmt_modified: int = None,
preview_token: str = None,
):
        # Main scene ID
        self.id = id
        # Scene name
        self.name = name
        # Scene type. 3D model: MODEL_3D; panoramic image: PIC; panoramic video: VIDEO
        self.type = type
        # Number of sub-scenes
        self.sub_scene_num = sub_scene_num
        # Number of resources
        self.source_num = source_num
        # Whether published. true: published; false: not published
        self.published = published
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
        # Preview token
        self.preview_token = preview_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.type is not None:
result['Type'] = self.type
if self.sub_scene_num is not None:
result['SubSceneNum'] = self.sub_scene_num
if self.source_num is not None:
result['SourceNum'] = self.source_num
if self.published is not None:
result['Published'] = self.published
if self.gmt_create is not None:
result['GmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['GmtModified'] = self.gmt_modified
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('SubSceneNum') is not None:
self.sub_scene_num = m.get('SubSceneNum')
if m.get('SourceNum') is not None:
self.source_num = m.get('SourceNum')
if m.get('Published') is not None:
self.published = m.get('Published')
if m.get('GmtCreate') is not None:
self.gmt_create = m.get('GmtCreate')
if m.get('GmtModified') is not None:
self.gmt_modified = m.get('GmtModified')
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
return self
class ListSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
has_next: bool = None,
current_page: int = None,
total_page: int = None,
count: int = None,
list: List[ListSceneResponseBodyList] = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Whether there is a next page
        self.has_next = has_next
        # Current page number
        self.current_page = current_page
        # Total number of pages
        self.total_page = total_page
        # Total number of records
        self.count = count
        # Main scene entries
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.has_next is not None:
result['HasNext'] = self.has_next
if self.current_page is not None:
result['CurrentPage'] = self.current_page
if self.total_page is not None:
result['TotalPage'] = self.total_page
if self.count is not None:
result['Count'] = self.count
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('HasNext') is not None:
self.has_next = m.get('HasNext')
if m.get('CurrentPage') is not None:
self.current_page = m.get('CurrentPage')
if m.get('TotalPage') is not None:
self.total_page = m.get('TotalPage')
if m.get('Count') is not None:
self.count = m.get('Count')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = ListSceneResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class ListSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
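def _example_parse_nested_list_scene_response():
    # Illustrative sketch only (not part of the generated SDK): models with
    # nested lists (here ListSceneResponseBody.list) rebuild each entry as a
    # typed ListSceneResponseBodyList instance inside from_map(). Values are
    # made-up placeholders.
    raw = {
        'Success': True,
        'Count': 1,
        'List': [{'Id': 'scene-1', 'Name': 'showroom', 'Type': 'MODEL_3D'}],
    }
    body = ListSceneResponseBody().from_map(raw)
    return [(scene.id, scene.name, scene.type) for scene in body.list]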
class PublishHotspotRequest(TeaModel):
def __init__(
self,
param_tag: str = None,
sub_scene_uuid: str = None,
):
self.param_tag = param_tag
self.sub_scene_uuid = sub_scene_uuid
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.param_tag is not None:
result['ParamTag'] = self.param_tag
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ParamTag') is not None:
self.param_tag = m.get('ParamTag')
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
return self
class PublishHotspotResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
data: Dict[str, Any] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class PublishHotspotResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: PublishHotspotResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = PublishHotspotResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
):
        # Scene ID
        self.id = id
        # Scene name
        self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class UpdateSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class UpdateSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateLayoutDataRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
layout_data: str = None,
):
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
        # Layout annotation data
        self.layout_data = layout_data
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
if self.layout_data is not None:
result['LayoutData'] = self.layout_data
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
if m.get('LayoutData') is not None:
self.layout_data = m.get('LayoutData')
return self
class UpdateLayoutDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class UpdateLayoutDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateLayoutDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateLayoutDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SaveHotspotTagRequest(TeaModel):
def __init__(
self,
param_tag: str = None,
sub_scene_uuid: str = None,
):
self.param_tag = param_tag
self.sub_scene_uuid = sub_scene_uuid
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.param_tag is not None:
result['ParamTag'] = self.param_tag
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ParamTag') is not None:
self.param_tag = m.get('ParamTag')
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
return self
class SaveHotspotTagResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class SaveHotspotTagResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SaveHotspotTagResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SaveHotspotTagResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CheckPermissionRequest(TeaModel):
def __init__(
self,
aliyun_id: str = None,
):
self.aliyun_id = aliyun_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.aliyun_id is not None:
result['AliyunId'] = self.aliyun_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('AliyunId') is not None:
self.aliyun_id = m.get('AliyunId')
return self
class CheckPermissionResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class CheckPermissionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CheckPermissionResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CheckPermissionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DeleteProjectRequest(TeaModel):
def __init__(
self,
project_id: str = None,
):
self.project_id = project_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.project_id is not None:
result['ProjectId'] = self.project_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
return self
class DeleteProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class DeleteProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DeleteProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DeleteProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class RectVerticalRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
vertical_rect: str = None,
detect_door: bool = None,
count_detect_door: int = None,
):
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
        # Vertical-correction data
        self.vertical_rect = vertical_rect
        # Whether to enable door detection
        self.detect_door = detect_door
        # Number of doors to detect
        self.count_detect_door = count_detect_door
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
if self.vertical_rect is not None:
result['VerticalRect'] = self.vertical_rect
if self.detect_door is not None:
result['DetectDoor'] = self.detect_door
if self.count_detect_door is not None:
result['CountDetectDoor'] = self.count_detect_door
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
if m.get('VerticalRect') is not None:
self.vertical_rect = m.get('VerticalRect')
if m.get('DetectDoor') is not None:
self.detect_door = m.get('DetectDoor')
if m.get('CountDetectDoor') is not None:
self.count_detect_door = m.get('CountDetectDoor')
return self
class RectVerticalResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class RectVerticalResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: RectVerticalResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = RectVerticalResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class PredImageRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
detect_door: bool = None,
count_detect_door: int = None,
correct_vertical: bool = None,
):
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
        # Whether to detect doors
        self.detect_door = detect_door
        # Number of doors (may be 0 when DetectDoor is false)
        self.count_detect_door = count_detect_door
        # Whether to apply vertical correction
        self.correct_vertical = correct_vertical
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
if self.detect_door is not None:
result['DetectDoor'] = self.detect_door
if self.count_detect_door is not None:
result['CountDetectDoor'] = self.count_detect_door
if self.correct_vertical is not None:
result['CorrectVertical'] = self.correct_vertical
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
if m.get('DetectDoor') is not None:
self.detect_door = m.get('DetectDoor')
if m.get('CountDetectDoor') is not None:
self.count_detect_door = m.get('CountDetectDoor')
if m.get('CorrectVertical') is not None:
self.correct_vertical = m.get('CorrectVertical')
return self
class PredImageResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
task_id: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Task ID
        self.task_id = task_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.task_id is not None:
result['TaskId'] = self.task_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
return self
class PredImageResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: PredImageResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = PredImageResponseBody()
self.body = temp_model.from_map(m['body'])
return self
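def _example_submit_pred_image_task():
    # Illustrative sketch only (not part of the generated SDK): PredImage is
    # asynchronous, so its response body carries a TaskId instead of a
    # finished result. This shows reading it from a parsed body; the values
    # are made-up placeholders.
    body = PredImageResponseBody().from_map({'Success': True, 'TaskId': 'task-001'})
    if body.success:
        return body.task_id  # poll this task elsewhere until it completes
    raise RuntimeError(body.message or 'PredImage submission failed')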
class GetOssPolicyRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetOssPolicyResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
access_id: str = None,
policy: str = None,
signature: str = None,
dir: str = None,
host: str = None,
expire: str = None,
callback: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # OSS access ID
        self.access_id = access_id
        # Upload policy
        self.policy = policy
        # Signature
        self.signature = signature
        # Authorized upload directory
        self.dir = dir
        # Upload endpoint
        self.host = host
        # Policy expiration time (seconds)
        self.expire = expire
        # Upload callback
        self.callback = callback
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.access_id is not None:
result['AccessId'] = self.access_id
if self.policy is not None:
result['Policy'] = self.policy
if self.signature is not None:
result['Signature'] = self.signature
if self.dir is not None:
result['Dir'] = self.dir
if self.host is not None:
result['Host'] = self.host
if self.expire is not None:
result['Expire'] = self.expire
if self.callback is not None:
result['Callback'] = self.callback
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('AccessId') is not None:
self.access_id = m.get('AccessId')
if m.get('Policy') is not None:
self.policy = m.get('Policy')
if m.get('Signature') is not None:
self.signature = m.get('Signature')
if m.get('Dir') is not None:
self.dir = m.get('Dir')
if m.get('Host') is not None:
self.host = m.get('Host')
if m.get('Expire') is not None:
self.expire = m.get('Expire')
if m.get('Callback') is not None:
self.callback = m.get('Callback')
return self
class GetOssPolicyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetOssPolicyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetOssPolicyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
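def _example_build_oss_upload_form():
    # Illustrative sketch only (not part of the generated SDK): the
    # GetOssPolicy body carries the pieces of a standard OSS PostObject form
    # upload. This maps them into form fields without performing an upload;
    # all values, and the object name 'pano.jpg', are made-up placeholders.
    body = GetOssPolicyResponseBody().from_map({
        'AccessId': 'LTAI-example',
        'Policy': 'base64-policy',
        'Signature': 'signature==',
        'Dir': 'uploads/sub-scene-1/',
        'Host': 'https://example-bucket.oss-cn-hangzhou.aliyuncs.com',
        'Callback': 'base64-callback',
    })
    form_fields = {
        'OSSAccessKeyId': body.access_id,
        'policy': body.policy,
        'Signature': body.signature,
        'key': body.dir + 'pano.jpg',
        'callback': body.callback,
    }
    return body.host, form_fields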
class GetConnDataRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
):
        # Scene ID
        self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class GetConnDataResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
map_id: str = None,
type: str = None,
):
        # ID
        self.id = id
        # Associated ID
        self.map_id = map_id
        # outer: external connection; inner: internal connection; stair: staircase connection
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.map_id is not None:
result['MapId'] = self.map_id
if self.type is not None:
result['Type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('MapId') is not None:
self.map_id = m.get('MapId')
if m.get('Type') is not None:
self.type = m.get('Type')
return self
class GetConnDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
version: str = None,
extend: str = None,
list: List[GetConnDataResponseBodyList] = None,
):
        # ID of the request
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Version
        self.version = version
        # Extended information
        self.extend = extend
        # Connection entries
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.version is not None:
result['Version'] = self.version
if self.extend is not None:
result['Extend'] = self.extend
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Version') is not None:
self.version = m.get('Version')
if m.get('Extend') is not None:
self.extend = m.get('Extend')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = GetConnDataResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class GetConnDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetConnDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetConnDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class TempPreviewStatusRequest(TeaModel):
def __init__(
self,
key: str = None,
):
        # Task ID
        self.key = key
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.key is not None:
result['Key'] = self.key
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Key') is not None:
self.key = m.get('Key')
return self
class TempPreviewStatusResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # processing: in progress; success: succeeded; failed: failed
        self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class TempPreviewStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: TempPreviewStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = TempPreviewStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
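def _example_poll_temp_preview_status(fetch_status):
    # Illustrative sketch only (not part of the generated SDK): the status
    # field is 'processing', 'success' or 'failed', so callers typically poll
    # until a terminal state. `fetch_status` is a hypothetical callable that
    # returns a parsed TempPreviewStatusResponseBody for the task key.
    import time
    while True:
        body = fetch_status()
        if body.status in ('success', 'failed'):
            return body.status
        time.sleep(2)  # back off between polls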
class AddProjectRequest(TeaModel):
def __init__(
self,
business_id: int = None,
name: str = None,
):
        # Business ID
        self.business_id = business_id
        # Project name
        self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.business_id is not None:
result['BusinessId'] = self.business_id
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('BusinessId') is not None:
self.business_id = m.get('BusinessId')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class AddProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Project ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class AddProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListMainScenesRequest(TeaModel):
def __init__(
self,
query_name: str = None,
):
self.query_name = query_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.query_name is not None:
result['QueryName'] = self.query_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('QueryName') is not None:
self.query_name = m.get('QueryName')
return self
class ListMainScenesResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class ListMainScenesResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListMainScenesResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListMainScenesResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DetailSubSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
):
        # Sub-scene ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DetailSubSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
name: str = None,
resource_id: str = None,
url: str = None,
cover_url: str = None,
status: int = None,
gmt_create: int = None,
gmt_modified: int = None,
):
        # Request ID, matching the requestId passed in
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Sub-scene ID
        self.id = id
        # Sub-scene name
        self.name = name
        # Image ID / video ID
        self.resource_id = resource_id
        # Image URL / video URL
        self.url = url
        # Image URL / video cover URL
        self.cover_url = cover_url
        # Sub-scene status
        self.status = status
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.resource_id is not None:
result['ResourceId'] = self.resource_id
if self.url is not None:
result['Url'] = self.url
if self.cover_url is not None:
result['CoverUrl'] = self.cover_url
if self.status is not None:
result['Status'] = self.status
if self.gmt_create is not None:
result['GmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['GmtModified'] = self.gmt_modified
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('ResourceId') is not None:
self.resource_id = m.get('ResourceId')
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('CoverUrl') is not None:
self.cover_url = m.get('CoverUrl')
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('GmtCreate') is not None:
self.gmt_create = m.get('GmtCreate')
if m.get('GmtModified') is not None:
self.gmt_modified = m.get('GmtModified')
return self
class DetailSubSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DetailSubSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DetailSubSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
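# Usage sketch (illustrative only): assumes a configured SDK client exposing a
# detail_sub_scene method that returns DetailSubSceneResponse; the client
# variable and method name are assumptions, not defined in this file.
#
#     request = DetailSubSceneRequest(id='your-sub-scene-id')
#     body = client.detail_sub_scene(request).body
#     if body.success:
#         print(body.name, body.url, body.status)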
class ListSubSceneRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
page_num: int = None,
page_size: int = None,
):
# Scene ID
self.scene_id = scene_id
# Page number
self.page_num = page_num
# Page size
self.page_size = page_size
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.page_num is not None:
result['PageNum'] = self.page_num
if self.page_size is not None:
result['PageSize'] = self.page_size
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('PageNum') is not None:
self.page_num = m.get('PageNum')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
return self
class ListSubSceneResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
resource_id: str = None,
url: str = None,
cover_url: str = None,
status: int = None,
gmt_create: int = None,
gmt_modified: int = None,
):
# Sub-scene ID
self.id = id
# Sub-scene name
self.name = name
# Image ID / video ID
self.resource_id = resource_id
# Image URL / video URL
self.url = url
# Image URL / video cover URL
self.cover_url = cover_url
# Sub-scene status: 1 = not rebuilt, 2 = intermediate model rebuilding,
# 3 = intermediate model rebuilt, 4 = pending rebuild, 5 = service-provider
# rebuilding, 6 = service-provider rebuild complete, 7 = published, 8 = publishing
self.status = status
# Creation time
self.gmt_create = gmt_create
# Last modification time
self.gmt_modified = gmt_modified
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.resource_id is not None:
result['ResourceId'] = self.resource_id
if self.url is not None:
result['Url'] = self.url
if self.cover_url is not None:
result['CoverUrl'] = self.cover_url
if self.status is not None:
result['Status'] = self.status
if self.gmt_create is not None:
result['GmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['GmtModified'] = self.gmt_modified
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('ResourceId') is not None:
self.resource_id = m.get('ResourceId')
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('CoverUrl') is not None:
self.cover_url = m.get('CoverUrl')
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('GmtCreate') is not None:
self.gmt_create = m.get('GmtCreate')
if m.get('GmtModified') is not None:
self.gmt_modified = m.get('GmtModified')
return self
class ListSubSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
has_next: bool = None,
current_page: int = None,
total_page: int = None,
count: int = None,
list: List[ListSubSceneResponseBodyList] = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Whether there is a next page
self.has_next = has_next
# Current page
self.current_page = current_page
# Total number of pages
self.total_page = total_page
# Total number of records
self.count = count
# List of sub-scenes
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.has_next is not None:
result['HasNext'] = self.has_next
if self.current_page is not None:
result['CurrentPage'] = self.current_page
if self.total_page is not None:
result['TotalPage'] = self.total_page
if self.count is not None:
result['Count'] = self.count
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('HasNext') is not None:
self.has_next = m.get('HasNext')
if m.get('CurrentPage') is not None:
self.current_page = m.get('CurrentPage')
if m.get('TotalPage') is not None:
self.total_page = m.get('TotalPage')
if m.get('Count') is not None:
self.count = m.get('Count')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = ListSubSceneResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class ListSubSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListSubSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListSubSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
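# Usage sketch (illustrative only): paging through sub-scenes with HasNext.
# The client variable and list_sub_scene method are assumptions.
#
#     page = 1
#     while True:
#         req = ListSubSceneRequest(scene_id='your-scene-id', page_num=page, page_size=20)
#         body = client.list_sub_scene(req).body
#         for item in body.list or []:
#             print(item.id, item.name, item.status)
#         if not body.has_next:
#             break
#         page += 1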
class UpdateSubSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
):
# Sub-scene ID
self.id = id
# Sub-scene name
self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class UpdateSubSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class UpdateSubSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateSubSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateSubSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CreateProjectRequest(TeaModel):
def __init__(
self,
name: str = None,
business_id: str = None,
business_user_id_list: str = None,
gather_user_id_list: str = None,
builder_user_id_list: str = None,
):
self.name = name
self.business_id = business_id
self.business_user_id_list = business_user_id_list
self.gather_user_id_list = gather_user_id_list
self.builder_user_id_list = builder_user_id_list
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['Name'] = self.name
if self.business_id is not None:
result['BusinessId'] = self.business_id
if self.business_user_id_list is not None:
result['BusinessUserIdList'] = self.business_user_id_list
if self.gather_user_id_list is not None:
result['GatherUserIdList'] = self.gather_user_id_list
if self.builder_user_id_list is not None:
result['BuilderUserIdList'] = self.builder_user_id_list
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('BusinessId') is not None:
self.business_id = m.get('BusinessId')
if m.get('BusinessUserIdList') is not None:
self.business_user_id_list = m.get('BusinessUserIdList')
if m.get('GatherUserIdList') is not None:
self.gather_user_id_list = m.get('GatherUserIdList')
if m.get('BuilderUserIdList') is not None:
self.builder_user_id_list = m.get('BuilderUserIdList')
return self
class CreateProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
id: int = None,
err_message: str = None,
success: bool = None,
name: str = None,
):
self.request_id = request_id
self.id = id
self.err_message = err_message
self.success = success
self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.id is not None:
result['Id'] = self.id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class CreateProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CreateProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CreateProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
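# Usage sketch (illustrative only): creating a project. The client variable
# and create_project method are assumptions.
#
#     req = CreateProjectRequest(name='demo-project', business_id='your-business-id')
#     body = client.create_project(req).body
#     if body.success:
#         print('created project', body.id)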
class DropBucketRequest(TeaModel):
def __init__(
self,
id: str = None,
):
# Bucket record ID
self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DropBucketResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: bool = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Whether the operation succeeded
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class DropBucketResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DropBucketResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DropBucketResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetUserBucketConfigResponseBodyConfigInput(TeaModel):
def __init__(
self,
id: str = None,
bucket_name: str = None,
location: str = None,
):
self.id = id
# Bucket name (3-63 characters)
self.bucket_name = bucket_name
# OSS region
self.location = location
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.bucket_name is not None:
result['BucketName'] = self.bucket_name
if self.location is not None:
result['Location'] = self.location
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('BucketName') is not None:
self.bucket_name = m.get('BucketName')
if m.get('Location') is not None:
self.location = m.get('Location')
return self
class GetUserBucketConfigResponseBodyConfigOutput(TeaModel):
def __init__(
self,
id: str = None,
bucket_name: str = None,
location: str = None,
):
self.id = id
# Bucket name (3-63 characters)
self.bucket_name = bucket_name
# OSS region
self.location = location
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.bucket_name is not None:
result['BucketName'] = self.bucket_name
if self.location is not None:
result['Location'] = self.location
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('BucketName') is not None:
self.bucket_name = m.get('BucketName')
if m.get('Location') is not None:
self.location = m.get('Location')
return self
class GetUserBucketConfigResponseBodyConfig(TeaModel):
def __init__(
self,
input: GetUserBucketConfigResponseBodyConfigInput = None,
output: GetUserBucketConfigResponseBodyConfigOutput = None,
):
self.input = input
self.output = output
def validate(self):
if self.input:
self.input.validate()
if self.output:
self.output.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.input is not None:
result['Input'] = self.input.to_map()
if self.output is not None:
result['Output'] = self.output.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Input') is not None:
temp_model = GetUserBucketConfigResponseBodyConfigInput()
self.input = temp_model.from_map(m['Input'])
if m.get('Output') is not None:
temp_model = GetUserBucketConfigResponseBodyConfigOutput()
self.output = temp_model.from_map(m['Output'])
return self
class GetUserBucketConfigResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
config: GetUserBucketConfigResponseBodyConfig = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
self.config = config
def validate(self):
if self.config:
self.config.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.config is not None:
result['Config'] = self.config.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Config') is not None:
temp_model = GetUserBucketConfigResponseBodyConfig()
self.config = temp_model.from_map(m['Config'])
return self
class GetUserBucketConfigResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetUserBucketConfigResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetUserBucketConfigResponseBody()
self.body = temp_model.from_map(m['body'])
return self
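# Usage sketch (illustrative only): reading the nested bucket config. The
# client variable, get_user_bucket_config method, and its (empty) argument
# list are assumptions.
#
#     body = client.get_user_bucket_config().body
#     if body.success and body.config:
#         print('input:', body.config.input.bucket_name, body.config.input.location)
#         print('output:', body.config.output.bucket_name, body.config.output.location)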
class AddBucketRequest(TeaModel):
def __init__(
self,
bucket_name: str = None,
type: int = None,
):
# Bucket name
self.bucket_name = bucket_name
# Input or output type (1 or 2)
self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.bucket_name is not None:
result['BucketName'] = self.bucket_name
if self.type is not None:
result['Type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('BucketName') is not None:
self.bucket_name = m.get('BucketName')
if m.get('Type') is not None:
self.type = m.get('Type')
return self
class AddBucketResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: bool = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Whether the operation succeeded
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class AddBucketResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddBucketResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddBucketResponseBody()
self.body = temp_model.from_map(m['body'])
return self
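# Usage sketch (illustrative only): registering a bucket. The client variable
# and add_bucket method are assumptions.
#
#     body = client.add_bucket(AddBucketRequest(bucket_name='my-bucket', type=1)).body
#     print('ok' if body.status else body.message)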
class SaveHotspotConfigRequest(TeaModel):
def __init__(
self,
param_tag: str = None,
preview_token: str = None,
):
self.param_tag = param_tag
self.preview_token = preview_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.param_tag is not None:
result['ParamTag'] = self.param_tag
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ParamTag') is not None:
self.param_tag = m.get('ParamTag')
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
return self
class SaveHotspotConfigResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class SaveHotspotConfigResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SaveHotspotConfigResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SaveHotspotConfigResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetWindowConfigRequest(TeaModel):
def __init__(
self,
preview_token: str = None,
):
self.preview_token = preview_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
return self
class GetWindowConfigResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: Dict[str, Any] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetWindowConfigResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetWindowConfigResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetWindowConfigResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class StatisQueryByTypeRequest(TeaModel):
def __init__(
self,
start_time: int = None,
end_time: int = None,
):
# Start time
self.start_time = start_time
# End time
self.end_time = end_time
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.start_time is not None:
result['StartTime'] = self.start_time
if self.end_time is not None:
result['EndTime'] = self.end_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('StartTime') is not None:
self.start_time = m.get('StartTime')
if m.get('EndTime') is not None:
self.end_time = m.get('EndTime')
return self
class StatisQueryByTypeResponseBodyList(TeaModel):
def __init__(
self,
type: int = None,
count: int = None,
):
# Type: 0 = default (the default value from when the type field was added),
# 1 = 3D model, 2 = panoramic image, 3 = panoramic video
self.type = type
# Number of main scenes on the given date
self.count = count
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.type is not None:
result['Type'] = self.type
if self.count is not None:
result['Count'] = self.count
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('Count') is not None:
self.count = m.get('Count')
return self
class StatisQueryByTypeResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
list: List[StatisQueryByTypeResponseBodyList] = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Result list
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = StatisQueryByTypeResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class StatisQueryByTypeResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: StatisQueryByTypeResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = StatisQueryByTypeResponseBody()
self.body = temp_model.from_map(m['body'])
return self
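# Usage sketch (illustrative only): counting main scenes by type over a time
# window. The client variable, statis_query_by_type method, and millisecond
# timestamps are assumptions.
#
#     req = StatisQueryByTypeRequest(start_time=1609430400000, end_time=1612108800000)
#     body = client.statis_query_by_type(req).body
#     for row in body.list or []:
#         print('type', row.type, 'count', row.count)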
class StatisExportSceneInfoRequest(TeaModel):
def __init__(
self,
start_time: int = None,
end_time: int = None,
):
# Start time
self.start_time = start_time
# End time
self.end_time = end_time
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.start_time is not None:
result['StartTime'] = self.start_time
if self.end_time is not None:
result['EndTime'] = self.end_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('StartTime') is not None:
self.start_time = m.get('StartTime')
if m.get('EndTime') is not None:
self.end_time = m.get('EndTime')
return self
class StatisExportSceneInfoResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
url: str = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Excel download URL
self.url = url
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.url is not None:
result['Url'] = self.url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Url') is not None:
self.url = m.get('Url')
return self
class StatisExportSceneInfoResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: StatisExportSceneInfoResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = StatisExportSceneInfoResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetHotspotConfigRequest(TeaModel):
def __init__(
self,
preview_token: str = None,
):
self.preview_token = preview_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
return self
class GetHotspotConfigResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetHotspotConfigResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetHotspotConfigResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetHotspotConfigResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetSceneBuildTaskStatusRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
):
# Scene ID
self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class GetSceneBuildTaskStatusResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
scene_id: str = None,
status: str = None,
type: str = None,
error_code: str = None,
error_msg: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Task ID
self.id = id
# Scene ID
self.scene_id = scene_id
# Task status: init (not started), processing (in progress),
# failure (failed), succeed (completed), canceled (canceled)
self.status = status
# Task type: wall_line (wall-line prediction), cut_image (image cutting),
# build (reconstruction), right_angle_optimization (right-angle optimization),
# other (other)
self.type = type
# Error code when the task failed
self.error_code = error_code
# Error message when the task failed
self.error_msg = error_msg
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.status is not None:
result['Status'] = self.status
if self.type is not None:
result['Type'] = self.type
if self.error_code is not None:
result['ErrorCode'] = self.error_code
if self.error_msg is not None:
result['ErrorMsg'] = self.error_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('ErrorCode') is not None:
self.error_code = m.get('ErrorCode')
if m.get('ErrorMsg') is not None:
self.error_msg = m.get('ErrorMsg')
return self
class GetSceneBuildTaskStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetSceneBuildTaskStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetSceneBuildTaskStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
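# Usage sketch (illustrative only): polling a build task until it reaches a
# terminal status. The client variable and get_scene_build_task_status method
# are assumptions; the status strings follow the comments above.
#
#     import time
#     while True:
#         req = GetSceneBuildTaskStatusRequest(scene_id='your-scene-id')
#         body = client.get_scene_build_task_status(req).body
#         if body.status in ('succeed', 'failure', 'canceled'):
#             print(body.status, body.error_code, body.error_msg)
#             break
#         time.sleep(5)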
class TempPreviewRequest(TeaModel):
def __init__(
self,
id: str = None,
):
# Scene ID
self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class TempPreviewResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
preview_url: str = None,
key: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Preview URL
self.preview_url = preview_url
# Task ID
self.key = key
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.preview_url is not None:
result['PreviewUrl'] = self.preview_url
if self.key is not None:
result['Key'] = self.key
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('PreviewUrl') is not None:
self.preview_url = m.get('PreviewUrl')
if m.get('Key') is not None:
self.key = m.get('Key')
return self
class TempPreviewResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: TempPreviewResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = TempPreviewResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class PublishSceneRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
):
# Scene ID
self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class PublishSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
preview_url: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Preview URL
self.preview_url = preview_url
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.preview_url is not None:
result['PreviewUrl'] = self.preview_url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('PreviewUrl') is not None:
self.preview_url = m.get('PreviewUrl')
return self
class PublishSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: PublishSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = PublishSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
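# Usage sketch (illustrative only): publishing a scene and reading the preview
# link. The client variable and publish_scene method are assumptions.
#
#     body = client.publish_scene(PublishSceneRequest(scene_id='your-scene-id')).body
#     if body.success:
#         print('published, preview at', body.preview_url)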
class DetailProjectRequest(TeaModel):
def __init__(
self,
id: str = None,
):
# Project ID
self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DetailProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
name: str = None,
business_id: int = None,
business_name: str = None,
gmt_create: int = None,
gmt_modified: int = None,
token: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Project ID
self.id = id
# Project name
self.name = name
# Business ID
self.business_id = business_id
# Business name
self.business_name = business_name
# Creation time
self.gmt_create = gmt_create
# Last modification time
self.gmt_modified = gmt_modified
# Token
self.token = token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.business_id is not None:
result['BusinessId'] = self.business_id
if self.business_name is not None:
result['BusinessName'] = self.business_name
if self.gmt_create is not None:
result['GmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['GmtModified'] = self.gmt_modified
if self.token is not None:
result['Token'] = self.token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('BusinessId') is not None:
self.business_id = m.get('BusinessId')
if m.get('BusinessName') is not None:
self.business_name = m.get('BusinessName')
if m.get('GmtCreate') is not None:
self.gmt_create = m.get('GmtCreate')
if m.get('GmtModified') is not None:
self.gmt_modified = m.get('GmtModified')
if m.get('Token') is not None:
self.token = m.get('Token')
return self
class DetailProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DetailProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DetailProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListScenesRequest(TeaModel):
def __init__(
self,
project_id: str = None,
is_publish_query: bool = None,
):
self.project_id = project_id
self.is_publish_query = is_publish_query
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.project_id is not None:
result['ProjectId'] = self.project_id
if self.is_publish_query is not None:
result['IsPublishQuery'] = self.is_publish_query
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
if m.get('IsPublishQuery') is not None:
self.is_publish_query = m.get('IsPublishQuery')
return self
class ListScenesResponseBodyData(TeaModel):
def __init__(
self,
scene_id: str = None,
):
self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class ListScenesResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
data: List[ListScenesResponseBodyData] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
result['Data'] = []
if self.data is not None:
for k in self.data:
result['Data'].append(k.to_map() if k else None)
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
self.data = []
if m.get('Data') is not None:
for k in m.get('Data'):
temp_model = ListScenesResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class ListScenesResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListScenesResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListScenesResponseBody()
self.body = temp_model.from_map(m['body'])
return self
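# Usage sketch (illustrative only): listing scene IDs in a project. The client
# variable and list_scenes method are assumptions.
#
#     req = ListScenesRequest(project_id='your-project-id', is_publish_query=True)
#     body = client.list_scenes(req).body
#     scene_ids = [d.scene_id for d in (body.data or [])]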
class DropSubSceneRequest(TeaModel):
def __init__(
self,
id: str = None,
):
# Sub-scene ID
self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class DropSubSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
# Request ID, matches the requestId passed in
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class DropSubSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DropSubSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DropSubSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class IsEnableOssRequest(TeaModel):
def __init__(
self,
is_enable: bool = None,
):
# Whether to enable OSS
self.is_enable = is_enable
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.is_enable is not None:
result['IsEnable'] = self.is_enable
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('IsEnable') is not None:
self.is_enable = m.get('IsEnable')
return self
class IsEnableOssResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: bool = None,
):
# Id of the request
self.request_id = request_id
# Return code
self.code = code
# Whether the request succeeded
self.success = success
# Error message
self.message = message
# Whether the operation succeeded
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class IsEnableOssResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: IsEnableOssResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = IsEnableOssResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetHotspotTagRequest(TeaModel):
def __init__(
self,
preview_token: str = None,
sub_scene_uuid: str = None,
type: str = None,
):
self.preview_token = preview_token
self.sub_scene_uuid = sub_scene_uuid
self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.preview_token is not None:
result['PreviewToken'] = self.preview_token
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
if self.type is not None:
result['Type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('PreviewToken') is not None:
self.preview_token = m.get('PreviewToken')
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
if m.get('Type') is not None:
self.type = m.get('Type')
return self
class GetHotspotTagResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetHotspotTagResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetHotspotTagResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetHotspotTagResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DropProjectRequest(TeaModel):
def __init__(
self,
project_id: str = None,
):
        # Project ID
self.project_id = project_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.project_id is not None:
result['ProjectId'] = self.project_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
return self
class DropProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class DropProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: DropProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = DropProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
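# Illustrative sketch, not part of the generated SDK surface: every model in this
# module follows the same TeaModel contract, so a raw wire payload can be
# round-tripped through from_map()/to_map(). The payload values are hypothetical.
def _example_drop_project_roundtrip() -> None:
    raw = {'RequestId': 'req-123', 'Code': 200, 'Success': True, 'Message': 'ok'}
    body = DropProjectResponseBody().from_map(raw)
    # Only fields that are not None are serialized back, so the round trip is exact here.
    assert body.success is True
    assert body.to_map() == raw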
class GetUserOssStatusResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: bool = None,
):
# Id of the request
self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Whether OSS is enabled
        self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class GetUserOssStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetUserOssStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetUserOssStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListProjectRequest(TeaModel):
def __init__(
self,
page_num: int = None,
page_size: int = None,
name: str = None,
):
        # Page number
        self.page_num = page_num
        # Page size
        self.page_size = page_size
        # Project name (prefix search, i.e. name%)
        self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.page_num is not None:
result['PageNum'] = self.page_num
if self.page_size is not None:
result['PageSize'] = self.page_size
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('PageNum') is not None:
self.page_num = m.get('PageNum')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class ListProjectResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
business_id: int = None,
business_name: str = None,
create_time: int = None,
modified_time: int = None,
token: str = None,
):
        # Project ID
        self.id = id
        # Project name
        self.name = name
        # Business ID
        self.business_id = business_id
        # Business name
        self.business_name = business_name
        # Creation time
        self.create_time = create_time
        # Last modified time
        self.modified_time = modified_time
# Token
self.token = token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.business_id is not None:
result['BusinessId'] = self.business_id
if self.business_name is not None:
result['BusinessName'] = self.business_name
if self.create_time is not None:
result['CreateTime'] = self.create_time
if self.modified_time is not None:
result['ModifiedTime'] = self.modified_time
if self.token is not None:
result['Token'] = self.token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('BusinessId') is not None:
self.business_id = m.get('BusinessId')
if m.get('BusinessName') is not None:
self.business_name = m.get('BusinessName')
if m.get('CreateTime') is not None:
self.create_time = m.get('CreateTime')
if m.get('ModifiedTime') is not None:
self.modified_time = m.get('ModifiedTime')
if m.get('Token') is not None:
self.token = m.get('Token')
return self
class ListProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
has_next: bool = None,
current_page: int = None,
total_page: int = None,
count: int = None,
list: List[ListProjectResponseBodyList] = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Whether there is a next page
        self.has_next = has_next
        # Current page number
        self.current_page = current_page
        # Total number of pages
        self.total_page = total_page
        # Count
        self.count = count
        # Project entries
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.has_next is not None:
result['HasNext'] = self.has_next
if self.current_page is not None:
result['CurrentPage'] = self.current_page
if self.total_page is not None:
result['TotalPage'] = self.total_page
if self.count is not None:
result['Count'] = self.count
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('HasNext') is not None:
self.has_next = m.get('HasNext')
if m.get('CurrentPage') is not None:
self.current_page = m.get('CurrentPage')
if m.get('TotalPage') is not None:
self.total_page = m.get('TotalPage')
if m.get('Count') is not None:
self.count = m.get('Count')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = ListProjectResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class ListProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
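# Illustrative sketch with hypothetical values: building a paged ListProject
# request and walking a typed response body. A real call would go through the
# service client, which is defined elsewhere in the SDK.
def _example_list_project_paging() -> None:
    request = ListProjectRequest(page_num=1, page_size=20, name='demo')
    assert request.to_map() == {'PageNum': 1, 'PageSize': 20, 'Name': 'demo'}
    body = ListProjectResponseBody().from_map({
        'RequestId': 'req-456',
        'Success': True,
        'HasNext': False,
        'List': [{'Id': '1001', 'Name': 'demo-project'}],
    })
    for project in body.list or []:
        print(project.id, project.name)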
class GetOriginLayoutDataRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # Sub-scene ID
self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetOriginLayoutDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
data: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Annotation data
        self.data = data
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.data is not None:
result['Data'] = self.data
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Data') is not None:
self.data = m.get('Data')
return self
class GetOriginLayoutDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetOriginLayoutDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetOriginLayoutDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class StatisListSceneInfoRequest(TeaModel):
def __init__(
self,
start_time: int = None,
end_time: int = None,
page_num: int = None,
page_size: int = None,
):
        # Start time
        self.start_time = start_time
        # End time
        self.end_time = end_time
        # Page number
        self.page_num = page_num
        # Number of items per page
        self.page_size = page_size
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.start_time is not None:
result['StartTime'] = self.start_time
if self.end_time is not None:
result['EndTime'] = self.end_time
if self.page_num is not None:
result['PageNum'] = self.page_num
if self.page_size is not None:
result['PageSize'] = self.page_size
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('StartTime') is not None:
self.start_time = m.get('StartTime')
if m.get('EndTime') is not None:
self.end_time = m.get('EndTime')
if m.get('PageNum') is not None:
self.page_num = m.get('PageNum')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
return self
class StatisListSceneInfoResponseBodyList(TeaModel):
def __init__(
self,
scene_type: int = None,
publish_time: str = None,
scene_name: str = None,
res_count: int = None,
project_name: str = None,
measure_count: int = None,
publish_status: int = None,
create_time: str = None,
scene_id: int = None,
project_id: int = None,
):
        # Scene type: 0 = default (default value from when the type field was added),
        # 1 = 3D model, 2 = panoramic image, 3 = panoramic video
        self.scene_type = scene_type
        # Publish time, precise to the second
        self.publish_time = publish_time
        # Main scene name
        self.scene_name = scene_name
        # Resource count
        self.res_count = res_count
        # Project name
        self.project_name = project_name
        # Metered (billing) count
        self.measure_count = measure_count
        # Whether published: 0 = unpublished, 1 = published
        self.publish_status = publish_status
        # Main scene creation time
        self.create_time = create_time
        # Main scene ID
        self.scene_id = scene_id
        # Project ID
        self.project_id = project_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_type is not None:
result['SceneType'] = self.scene_type
if self.publish_time is not None:
result['PublishTime'] = self.publish_time
if self.scene_name is not None:
result['SceneName'] = self.scene_name
if self.res_count is not None:
result['ResCount'] = self.res_count
if self.project_name is not None:
result['ProjectName'] = self.project_name
if self.measure_count is not None:
result['MeasureCount'] = self.measure_count
if self.publish_status is not None:
result['PublishStatus'] = self.publish_status
if self.create_time is not None:
result['CreateTime'] = self.create_time
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.project_id is not None:
result['ProjectId'] = self.project_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneType') is not None:
self.scene_type = m.get('SceneType')
if m.get('PublishTime') is not None:
self.publish_time = m.get('PublishTime')
if m.get('SceneName') is not None:
self.scene_name = m.get('SceneName')
if m.get('ResCount') is not None:
self.res_count = m.get('ResCount')
if m.get('ProjectName') is not None:
self.project_name = m.get('ProjectName')
if m.get('MeasureCount') is not None:
self.measure_count = m.get('MeasureCount')
if m.get('PublishStatus') is not None:
self.publish_status = m.get('PublishStatus')
if m.get('CreateTime') is not None:
self.create_time = m.get('CreateTime')
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('ProjectId') is not None:
self.project_id = m.get('ProjectId')
return self
class StatisListSceneInfoResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
list: List[StatisListSceneInfoResponseBodyList] = None,
page_num: int = None,
page_size: int = None,
total_count: int = None,
total_pages: int = None,
):
# Id of the request
self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Result list
        self.list = list
        # Page number
        self.page_num = page_num
        # Number of items per page
        self.page_size = page_size
        # Total count
        self.total_count = total_count
        # Total number of pages
        self.total_pages = total_pages
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
if self.page_num is not None:
result['PageNum'] = self.page_num
if self.page_size is not None:
result['PageSize'] = self.page_size
if self.total_count is not None:
result['TotalCount'] = self.total_count
if self.total_pages is not None:
result['TotalPages'] = self.total_pages
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = StatisListSceneInfoResponseBodyList()
self.list.append(temp_model.from_map(k))
if m.get('PageNum') is not None:
self.page_num = m.get('PageNum')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
if m.get('TotalCount') is not None:
self.total_count = m.get('TotalCount')
if m.get('TotalPages') is not None:
self.total_pages = m.get('TotalPages')
return self
class StatisListSceneInfoResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: StatisListSceneInfoResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = StatisListSceneInfoResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ScenePublishRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
):
        # Scene ID
self.scene_id = scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
return self
class ScenePublishResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
preview_url: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Preview URL
        self.preview_url = preview_url
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.preview_url is not None:
result['PreviewUrl'] = self.preview_url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('PreviewUrl') is not None:
self.preview_url = m.get('PreviewUrl')
return self
class ScenePublishResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ScenePublishResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ScenePublishResponseBody()
self.body = temp_model.from_map(m['body'])
return self
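# Illustrative sketch with hypothetical IDs: publishing a scene and reading the
# preview URL from the typed response body.
def _example_scene_publish() -> None:
    request = ScenePublishRequest(scene_id='scene-42')
    assert request.to_map() == {'SceneId': 'scene-42'}
    body = ScenePublishResponseBody().from_map(
        {'RequestId': 'req-789', 'Success': True, 'PreviewUrl': 'https://example.com/p/42'}
    )
    if body.success:
        print(body.preview_url)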
class SaveFileRequest(TeaModel):
def __init__(
self,
param_file: str = None,
sub_scene_uuid: str = None,
):
self.param_file = param_file
self.sub_scene_uuid = sub_scene_uuid
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.param_file is not None:
result['ParamFile'] = self.param_file
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('ParamFile') is not None:
self.param_file = m.get('ParamFile')
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
return self
class SaveFileResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: str = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class SaveFileResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SaveFileResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SaveFileResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetRectifyImageRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # Sub-scene ID
self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetRectifyImageResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
url: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Image URL
        self.url = url
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.url is not None:
result['Url'] = self.url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Url') is not None:
self.url = m.get('Url')
return self
class GetRectifyImageResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetRectifyImageResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetRectifyImageResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateProjectRequest(TeaModel):
def __init__(
self,
id: str = None,
name: str = None,
business_id: str = None,
):
        # Project ID
        self.id = id
        # Project name
        self.name = name
        # Business ID
        self.business_id = business_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.name is not None:
result['Name'] = self.name
if self.business_id is not None:
result['BusinessId'] = self.business_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('Name') is not None:
self.name = m.get('Name')
if m.get('BusinessId') is not None:
self.business_id = m.get('BusinessId')
return self
class UpdateProjectResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
return self
class UpdateProjectResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateProjectResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateProjectResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateBucketRequest(TeaModel):
def __init__(
self,
id: str = None,
new_bucket_name: str = None,
):
self.id = id
        # Bucket name
self.new_bucket_name = new_bucket_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.new_bucket_name is not None:
result['NewBucketName'] = self.new_bucket_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('NewBucketName') is not None:
self.new_bucket_name = m.get('NewBucketName')
return self
class UpdateBucketResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
status: bool = None,
):
# Id of the request
self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Whether the update succeeded
        self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
class UpdateBucketResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateBucketResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateBucketResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class StatisQueryByDayRequest(TeaModel):
def __init__(
self,
start_time: int = None,
end_time: int = None,
):
        # Start time
        self.start_time = start_time
        # End time
        self.end_time = end_time
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.start_time is not None:
result['StartTime'] = self.start_time
if self.end_time is not None:
result['EndTime'] = self.end_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('StartTime') is not None:
self.start_time = m.get('StartTime')
if m.get('EndTime') is not None:
self.end_time = m.get('EndTime')
return self
class StatisQueryByDayResponseBodyList(TeaModel):
def __init__(
self,
status: int = None,
count: int = None,
day: str = None,
):
        # Whether published: 0 = unpublished, 1 = published
        self.status = status
        # Count for the current status
        self.count = count
        # Date, in yyyy-MM-dd format
        self.day = day
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.status is not None:
result['Status'] = self.status
if self.count is not None:
result['Count'] = self.count
if self.day is not None:
result['Day'] = self.day
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('Count') is not None:
self.count = m.get('Count')
if m.get('Day') is not None:
self.day = m.get('Day')
return self
class StatisQueryByDayResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
list: List[StatisQueryByDayResponseBodyList] = None,
):
# Id of the request
self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Result list
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = StatisQueryByDayResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class StatisQueryByDayResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: StatisQueryByDayResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = StatisQueryByDayResponseBody()
self.body = temp_model.from_map(m['body'])
return self
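# Illustrative sketch: querying publish statistics grouped by day. The start/end
# times are plain integers; treating them as Unix timestamps in seconds is an
# assumption of this example, not something the model enforces.
def _example_statis_query_by_day() -> None:
    request = StatisQueryByDayRequest(start_time=1609459200, end_time=1612137600)
    assert request.to_map() == {'StartTime': 1609459200, 'EndTime': 1612137600}
    body = StatisQueryByDayResponseBody().from_map({
        'Success': True,
        'List': [{'Status': 1, 'Count': 3, 'Day': '2021-01-01'}],
    })
    for row in body.list or []:
        print(row.day, 'published' if row.status == 1 else 'unpublished', row.count)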
class GetSceneListRequest(TeaModel):
def __init__(
self,
account_id: str = None,
):
self.account_id = account_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.account_id is not None:
result['AccountId'] = self.account_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('AccountId') is not None:
self.account_id = m.get('AccountId')
return self
class GetSceneListResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
data: Dict[str, Any] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetSceneListResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetSceneListResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetSceneListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetSubSceneTaskStatusRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # Sub-scene ID
self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetSubSceneTaskStatusResponseBodyList(TeaModel):
def __init__(
self,
id: str = None,
scene_id: str = None,
sub_scene_id: str = None,
status: str = None,
type: str = None,
error_code: str = None,
error_msg: str = None,
):
        # Task ID
        self.id = id
        # Scene ID
        self.scene_id = scene_id
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
        # Task status: init (not started), processing, failure, succeed, canceled
        self.status = status
        # Task type: wall_line (wall-line prediction), cut_image (image cutting),
        # build (reconstruction), right_angle_optimization (right-angle optimization), other
        self.type = type
        # Error code when the task fails
        self.error_code = error_code
        # Error message when the task fails
        self.error_msg = error_msg
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['Id'] = self.id
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
if self.status is not None:
result['Status'] = self.status
if self.type is not None:
result['Type'] = self.type
if self.error_code is not None:
result['ErrorCode'] = self.error_code
if self.error_msg is not None:
result['ErrorMsg'] = self.error_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Id') is not None:
self.id = m.get('Id')
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
if m.get('Status') is not None:
self.status = m.get('Status')
if m.get('Type') is not None:
self.type = m.get('Type')
if m.get('ErrorCode') is not None:
self.error_code = m.get('ErrorCode')
if m.get('ErrorMsg') is not None:
self.error_msg = m.get('ErrorMsg')
return self
class GetSubSceneTaskStatusResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
list: List[GetSubSceneTaskStatusResponseBodyList] = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Task information
        self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
result['List'] = []
if self.list is not None:
for k in self.list:
result['List'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
self.list = []
if m.get('List') is not None:
for k in m.get('List'):
temp_model = GetSubSceneTaskStatusResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class GetSubSceneTaskStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetSubSceneTaskStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetSubSceneTaskStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
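# Illustrative helper, not generated code: interprets the documented task status
# values (init / processing / failure / succeed / canceled) from a response body.
# Returns True only when every task has finished successfully.
def _example_all_sub_scene_tasks_done(body: GetSubSceneTaskStatusResponseBody) -> bool:
    for task in body.list or []:
        if task.status == 'failure':
            raise RuntimeError(f'task {task.id} failed: {task.error_code} {task.error_msg}')
        if task.status in ('init', 'processing'):
            return False
    return True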
class PredictionWallLineRequest(TeaModel):
def __init__(
self,
url: str = None,
camera_height: int = None,
):
        # Image URL
        self.url = url
        # Camera height, in cm
        self.camera_height = camera_height
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.url is not None:
result['Url'] = self.url
if self.camera_height is not None:
result['CameraHeight'] = self.camera_height
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Url') is not None:
self.url = m.get('Url')
if m.get('CameraHeight') is not None:
self.camera_height = m.get('CameraHeight')
return self
class PredictionWallLineResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
task_id: str = None,
sub_scene_id: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Task ID
        self.task_id = task_id
        # Sub-scene ID
        self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.task_id is not None:
result['TaskId'] = self.task_id
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('TaskId') is not None:
self.task_id = m.get('TaskId')
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class PredictionWallLineResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: PredictionWallLineResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = PredictionWallLineResponseBody()
self.body = temp_model.from_map(m['body'])
return self
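# Illustrative sketch with a placeholder image URL: a wall-line prediction request
# carries the panorama URL and the camera height in centimetres, and the response
# returns a task ID that can presumably be polled via GetSubSceneTaskStatus.
def _example_prediction_wall_line_request() -> None:
    request = PredictionWallLineRequest(url='https://example.com/pano.jpg', camera_height=120)
    assert request.to_map() == {'Url': 'https://example.com/pano.jpg', 'CameraHeight': 120}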
class GetPolicyRequest(TeaModel):
def __init__(
self,
sub_scene_uuid: str = None,
type: str = None,
):
self.sub_scene_uuid = sub_scene_uuid
self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_uuid is not None:
result['SubSceneUuid'] = self.sub_scene_uuid
if self.type is not None:
result['Type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneUuid') is not None:
self.sub_scene_uuid = m.get('SubSceneUuid')
if m.get('Type') is not None:
self.type = m.get('Type')
return self
class GetPolicyResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
object_string: str = None,
data: Dict[str, Any] = None,
err_message: str = None,
success: bool = None,
):
self.request_id = request_id
self.object_string = object_string
self.data = data
self.err_message = err_message
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.object_string is not None:
result['ObjectString'] = self.object_string
if self.data is not None:
result['Data'] = self.data
if self.err_message is not None:
result['ErrMessage'] = self.err_message
if self.success is not None:
result['Success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ObjectString') is not None:
self.object_string = m.get('ObjectString')
if m.get('Data') is not None:
self.data = m.get('Data')
if m.get('ErrMessage') is not None:
self.err_message = m.get('ErrMessage')
if m.get('Success') is not None:
self.success = m.get('Success')
return self
class GetPolicyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetPolicyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetPolicyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class AddSubSceneRequest(TeaModel):
def __init__(
self,
scene_id: str = None,
name: str = None,
):
        # Scene ID
        self.scene_id = scene_id
        # Sub-scene name
        self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scene_id is not None:
result['SceneId'] = self.scene_id
if self.name is not None:
result['Name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SceneId') is not None:
self.scene_id = m.get('SceneId')
if m.get('Name') is not None:
self.name = m.get('Name')
return self
class AddSubSceneResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
id: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Sub-scene ID
        self.id = id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.id is not None:
result['Id'] = self.id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Id') is not None:
self.id = m.get('Id')
return self
class AddSubSceneResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddSubSceneResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddSubSceneResponseBody()
self.body = temp_model.from_map(m['body'])
return self
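# Illustrative sketch with hypothetical IDs: creating a sub-scene under an
# existing scene and reading the new sub-scene ID from the response body.
def _example_add_sub_scene() -> None:
    request = AddSubSceneRequest(scene_id='scene-42', name='living-room')
    assert request.to_map() == {'SceneId': 'scene-42', 'Name': 'living-room'}
    body = AddSubSceneResponseBody().from_map({'RequestId': 'req-1', 'Success': True, 'Id': 'subscene-7'})
    print(body.id)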
class GetLayoutDataRequest(TeaModel):
def __init__(
self,
sub_scene_id: str = None,
):
        # Sub-scene ID
self.sub_scene_id = sub_scene_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.sub_scene_id is not None:
result['SubSceneId'] = self.sub_scene_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('SubSceneId') is not None:
self.sub_scene_id = m.get('SubSceneId')
return self
class GetLayoutDataResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
code: int = None,
success: bool = None,
message: str = None,
data: str = None,
):
        # Request ID, corresponding to the requestId in the request parameters
        self.request_id = request_id
        # Return code
        self.code = code
        # Whether the request succeeded
        self.success = success
        # Error message
        self.message = message
        # Annotation information
        self.data = data
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.code is not None:
result['Code'] = self.code
if self.success is not None:
result['Success'] = self.success
if self.message is not None:
result['Message'] = self.message
if self.data is not None:
result['Data'] = self.data
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('Code') is not None:
self.code = m.get('Code')
if m.get('Success') is not None:
self.success = m.get('Success')
if m.get('Message') is not None:
self.message = m.get('Message')
if m.get('Data') is not None:
self.data = m.get('Data')
return self
class GetLayoutDataResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetLayoutDataResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetLayoutDataResponseBody()
self.body = temp_model.from_map(m['body'])
return self
# --------------------------------------------------------------------------
# File: /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2022_09_01/aio/operations/_object_replication_policies_operations.py
# From the Azure/azure-sdk-for-python repository (MIT license).
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._object_replication_policies_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ObjectReplicationPoliciesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2022_09_01.aio.StorageManagementClient`'s
:attr:`object_replication_policies` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncIterable["_models.ObjectReplicationPolicy"]:
"""List the object replication policies associated with the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ObjectReplicationPolicy or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2022-09-01"))
cls: ClsType[_models.ObjectReplicationPolicies] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ObjectReplicationPolicies", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, account_name: str, object_replication_policy_id: str, **kwargs: Any
) -> _models.ObjectReplicationPolicy:
"""Get the object replication policy of the storage account by policy ID.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param object_replication_policy_id: For the destination account, provide the value 'default'.
Configure the policy on the destination account first. For the source account, provide the
value of the policy ID that is returned when you download the policy that was defined on the
destination account. The policy is downloaded as a JSON file. Required.
:type object_replication_policy_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ObjectReplicationPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2022-09-01"))
cls: ClsType[_models.ObjectReplicationPolicy] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
object_replication_policy_id=object_replication_policy_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ObjectReplicationPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}"
}
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
object_replication_policy_id: str,
properties: _models.ObjectReplicationPolicy,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ObjectReplicationPolicy:
"""Create or update the object replication policy of the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param object_replication_policy_id: For the destination account, provide the value 'default'.
Configure the policy on the destination account first. For the source account, provide the
value of the policy ID that is returned when you download the policy that was defined on the
destination account. The policy is downloaded as a JSON file. Required.
:type object_replication_policy_id: str
:param properties: The object replication policy set to a storage account. A unique policy ID
will be created if absent. Required.
:type properties: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ObjectReplicationPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
object_replication_policy_id: str,
properties: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ObjectReplicationPolicy:
"""Create or update the object replication policy of the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param object_replication_policy_id: For the destination account, provide the value 'default'.
Configure the policy on the destination account first. For the source account, provide the
value of the policy ID that is returned when you download the policy that was defined on the
destination account. The policy is downloaded as a JSON file. Required.
:type object_replication_policy_id: str
:param properties: The object replication policy set to a storage account. A unique policy ID
will be created if absent. Required.
:type properties: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ObjectReplicationPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
object_replication_policy_id: str,
properties: Union[_models.ObjectReplicationPolicy, IO],
**kwargs: Any
) -> _models.ObjectReplicationPolicy:
"""Create or update the object replication policy of the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param object_replication_policy_id: For the destination account, provide the value 'default'.
Configure the policy on the destination account first. For the source account, provide the
value of the policy ID that is returned when you download the policy that was defined on the
destination account. The policy is downloaded as a JSON file. Required.
:type object_replication_policy_id: str
:param properties: The object replication policy set to a storage account. A unique policy ID
will be created if absent. Is either a ObjectReplicationPolicy type or a IO type. Required.
:type properties: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ObjectReplicationPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ObjectReplicationPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2022-09-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ObjectReplicationPolicy] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(properties, (IOBase, bytes)):
_content = properties
else:
_json = self._serialize.body(properties, "ObjectReplicationPolicy")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
object_replication_policy_id=object_replication_policy_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ObjectReplicationPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, object_replication_policy_id: str, **kwargs: Any
) -> None:
"""Deletes the object replication policy associated with the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param object_replication_policy_id: For the destination account, provide the value 'default'.
Configure the policy on the destination account first. For the source account, provide the
value of the policy ID that is returned when you download the policy that was defined on the
destination account. The policy is downloaded as a JSON file. Required.
:type object_replication_policy_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2022-09-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
object_replication_policy_id=object_replication_policy_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}"
}
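# Hedged usage sketch (not part of the generated client): the credential type,
# subscription id, resource group and account name below are assumptions for
# illustration only.
if __name__ == "__main__":  # pragma: no cover
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.storage.v2022_09_01.aio import StorageManagementClient
    async def _demo() -> None:
        async with DefaultAzureCredential() as credential:
            async with StorageManagementClient(credential, "<subscription-id>") as client:
                # iterate the paged ObjectReplicationPolicy results
                async for policy in client.object_replication_policies.list("<resource-group>", "<account>"):
                    print(policy.policy_id)
    asyncio.run(_demo())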
| [
"[email protected]"
]
| |
770a4f0fac37204c56e7b46b9d6c7531262723e9 | dc99adb79f15b3889a7ef6139cfe5dfc614889b8 | /Aplikace_1_0/Source/ewitis/gui/dfTableRaceInfo.py | 43c2a4ae52ddd807c9956e0ae4200b57071eb4be | []
| no_license | meloun/ew_aplikace | 95d1e4063a149a10bb3a96f372691b5110c26b7b | f890c020ad8d3d224f796dab3f1f222c1f6ba0eb | refs/heads/master | 2023-04-28T06:43:12.252105 | 2023-04-18T19:59:36 | 2023-04-18T19:59:36 | 2,674,595 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | # -*- coding: utf-8 -*-
import time
import pandas as pd
import pandas.io.sql as psql
from PyQt4 import QtCore, QtGui
from ewitis.gui.aTab import MyTab
from libs.myqt.DataframeTableModel import DataframeTableModel, ModelUtils
from ewitis.gui.dfTable import DfTable
from ewitis.gui.dfTableTimes import tableTimes
from ewitis.gui.dfTableUsers import tableUsers
from ewitis.gui.dfTableCategories import tableCategories
from ewitis.data.dstore import dstore
class DfModelRaceInfo(DataframeTableModel):
"""
RaceInfo table
states:
- race (default)
- dns (manually set)
- dq (manually set)
- dnf (manually set)
- finished (time received)
    NOT final results:
    - only finished
    Final results:
- finished = with time
- race + dnf = DNF
- dns = DNS
- dq = DQ
"""
def __init__(self, table):
super(DfModelRaceInfo, self).__init__(table)
def getDefaultTableRow(self):
row = pd.Series()
row["id"] = 0
row["name"] = "NOTDEF"
row["cell#1"] = "-"
row["cell#2"] = "-"
row["cell#3"] = "-"
row["cell#4"] = "-"
row["cell#5"] = "-"
row["cell#250"] = "-"
return row
#virtual function to override
def GetDataframe(self):
row_id = 1
rows = pd.DataFrame()
        #check if df is already available
if tableTimes.model.df.empty:
return pd.DataFrame()
'''ADD TOTAL'''
#group by cell and get size
serTimesByCell_size = tableTimes.model.df.groupby("cell", as_index=False).size()
#create new row
row = self.getDefaultTableRow()
row["id"] = row_id
row["name"] = "Total"
for (k,v) in serTimesByCell_size.iteritems():
key = "cell#"+str(k)
row[key] = v
#append new row
rows = rows.append(row, ignore_index=True)
row_id = row_id + 1
'''ADD CATEGORIES'''
#group by category and get size
gbTimesByCategory = tableTimes.model.df.groupby("category")
for category, dfTimesInCategory in gbTimesByCategory:
serTimesForCategoryByCell_size = dfTimesInCategory.groupby("cell").size()
#create new row
row = self.getDefaultTableRow()
row["id"] = row_id
row["name"] = category
for (k,v) in serTimesForCategoryByCell_size.iteritems():
key = "cell#"+str(k)
row[key] = v
#add new row and increment id
rows = rows.append(row, ignore_index=True)
row_id = row_id + 1
df = pd.DataFrame(rows, columns=row.keys())
return df
'''
Proxy Model
'''
class DfProxymodelRaceInfo(QtGui.QSortFilterProxyModel, ModelUtils):
def __init__(self, parent = None):
QtGui.QSortFilterProxyModel.__init__(self, parent)
#This property holds whether the proxy model is dynamically sorted and filtered whenever the contents of the source model change.
self.setDynamicSortFilter(True)
#This property holds the column where the key used to filter the contents of the source model is read from.
#The default value is 0. If the value is -1, the keys will be read from all columns.
self.setFilterKeyColumn(-1)
# view <- proxymodel <- model
class DfTableRaceInfo(DfTable):
def __init__(self):
DfTable.__init__(self, "RaceInfo")
def Init(self):
DfTable.Init(self)
self.gui['view'].sortByColumn(0, QtCore.Qt.AscendingOrder)
    # in the model this function accesses the db; raceinfo has no table in the db
def updateDbCounter(self):
pass
tableRaceInfo = DfTableRaceInfo()
tabRaceInfo = MyTab(tables = [tableRaceInfo,])
| [
"[email protected]"
]
| |
48ae5f9162dd9fe01cfd9e5c0cec8a2cdd326f8c | fa841ab3564e0e0fd6065201846fb6f305c43719 | /jalon.elasticsearch/jalon/elasticsearch/content/__init__.py | a3203a25b32d84426596f95eb518017ad7ff902d | []
| no_license | suipnice/Jalon | dc008232baba6c1295cb8a6d6001147e22e03c2a | bc003d10ed15d6ecc5f15fdb3809e9dd53b568bd | refs/heads/master | 2021-01-08T05:46:55.757385 | 2016-06-13T11:58:31 | 2016-06-13T11:58:31 | 241,926,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | import jalonelasticsearch
| [
"[email protected]"
]
| |
ec4e390fde2b57c9278f907fe4e06f6e1b2ec1af | eaa7b4fd05624bea164f5ac897f8f4a9700f5c24 | /CACLA code/log-files/V-rep_AL5D_no_sim/Jan-22_15.45.30_best_sim/main.py | e960c03efe35e1e98d9d8c361916897c217a2a7a | []
| no_license | benquick123/CACLA-python | de187d387b53d338874e5b94f134bb557cd95ed5 | eb74c928b8b097c4ae42fd918385adbf3e867262 | refs/heads/master | 2020-03-30T06:35:50.717106 | 2019-03-21T22:54:46 | 2019-03-21T22:54:46 | 150,873,461 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,592 | py | import gym
import numpy as np
import time
from cacla import Cacla
from utils import Logger
from datetime import datetime
import pickle
import copy
import vrep_arm3 as arm
env_name = "V-rep_AL5D_no_sim"
now = datetime.utcnow().strftime("%b-%d_%H.%M.%S") # for unique directories
logger = None
def run_episode(model, episode, animate=False):
"""
    The core of data collection.
    For each step (until done == True) it computes the value function at times t0 and t1,
    based on the explored action a0 ~ A0 + exploration noise.
    The critic is always fitted; the actor is fitted only when the temporal-difference
    error (delta) is positive, which is the CACLA learning rule.
    Each step is saved into the trajectory list, which is returned at the end.
"""
done = False
trajectory = []
observation0 = model.env.reset()
# iteration_n = 0
# print("RESET observation", observation0)
# scale, offset = scaler.get()
while not done:
if model.env.simulation:
model.env.render()
V0 = model.critic.predict(np.array([observation0]))
A0 = model.actor.predict(np.array([observation0]))
joint_positions0 = model.env.get_joint_positions()
# print("BEFORE STEP joint positions", joint_positions0.tolist())
a0 = model.sample(A0[0], model.exploration_factor)
a0 = [a0]
# print("EXPLORING ACTION", a0)
# env_state0 = copy.deepcopy(model.env)
observation1, reward, done, info = model.env.step(a0[0])
a0 = info["actual_action"]
# print("AFTER STEP observation, reward", observation1.tolist(), reward)
V1 = model.critic.predict(np.array([observation1]))
delta = reward + model.gamma * V1 - V0
# print("DELTA", delta)
# fit critic
model.critic.fit(np.array([observation0]), [reward + model.gamma * V1], batch_size=1, verbose=0)
# print("FITTING CRITIC")
        # record the step before observation0 is overwritten, so that
        # step["observation0"] keeps the pre-transition observation
        step = {"observation0": observation0, "observation1": observation1,
                # "observation_unscaled": observation_unscaled,
                "V0": V0[0], "V1": V1[0], "A0": A0[0][:], "a0": a0[0][:],
                "reward": reward, "delta": delta[0][0]}
        trajectory.append(step)
        if delta > 0:
            # if delta is positive, fit the actor towards the explored action
            model.actor.fit(np.array([observation0]), [a0], batch_size=1, verbose=0)
            observation0 = observation1
        else:
            # otherwise restore the arm to how it was before model.env.step()
            model.env.set_joint_positions(joint_positions0)
            # model.env = env_state0
# print("OBSERVATION AT END", observation0.tolist())
# if iteration_n >= 50:
# break
# iteration_n += 1
return trajectory
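# Hedged illustration of the CACLA update rule applied above, on plain floats
# instead of the Keras models (all numbers below are made up):
#   delta = r + gamma * V(s1) - V(s0); the critic always moves towards
#   r + gamma * V(s1), while the actor moves towards a0 only when delta > 0.
def _cacla_rule_demo(reward=1.0, gamma=0.0, v0=0.2, v1=0.0):
    delta = reward + gamma * v1 - v0
    critic_target = reward + gamma * v1
    update_actor = delta > 0
    return delta, critic_target, update_actor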
def run_batch(model, batch_size, episode, animate=False):
"""
    Accepts the CACLA model, the batch size and the current episode counter.
    Runs batch_size episodes, logs the rewards and returns all traversed trajectories.
"""
trajectories = []
total_steps = 0
for _ in range(batch_size):
trajectory = run_episode(model, episode, animate=animate)
total_steps += len(trajectory)
trajectories.append(trajectory)
last_rewards = [trajectory[-1]["reward"] for trajectory in trajectories]
logger.log({"_MeanReward": np.mean([t["reward"] for trajectory in trajectories for t in trajectory]),
# "_MeanReward": np.mean([np.sum([t["reward"] for t in trajectory]) for trajectory in trajectories]),
"Steps": total_steps,
"mean_last_reward": np.mean(last_rewards),
"_std_last_reward": np.std(last_rewards),
"_min_last_reward": np.min(last_rewards),
"_max_last_reward": np.max(last_rewards)})
return trajectories
def log_batch_stats(trajectories, episode, alpha, beta, exploration_factor):
"""
    Creates a dictionary with values to log.
    Returns the dictionary so the values can be reused elsewhere.
"""
rewards = [t["reward"] for trajectory in trajectories for t in trajectory]
actions_0 = [t["A0"] for trajectory in trajectories for t in trajectory]
policy_loss = [np.square(np.array(t["a0"]) - np.array(t["A0"])).mean() for trajectory in trajectories for t in trajectory]
deltas = [np.square(np.array(t["delta"])).mean() for trajectory in trajectories for t in trajectory]
observations = [t["observation0"] for trajectory in trajectories for t in trajectory]
d = {"_min_reward": np.min(rewards),
"_max_reward": np.max(rewards),
"_mean_reward": np.mean(rewards),
"_std_reward": np.std(rewards),
"_min_action": np.min(actions_0),
"_max_action": np.max(actions_0),
"_mean_action": np.mean(actions_0),
"_std_action": np.std(actions_0),
"_min_observation": np.min(observations),
"_max_observation": np.max(observations),
"_mean_observation": np.mean(observations),
"_std_observations": np.std(observations),
"_min_value_loss": np.min(deltas),
"_max_value_loss": np.max(deltas),
"mean_value_loss": np.mean(deltas),
"_std_value_loss": np.std(deltas),
"_min_policy_loss": np.min(policy_loss),
"_max_policy_loss": np.max(policy_loss),
"mean_policy_loss": np.mean(policy_loss),
"_std_policy_loss": np.std(policy_loss),
"policy_lr": alpha,
"value_lr": beta,
"exploration_factor": exploration_factor,
"_episode": episode}
logger.log(d)
return d
def train(model, n_episodes, batch_size, animate=False):
"""
    Accepts the model (CACLA in our case), the total number of episodes, the batch size
    and an optional flag to animate the GUI.
    Trains the actor and critic for the given number of episodes.
"""
episode = 0
best_reward = 0
while episode < n_episodes:
# compute trajectories, that CACLA will use to train critic and actor.
trajectories = run_batch(model, batch_size, episode, animate)
episode += batch_size
# save logging data
d = log_batch_stats(trajectories, episode, model.alpha, model.beta, model.exploration_factor)
if d["_mean_reward"] >= best_reward:
best_reward = d["_mean_reward"]
pickle.dump(model, open(logger.path + "/model.pickle", "wb"))
logger.write(display=True)
# update learning and exploration rates for the algorithm.
model.update_lr(model.lr_decay)
# if model.exploration_factor < 0.04:
# exploration_decay = (n_episodes - episode) / (n_episodes - episode + batch_size)
model.update_exploration()
# model.update_exploration()
pickle.dump(model, open(logger.path + "/model_final.pickle", "wb"))
logger.close()
def test(model, n):
# model.env = gym.make(env_name)
success = 0
for i in range(n):
observation = model.env.reset()
done = False
while not done:
if model.env.simulation:
model.env.render()
action = model.actor.predict(np.array([observation]))
observation, reward, done, info = model.env.step(action[0])
print("iteration:", i, "reward:", reward, "distance:", info["distance"], "done:", done)
if info["distance"] < 0.01:
success += 1
time.sleep(3)
print("success rate:", success / n)
if __name__ == "__main__":
action_multiplier = 0.1
env = arm.VrepArm(action_multiplier=action_multiplier)
# cacla = pickle.load(open("C:/Users/Jonathan/Documents/School/Project_Farkas/CACLA code/log-files/V-rep_AL5D_no_sim/Jan-21_00.49.49/model_final.pickle", "rb"))
# cacla.env = env
# test(cacla, 20)
# exit()
input_dim = env.observation_space.shape[0]
output_dim = env.action_space.shape[0]
alpha = 0.0007 # learning rate for actor
beta = 0.001 # learning rate for critic
lr_decay = 0.997 # lr decay
exploration_decay = 0.997 # exploration decay
gamma = 0.0 # discount factor
exploration_factor = 0.15
n_episodes = 20000
batch_size = 50
logger = Logger(logname=env_name, now=now)
cacla = Cacla(env, input_dim, output_dim, alpha, beta, gamma, lr_decay, exploration_decay, exploration_factor)
train(cacla, n_episodes, batch_size)
input("Continue?")
env = arm.VrepArm(simulation=True, action_multiplier=action_multiplier)
cacla.env = env
test(cacla, 10)
| [
"[email protected]"
]
| |
5b16b645adc70ad58e2d3385d3f9776e44594bf6 | 44a7330dfa4fe321eb432ee57a32328578dec109 | /milk/supervised/gridsearch.py | 265712ad1d69da0ae0eb8f9345e6471362c1c126 | [
"MIT"
]
| permissive | tzuryby/milk | 7cb6760fad600e9e0d0c9216dc749db289b596fb | a7159b748414d4d095741978fb994c4affcf6b9b | refs/heads/master | 2020-12-29T02:45:33.044864 | 2011-03-15T20:23:29 | 2011-03-15T20:25:11 | 1,485,748 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,128 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from .classifier import normaliselabels
__all__ = [
'gridminimise',
'gridsearch',
]
def _allassignments(options):
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# from http://docs.python.org/library/itertools.html#itertools.product
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from itertools import repeat, izip
for ks,vs in izip(repeat(options.keys()), product(*options.values())):
yield zip(ks,vs)
def _set_assignment(obj,assignments):
for k,v in assignments:
obj.set_option(k,v)
def gridminimise(learner, features, labels, params, measure=None, nfolds=10):
'''
best = gridminimise(learner, features, labels, params, measure={0/1 loss})
Grid search for the settings of parameters that maximises a given measure
This function is equivalent to searching the grid, but does not actually
search the whole grid.
Parameters
----------
learner : a classifier object
features : sequence of features
labels : sequence of labels
params : dictionary of sequences
keys are the options to change,
values are sequences of corresponding elements to try
measure : function, optional
a function that takes labels and outputs and returns the loss.
Default: 0/1 loss. This must be an *additive* function.
nfolds : integer, optional
nr of folds to run, default: 10
Returns
-------
best : a sequence of assignments
'''
# The algorithm is as follows:
#
# for all assignments: error = 0, next_iteration = 0
#
# at each iteration:
# look for assignment with smallest error
# if that is done: return it
# else: perform one more iteration
#
# When the function returns, that assignment has the lowest error of all
# assignments and all the iterations are done. Therefore, other assignments
# could only be worse even if we never computed the whole error!
from ..measures.nfoldcrossvalidation import foldgenerator
if measure is None:
def measure(real, preds):
return np.sum(np.asarray(real) != np.asarray(preds))
labels,_ = normaliselabels(labels)
allassignments = list(_allassignments(params))
N = len(allassignments)
iteration = np.zeros(N, int)
error = np.zeros(N, float)
folds = [(Tr.copy(), Te.copy()) for Tr,Te in foldgenerator(labels, nfolds)]
# foldgenerator might actually decide on a smaller number of folds,
# depending on the distribution of class sizes:
nfolds = len(folds)
while True:
next_pos = (error == error.min())
iter = iteration[next_pos].max()
if iter == nfolds:
(besti,) = np.where(next_pos & (iteration == iter))
besti = besti[0]
return allassignments[besti]
(ps,) = np.where(next_pos & (iteration == iter))
p = ps[0]
_set_assignment(learner, allassignments[p])
train, test = folds[iter]
model = learner.train(features[train], labels[train], normalisedlabels=True)
preds = [model.apply(f) for f in features[test]]
error[p] += measure(labels[test], preds)
iteration[p] += 1
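# Hedged usage sketch: gridminimise drives any learner exposing set_option()
# and train(); the tiny threshold learner below is illustrative only and is
# not part of milk's API.
class _ToyThresholdLearner(object):
    def set_option(self, key, value):
        setattr(self, key, value)
    def train(self, features, labels, normalisedlabels=False):
        threshold = self.threshold
        class _ToyModel(object):
            def apply(self, f):
                return int(f[0] > threshold)
        return _ToyModel()
def _gridminimise_demo():
    # 20 points, perfectly separated at 1.5, so gridminimise should pick it
    features = np.array([[0.], [1.], [2.], [3.]] * 5)
    labels = np.array([0, 0, 1, 1] * 5)
    return gridminimise(_ToyThresholdLearner(), features, labels,
                        {'threshold': [0.5, 1.5, 2.5]}, nfolds=2)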
class gridsearch(object):
'''
    G = gridsearch(base, measure=accuracy, nfolds=10, params={'param1': [...], 'param2': [...]})
Perform a grid search for the best parameter values.
When G.train() is called, then for each combination of p1 in param1, p2 in
param2, ... it performs::
base_classifier.param1 = p1
base_classifier.param2 = p2
...
        value[p1, p2,...] = measure(crossvalidation(base_classifier))
    it then picks the best-performing set of parameters and re-learns a model on the
whole data.
Parameters
-----------
base_classifier : classifier to use
measure : function, optional
a function that takes labels and outputs and returns the loss.
Default: 0/1 loss. This must be an *additive* function.
nfolds : integer, optional
Nr of folds
params : dictionary
'''
def __init__(self, base, measure=None, nfolds=10, params={}):
self.params = params
self.base = base
        self.nfolds = nfolds
self.measure = measure
def is_multi_class(self):
return self.base.is_multi_class()
def train(self, features, labels, normalisedlabels=False):
self.best = gridminimise(self.base, features, labels, self.params, self.measure, self.nfolds)
_set_assignment(self.base, self.best)
return self.base.train(features, labels, normalisedlabels=normalisedlabels)
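# Hedged usage sketch (assumes milk's SVM learner is importable; the grid
# below is illustrative only):
#   from milk.supervised.svm import svm_raw, rbf_kernel
#   learner = gridsearch(svm_raw(),
#                        params={'C': [0.1, 1., 10.],
#                                'kernel': [rbf_kernel(1.), rbf_kernel(4.)]})
#   model = learner.train(features, labels)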
| [
"[email protected]"
]
| |
deec8d033bdd63658b11e459446f53fd82d72ea6 | fd4510e0bf959de7527bd0c62d3b4fb3f78cee5e | /drivers/hot.py | c4607b3bd6c73821e3566c20a1a2b3c2605203d1 | []
| no_license | RuoAndo/nii-cyber-security-admin | 8dde8ab68b0f7fa882adbe8e828546aa1739e685 | e77b9d581e124f9fd5f721e18cd77d3bccecad19 | refs/heads/master | 2022-12-13T21:40:46.330389 | 2022-12-07T14:01:00 | 2022-12-07T14:01:00 | 71,614,880 | 5 | 1 | null | 2020-10-13T08:40:46 | 2016-10-22T03:41:30 | Python | UTF-8 | Python | false | false | 465 | py | # -*- coding:utf-8 -*-
import psycopg2
conn = psycopg2.connect(
host = "192.168.1.1",
port = 5432,
database="xen460",
user="postgres",
password="")
cur = conn.cursor()
sql = "SELECT relname, n_tup_upd, n_tup_hot_upd, round(n_tup_hot_upd*100/n_tup_upd, 2) AS hot_upd_ratio FROM pg_stat_user_tables WHERE n_tup_upd > 0 ORDER BY hot_upd_ratio;"
cur.execute(sql)
ans =cur.fetchall()
print ans
#conn.commit()
cur.close()
conn.close()
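# Hedged variant of the same query (connection parameters are assumptions):
# psycopg2's context managers commit/roll back the transaction and close the
# cursor; the connection itself still needs an explicit close().
#   conn = psycopg2.connect(host="192.168.1.1", port=5432,
#                           database="xen460", user="postgres", password="")
#   with conn:
#       with conn.cursor() as cur:
#           cur.execute(sql)
#           for relname, n_upd, n_hot, ratio in cur.fetchall():
#               print relname, n_upd, n_hot, ratio
#   conn.close()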
| [
"[email protected]"
]
| |
1a8a4b1ec31cbf8dbe7046d88ecb72043feede10 | 8096e140f0fd38b9492e0fcf307990b1a5bfc3dd | /Python/madlibs/version3.py | 68bb0d07fff518a717900d9beeaf33fbb77b915a | []
| no_license | perennialAutodidact/PDXCodeGuild_Projects | 0cacd44499c0bdc0c157555fe5466df6d8eb09b6 | 28a8258eba41e1fe6c135f54b230436ea7d28678 | refs/heads/master | 2022-11-15T22:26:45.775550 | 2020-07-07T17:13:01 | 2020-07-07T17:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | game_over = False
while(game_over == False):
print("\nPlease enter:")
adjectives = input("Five adjectives, separated by commas, then press enter: ").replace(' ', '').split(",")
gerunds = input("Two \"-ing\" verbs, separated by commas: ").replace(' ', '').split(",")
place = input("A place: ")
plural_noun = input("A plural noun: ")
noun = input("A noun: ")
adjective_1 = adjectives[0]
adjective_2 = adjectives[1]
adjective_3 = adjectives[2]
adjective_4 = adjectives[3]
adjective_5 = adjectives[4]
verb_1 = gerunds[0]
verb_2 = gerunds[1]
f"\nResult: \n\nIf you go to some {adjective_1} place like {place} , you must know how to deal with wild animals such as bears, wolves and{plural_noun} . The most important of these is the bear. There are three kinds of bear, the grizzly bear, the {adjective_2} bear and the {adjective_3} bear. Bears spend most of their time {verb_1} or {verb_2} . They look very {adjective_4} , but if you make them {adjective_5} , they might bite your {noun} ."
play_again = input("\n\nWould you like to play again? Enter \"y\" for Yes and \"n\" for No: ")
if play_again == "y":
print("\nOkay, let's do it!")
game_over = False
elif play_again == "n":
print("Okay, goodbye!")
game_over = True
| [
"[email protected]"
]
| |
43cff66ffbaeb8e9ebc37fe1c4ddde3bf3d93ec0 | 613152f5e19ab472974f0c8a87a38c1bb1c792fc | /users/migrations/0002_auto_20190529_1957.py | 2650af02ec1518e6067eddb54c7ad539ae9ac4a7 | []
| no_license | KIM-JAEHYUNG/boro-wang | ed19181b2282f47a5ba1fe0f84f74f3a76b9902b | 544bbbcc8b589ab0dfb936734d999c172a201864 | refs/heads/master | 2022-12-12T00:53:22.740279 | 2019-08-09T19:22:54 | 2019-08-09T19:22:54 | 201,409,510 | 0 | 1 | null | 2022-12-08T05:59:44 | 2019-08-09T06:54:13 | HTML | UTF-8 | Python | false | false | 668 | py | # Generated by Django 2.0.13 on 2019-05-29 10:57
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='followings',
field=models.ManyToManyField(related_name='_user_followings_+', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
fc2230059a02ace7499e2412070bd52ff2ab1e5d | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/clean_architecture/clean_architecture_resources/fb_post_clean_arch/models/comment.py | 4656363266106d471c438dc540263751c8e923a2 | []
| no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | {"filter":false,"title":"comment.py","tooltip":"/clean_architecture/clean_architecture_resources/fb_post_clean_arch/models/comment.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":9,"column":8},"end":{"row":9,"column":8},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1589532198411,"hash":"81351de4359a22cfbdbd6a9630eb6cdf91baa396"} | [
"[email protected]"
]
| |
35d64fcc70ce2f581774ee385e08a42d750180c6 | 19d43cac1c70ad7c1e202486bd6d0951d774c7ab | /a_social_project/settings/__init__.py | 14b4ac3405b477a70dc3201e2ace18a69ab7c397 | []
| no_license | Raju-Pinnam/raju-social-app | e75d6f11964d08103ce2df85fc49ff5141ce346f | b809745df2e7a26a32b5ff151c414f68c83112ed | refs/heads/master | 2023-02-20T23:45:42.649867 | 2021-01-24T18:19:37 | 2021-01-24T18:19:37 | 331,834,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from .base import *
if config('ENV') == 'LOCAL':
from .local import *
elif config('ENV') == 'PROD':
from .prod import *
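# Hedged .env sketch (keys assumed by the config() calls above, loaded via
# python-decouple or a similar reader re-exported from .base):
#   ENV=LOCAL  -> imports everything from settings/local.py
#   ENV=PROD   -> imports everything from settings/prod.py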
| [
"[email protected]"
]
| |
f4bc23d4d623902bcf8c8e4cd2238b727839d0e9 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/nsx/nvp/transport_node/schema/nvp_transport_node_schema.py | ad2bff5982153948e66bc22e6ea73f016294ab8c | []
| no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,113 | py | import base_schema
import credential_schema
import transport_connector_schema
import nvp_transport_zone_binding_schema
import nvp_tag_schema
class TransportNode(base_schema.BaseSchema):
_schema_name = "transportNode"
def __init__(self, py_dict=None):
super(TransportNode, self).__init__()
self.display_name = None
self.transport_connectors = [transport_connector_schema.TransportConnector()]
self.uuid = None
self.tags = [nvp_tag_schema.Tag()]
self.integration_bridge_id = None
self.mgmt_rendezvous_client = None
self.mgmt_rendezvous_server = None
self.credential = credential_schema.Credential()
self.tunnel_probe_random_vlan = None
self.zone_forwarding = None
if py_dict is not None:
self.get_object_from_py_dict(py_dict)
self._uuid_meta = {'isReq':False,'type':'string'}
self._tags_meta = {'isReq':False,'type':'array','maxLen':5}
self._display_name_meta = {'isReq':False,'type':'string',
'default':'<uuid>','maxLen':40}
self._transport_connectors_meta = {'isReq':False,'type':'array'}
self._integration_bridge_id_meta = {'isReq':False,'type':'string'}
self._mgmt_rendezvous_client_meta = {'isReq':False,'type':'boolean',
'default':False}
self._mgmt_rendezvous_server_meta = {'isReq':False,'type':'boolean',
'default':False}
self._credential_meta = {'isReq':False,'type':'object'}
self._tunnel_probe_random_vlan_meta = {'isReq':False,'type':'boolean',
'default':False}
self._zone_forwarding_meta = {'isReq':False,'type':'boolean',
'default':False}
    def add_transport_connector(self, transport_connector):
        self.transport_connectors.append(transport_connector)
def add_tag(self, tag):
self.tags.append(tag)
if __name__=='__main__':
pass
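# Hedged construction sketch (field values below are assumptions): schemas in
# this suite are typically populated from a plain dict, which the base class
# maps onto the attributes via get_object_from_py_dict.
#   py_dict = {'display_name': 'tn-01',
#              'integration_bridge_id': 'br-int',
#              'mgmt_rendezvous_client': False}
#   node = TransportNode(py_dict=py_dict)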
| [
"[email protected]"
]
| |
6301dd36de7e57837bd0faca3facc53b2efcd28b | 9360aeefb3605a3fe0c5e512e52ec3bc0942903f | /bin/jupyter-kernel | 52f4f4b2c7281de7c9de88b24087c584be53a550 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
]
| permissive | eliaswalyba/facebound | 1ff7dc32cc4bf50d14f2e6434af2adfb14300245 | 92500e61b1bc50702ea339563ee8b38b55a31169 | refs/heads/master | 2022-07-01T17:42:02.360416 | 2020-05-08T15:23:03 | 2020-05-08T15:23:03 | 262,851,606 | 0 | 0 | MIT | 2020-05-10T18:37:03 | 2020-05-10T18:37:02 | null | UTF-8 | Python | false | false | 264 | #!/Users/fodediop/dev/deep-learning/facebound/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
b5d47fab88804b7fb68fcbecbdf9db94a8054a2a | b6475b69ae89f5a2ffb3c03c21d747bc6fddbdd2 | /facility/urls.py | c7d25fdaceb3e173d5a1a1b442d033ca3d0ba1c2 | []
| no_license | LeeSuHa98/14-2nd-SHABANG-backend | 3718516abc1a423da7e97d9363c61bfc7dd5ec4f | 13cc50c80aca273277bae8d8b15a1623b860ce55 | refs/heads/main | 2023-02-18T05:57:27.863525 | 2021-01-19T04:47:20 | 2021-01-19T04:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.urls import path
from facility.views import NearFacilityView
urlpatterns = [
path("/<int:id>", NearFacilityView.as_view())
] | [
"[email protected]"
]
| |
83a592cb4e501fdef642301cc6c4b81c6f8e086a | c2ee51902020596e08aacd4462ab44715432c8f8 | /pyapprox/tests/test_mixture_model.py | 817c0ce729c40dff37793eefee398f5e6f383587 | [
"MIT"
]
| permissive | ConnectedSystems/pyapprox | bb1462aa8ee54258ee559d734f7bffb744e09c78 | 4f405654c707cba83d211f327c0f0fdbc95efa29 | refs/heads/master | 2021-09-13T09:49:59.048327 | 2021-08-29T03:38:43 | 2021-08-29T03:38:43 | 252,080,343 | 0 | 0 | MIT | 2020-04-01T05:26:29 | 2020-04-01T05:26:29 | null | UTF-8 | Python | false | false | 4,778 | py | import unittest
from functools import partial
from scipy import stats
import numpy as np
from pyapprox.mixture_model import \
get_leja_univariate_quadrature_rules_of_beta_mixture, sample_mixture, \
get_mixture_sparse_grid_quadrature_rule, \
get_mixture_tensor_product_gauss_quadrature, \
compute_grammian_of_mixture_models_using_sparse_grid_quadrature
from pyapprox.univariate_polynomials.quadrature import leja_growth_rule
from pyapprox.multivariate_polynomials import PolynomialChaosExpansion, \
define_poly_options_from_variable_transformation
from pyapprox.variable_transformations import \
define_iid_random_variable_transformation
from pyapprox.indexing import compute_hyperbolic_indices
class TestMixtureModel(unittest.TestCase):
def test_mixture_model_sparse_grid_quadrature(self):
num_vars = 2
level = 5
rv_params = [[2, 4], [4, 2]]
rv_params = [[2, 6], [6, 2]]
# rv_params = [[6,2]]
num_mixtures = len(rv_params)
def function(x): return np.array(
[np.sum(x**2, axis=0), np.sum(x**3, axis=0)+x[0, :]*x[1, :]]).T
mixture_samplers = []
for ii in range(num_mixtures):
def lambda_sampler(a, b, nn): return 2 * \
np.random.beta(a, b, (num_vars, nn))-1
# partial is needed to make sure correct alpha and beta parameters
# are used and not overwritten
sampler = partial(
lambda_sampler, rv_params[ii][0], rv_params[ii][1])
mixture_samplers.append(sampler)
mc_samples = sample_mixture(mixture_samplers, num_vars, int(1e6))
mc_integral = function(mc_samples).mean(axis=0)
# print ('mc',mc_integral)
leja_basename = None
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename)
mixture_univariate_growth_rules = [leja_growth_rule]*num_mixtures
sg_samples, sg_weights = get_mixture_sparse_grid_quadrature_rule(
mixture_univariate_quadrature_rules,
mixture_univariate_growth_rules,
num_vars, level)
sg_integral = function(sg_samples).T.dot(sg_weights)
# print ('sg',sg_integral)
print('todo: replace with exact analytical integral')
assert np.allclose(sg_integral, mc_integral, atol=1e-2)
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename,
return_weights_for_all_levels=False)
nquad_samples_1d = leja_growth_rule(level)
tp_samples, tp_weights = get_mixture_tensor_product_gauss_quadrature(
mixture_univariate_quadrature_rules, nquad_samples_1d, num_vars)
tp_integral = function(sg_samples).T.dot(sg_weights)
# print ('tp',tp_integral)
assert np.allclose(tp_integral, mc_integral, atol=1e-2)
def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
self):
num_vars = 2
degree = 3
# rv_params = [[6,2],[2,6]]
rv_params = [[1, 1]]
leja_basename = None
mixtures, mixture_univariate_quadrature_rules = \
get_leja_univariate_quadrature_rules_of_beta_mixture(
rv_params, leja_growth_rule, leja_basename)
poly = PolynomialChaosExpansion()
var_trans = define_iid_random_variable_transformation(
stats.uniform(-1, 2), num_vars)
poly_opts = define_poly_options_from_variable_transformation(var_trans)
indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
poly.configure(poly_opts)
poly.set_indices(indices)
num_mixtures = len(rv_params)
mixture_univariate_growth_rules = [leja_growth_rule]*num_mixtures
grammian_matrix = \
compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
poly.basis_matrix, indices,
mixture_univariate_quadrature_rules,
mixture_univariate_growth_rules, num_vars)
assert (np.all(np.isfinite(grammian_matrix)))
if num_mixtures == 1:
II = np.where(abs(grammian_matrix) > 1e-8)
# check only non-zero inner-products are along diagonal, i.e.
# for integrals of indices multiplied by themselves
assert np.allclose(
II, np.tile(np.arange(indices.shape[1]), (2, 1)))
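# Hedged sketch of the mixture sampling exercised above (a two-component beta
# mixture rescaled to [-1, 1]; the shape parameters are illustrative):
#   samplers = [lambda n: 2*np.random.beta(2, 6, (2, n)) - 1,
#               lambda n: 2*np.random.beta(6, 2, (2, n)) - 1]
#   samples = sample_mixture(samplers, 2, 1000)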
if __name__ == "__main__":
mixture_model_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestMixtureModel)
unittest.TextTestRunner(verbosity=2).run(mixture_model_test_suite)
| [
"[email protected]"
]
| |
142e1232f03e6245fe3538cf1dbe1a8210792eef | 3cc7def40ac121c25105ffac6b33e7f12d1c7f97 | /muddery/typeclasses/locked_exit.py | 3d5d615a7bdff60213ec1a33bb047d9f5236e029 | [
"BSD-3-Clause"
]
| permissive | ming-inside/muddery | 8a6d8c9f25fed6137616d109904788927a1059e1 | 8442d6339d4776b8fb81827bcfe0138cf0bc73b5 | refs/heads/master | 2020-03-31T11:13:30.792060 | 2018-10-08T16:49:58 | 2018-10-08T16:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,615 | py | """
Exits
Exits are connectors between Rooms. An exit always has a destination property
set and has a single command defined on itself with the same name as its key,
for allowing Characters to traverse the exit to its destination.
"""
from __future__ import print_function
import traceback
from muddery.utils import utils
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils.localized_strings_handler import _
from muddery.mappings.typeclass_set import TYPECLASS
from evennia.utils import logger
from django.conf import settings
class MudderyLockedExit(TYPECLASS("EXIT")):
"""
Characters must unlock these exits to pass it.
The view and commands of locked exits are different from unlocked exits.
"""
typeclass_key = "LOCKED_EXIT"
typeclass_name = _("Locked Exit", "typeclasses")
def after_data_loaded(self):
"""
Set data_info to the object."
"""
super(MudderyLockedExit, self).after_data_loaded()
self.unlock_condition = getattr(self.dfield, "unlock_condition", "")
self.unlock_verb = getattr(self.dfield, "unlock_verb", "")
self.locked_desc = getattr(self.dfield, "locked_desc", "")
self.auto_unlock = getattr(self.dfield, "auto_unlock", False)
def at_before_traverse(self, traversing_object):
"""
Called just before an object uses this object to traverse to
another object (i.e. this object is a type of Exit)
Args:
traversing_object (Object): The object traversing us.
Notes:
The target destination should normally be available as
`self.destination`.
If this method returns False/None, the traverse is cancelled
before it is even started.
"""
if not super(MudderyLockedExit, self).at_before_traverse(traversing_object):
return False
        # Characters can only pass exits that have already been unlocked.
if traversing_object.is_exit_unlocked(self.get_data_key()):
return True
if self.auto_unlock:
if self.can_unlock(traversing_object):
                # Automatically unlock the exit when the character traverses it.
traversing_object.unlock_exit(self)
return True
# Show the object's appearance.
appearance = self.get_appearance(traversing_object)
traversing_object.msg({"look_obj": appearance})
return False
def can_unlock(self, caller):
"""
        Check whether the caller can unlock this exit.
"""
        # The exit can only be unlocked when its unlock condition matches.
return STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
def get_appearance(self, caller):
"""
This is a convenient hook for a 'look'
command to call.
"""
# Get name and description.
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
can_unlock = self.can_unlock(caller)
if self.auto_unlock and can_unlock:
            # Automatically unlock the exit when a character looks at it.
caller.unlock_exit(self)
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
cmds = []
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock_exit", "args": self.dbref}]
info = {"dbref": self.dbref,
"name": self.name,
"desc": self.locked_desc,
"cmds": cmds}
return info
def get_available_commands(self, caller):
"""
This returns a list of available commands.
"args" must be a string without ' and ", usually it is self.dbref.
"""
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common commands.
return super(MudderyLockedExit, self).get_available_commands(caller)
cmds = []
can_unlock = STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock", "args": self.dbref}]
return cmds
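# Hedged traversal flow sketch (names follow the methods above) for a
# character `ch` moving through a locked exit:
#   1. at_before_traverse(ch) is called;
#   2. if ch.is_exit_unlocked(key): the traversal proceeds;
#   3. elif self.auto_unlock and self.can_unlock(ch): ch.unlock_exit(self)
#      is called and the traversal proceeds;
#   4. else: the locked appearance is shown, possibly with an "Unlock" command.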
| [
"[email protected]"
]
| |
279113e81590ca7d8b73b237349369281db91a6c | 72880d033c9948098291efebf934255635f8c6ea | /pythonexamples/createdirectories.py | 4bf20074931112e5bb2cf8c33503cb0686a91f22 | []
| no_license | manutdmohit/mypythonexamples | 729347aec300bda01f629224337c84d5838a71f2 | b189c201d07b1a345478699bbb3852c02eb96ce5 | refs/heads/master | 2023-04-18T01:55:22.026867 | 2021-05-13T05:59:09 | 2021-05-13T05:59:09 | 366,946,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import os
os.mkdir('valmiki')
print('valmiki created in current working directory')
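# Hedged variant: os.mkdir raises FileExistsError when the directory already
# exists; the stdlib os.makedirs can be told to tolerate that:
#   os.makedirs('valmiki', exist_ok=True)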
"[email protected]"
]
| |
efd659f109141d794d98452979ad0f7016c59ad0 | a54007706a09b387690f79fd7ffd889decad42f1 | /day11/code/03_pygame框架使用.py | 500591bf438640322dcfd73908d34c025d1fabd3 | []
| no_license | lvah/201903python | d425534544a1f91e5b80b5ff0de5ca34037fe6e9 | 1415fcb7697dfa2884d94dcd8963477e12fe0624 | refs/heads/master | 2020-07-06T16:45:37.882819 | 2019-09-08T10:13:07 | 2019-09-08T10:13:07 | 203,082,401 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | import pygame
import sys
pygame.init()  # initialize pygame
size = width, height = 320, 240  # set the window size
screen = pygame.display.set_mode(size)  # create the display window
while True:  # keep looping so the window stays open
    for event in pygame.event.get():  # iterate over all pending events
        if event.type == pygame.QUIT:  # quit when the window close button is clicked
            # exit(0) --- terminates the program; 0 means a normal exit
            sys.exit(0)
        # pygame.KEYDOWN: a keyboard key was pressed
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
print("UP")
elif event.key == pygame.K_DOWN:
print('DOWN')
elif event.key == pygame.K_q: # Q
sys.exit(0)
pygame.quit() # 退出pygame
| [
"[email protected]"
]
| |
90413f84cf6b0e827f63c0a6370c22e5db575ae4 | a8062308fb3bf6c8952257504a50c3e97d801294 | /test/test_524_longest_word_in_dictionary_through_deleting.py | dd8614be77d386c33da05a8c850208c59e040bcc | []
| no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from unittest import TestCase
from problems.N524_Longest_Word_In_Dictionary_Through_Deleting import Solution
class TestSolution(TestCase):
def test_findLongestWord(self):
self.assertEqual("apple", Solution().findLongestWord(s = "abpcplea", d = ["ale","apple","monkey","plea"]))
def test_findLongestWord_1(self):
self.assertEqual("a", Solution().findLongestWord(s = "abpcplea", d = ["a","b","c"]))
def test_findLongestWord_2(self):
s = "wordgoodgoodgoodbestword"
d = ["word", "good", "best", "good"]
self.assertEqual("best", Solution().findLongestWord(s, d)) | [
"[email protected]"
]
| |
b3f79671754cfe80ab04743bc318dc84ee6f0b93 | c3e47ce05f1d6a237a03742ce431d6958ca388b2 | /crowd/plug-in/bkp/whatIsCmd.py | c65eacdc5846cdb8491ebdd3b6694ae6e3f60396 | []
| no_license | fsanges/subins_tutorials | 27426ac71365124c28e924c502484c5bb172f715 | 9c50ec8e3200c29f1c7141ca013cbb0a5b4f8e4e | refs/heads/master | 2020-09-16T04:38:43.696690 | 2019-11-23T13:06:51 | 2019-11-23T13:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | """
To use, make sure that whatIsCmd.py is in your MAYA_PLUG_IN_PATH (and the C++
version is not) then do the following:
import maya.cmds
maya.cmds.loadPlugin("whatIsCmd.py")
maya.cmds.spWhatIs()
"""
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
# command
class WhatIsCmd(OpenMayaMPx.MPxCommand):
kPluginCmdName = "spWhatIs"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def cmdCreator():
return OpenMayaMPx.asMPxPtr( WhatIsCmd() )
def doIt(self, args):
selectList = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList( selectList )
node = OpenMaya.MObject()
depFn = OpenMaya.MFnDependencyNode()
iter = OpenMaya.MItSelectionList(selectList)
while (iter.isDone() == 0):
iter.getDependNode( node )
depFn.setObject(node)
name = depFn.name()
types = []
OpenMaya.MGlobal.getFunctionSetList( node, types )
print "Name: %s" % name
print "Type: %s" % node.apiTypeStr()
sys.stdout.write( "Function Sets: " )
sys.stdout.write(", ".join(types) + '\n')
iter.next()
# Initialize the script plug-in
def initializePlugin(plugin):
pluginFn = OpenMayaMPx.MFnPlugin(plugin)
try:
pluginFn.registerCommand(
WhatIsCmd.kPluginCmdName, WhatIsCmd.cmdCreator
)
except:
sys.stderr.write(
"Failed to register command: %s\n" % WhatIsCmd.kPluginCmdName
)
raise
# Uninitialize the script plug-in
def uninitializePlugin(plugin):
pluginFn = OpenMayaMPx.MFnPlugin(plugin)
try:
pluginFn.deregisterCommand(WhatIsCmd.kPluginCmdName)
except:
sys.stderr.write(
"Failed to unregister command: %s\n" % WhatIsCmd.kPluginCmdName
)
raise
#-
# ==========================================================================
# Copyright (C) 2011 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
| [
"[email protected]"
]
|