| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 3-1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | lengths 3-972 |
| max_stars_repo_name | string | lengths 6-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 3-972 |
| max_issues_repo_name | string | lengths 6-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1-116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 3-972 |
| max_forks_repo_name | string | lengths 6-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 3-1.03M |
| avg_line_length | float64 | 1.13-941k |
| max_line_length | int64 | 2-941k |
| alphanum_fraction | float64 | 0-1 |
hexsha: 01ce6d6100e46a2adfec35c9179bd9156caaa1f9 | size: 2,133 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: fixture/user.py | repo_name: planofmind/python_training | head_hexsha: 3b8f9a44b5760e3b9b18abc2b8f1441fff982b43 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from selenium.webdriver.support.select import Select
class UserHelper:
def __init__(self, app):
self.app = app
def create(self, user):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
wd.find_element_by_name("firstname").send_keys(user.firstname)
wd.find_element_by_name("middlename").send_keys(user.middlename)
wd.find_element_by_name("lastname").send_keys(user.lastname)
wd.find_element_by_name("nickname").send_keys(user.nickname)
wd.find_element_by_name("title").send_keys(user.title)
wd.find_element_by_name("company").send_keys(user.company)
wd.find_element_by_name("address").send_keys(user.address)
wd.find_element_by_name("home").send_keys(user.home_phone)
wd.find_element_by_name("mobile").send_keys(user.mobile_phone)
wd.find_element_by_name("work").send_keys(user.work_phone)
wd.find_element_by_name("fax").send_keys(user.fax)
wd.find_element_by_name("email").send_keys(user.email)
wd.find_element_by_name("email2").send_keys(user.email2)
wd.find_element_by_name("email3").send_keys(user.email3)
wd.find_element_by_name("homepage").send_keys(user.homepage)
Select(wd.find_element_by_name("bday")).select_by_visible_text(user.bday)
Select(wd.find_element_by_name("bmonth")).select_by_visible_text(user.bmonth)
wd.find_element_by_name("byear").send_keys(user.byear)
Select(wd.find_element_by_name("aday")).select_by_visible_text(user.aday)
Select(wd.find_element_by_name("amonth")).select_by_visible_text(user.amonth)
wd.find_element_by_name("ayear").send_keys(user.ayear)
wd.find_element_by_name("address2").send_keys(user.address2)
wd.find_element_by_name("phone2").send_keys(user.phone2)
wd.find_element_by_name("notes").send_keys(user.notes)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.return_to_home_page()
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
avg_line_length: 52.02439 | max_line_length: 85 | alphanum_fraction: 0.7173
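An illustrative usage sketch for the `UserHelper` fixture in the record above. The `Application` wrapper, the `User` data holder, and the target URL are assumptions introduced for this example (only the helper itself appears in the record), and the sketch relies on the same pre-Selenium-4 `find_element_by_*` API the helper uses.

```python
# Hypothetical wiring for the UserHelper fixture above; all names outside
# UserHelper itself are assumptions made for this sketch.
from selenium import webdriver
from fixture.user import UserHelper  # assumes the project root is on sys.path

class Application:
    def __init__(self):
        self.wd = webdriver.Firefox()      # UserHelper reads the driver from app.wd
        self.user = UserHelper(self)

class User:
    """Plain data holder exposing the attributes UserHelper.create() reads."""
    def __init__(self, **fields):
        self.__dict__.update(fields)

app = Application()
app.wd.get("http://localhost/addressbook/")   # assumed application URL
app.user.create(User(
    firstname="Ada", middlename="", lastname="Lovelace", nickname="ada",
    title="", company="", address="", home_phone="", mobile_phone="",
    work_phone="", fax="", email="", email2="", email3="", homepage="",
    bday="10", bmonth="December", byear="1815",
    aday="1", amonth="January", ayear="2000",
    address2="", phone2="", notes=""))
app.wd.quit()
```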
hexsha: 99aed854872f094624965bf09910d0b6c855543c | size: 63 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: test/fixtures/python/corpus/list-comprehension.B.py | repo_name: matsubara0507/semantic | head_hexsha: 67899f701abc0f1f0cb4374d8d3c249afc33a272 | licenses: ["MIT"]
max_stars_count: 8,844 (events 2019-05-31T15:47:12.000Z to 2022-03-31T18:33:51.000Z) | max_issues_count: 401 (events 2019-05-31T18:30:26.000Z to 2022-03-31T16:32:29.000Z) | max_forks_count: 504 (events 2019-05-31T17:55:03.000Z to 2022-03-30T04:15:04.000Z)
content:
[ f for e in d if f() for g in h if g() ]
[ c + 1 for b in a ]
avg_line_length: 21 | max_line_length: 41 | alphanum_fraction: 0.47619
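The fixture above exercises the parser rather than any runtime behaviour; as a clarifying sketch, the comprehension with two `for` clauses and two `if` filters desugars to the nested loops below. The dummy bindings are assumptions added so the sketch runs; the fixture itself leaves every name unbound.

```python
# Dummy bindings (assumptions); the fixture leaves all of these names free.
def f():
    return True                        # called as a filter and also used as the element
d = [1, 2]
h = [lambda: True, lambda: False]      # elements of h are called as g() below
a = [5, 6]
c = 10

# Desugaring of: [ f for e in d if f() for g in h if g() ]
result = []
for e in d:
    if f():                  # first `if` guards the inner `for`
        for g in h:
            if g():          # second `if` guards the append
                result.append(f)

# Desugaring of: [ c + 1 for b in a ]  ->  [11, 11]
result2 = []
for b in a:
    result2.append(c + 1)
```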
hexsha: 9232384305ab33d604e55b1c7a1051885431894c | size: 24,337 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: src/oci/data_integration/models/update_task_from_rest_task.py | repo_name: pabs3/oci-python-sdk | head_hexsha: 437ba18ce39af2d1090e277c4bb8750c89f83021 | licenses: ["Apache-2.0", "BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_task_details import UpdateTaskDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateTaskFromRestTask(UpdateTaskDetails):
"""
The information about the Generic REST task. The endpoint and cancelEndpoint properties are deprecated, use the properties executeRestCallConfig, cancelRestCallConfig and pollRestCallConfig for execute, cancel and polling of the calls.
"""
#: A constant which can be used with the method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "GET"
METHOD_TYPE_GET = "GET"
#: A constant which can be used with the method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "POST"
METHOD_TYPE_POST = "POST"
#: A constant which can be used with the method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "PATCH"
METHOD_TYPE_PATCH = "PATCH"
#: A constant which can be used with the method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "DELETE"
METHOD_TYPE_DELETE = "DELETE"
#: A constant which can be used with the method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "PUT"
METHOD_TYPE_PUT = "PUT"
#: A constant which can be used with the api_call_mode property of a UpdateTaskFromRestTask.
#: This constant has a value of "SYNCHRONOUS"
API_CALL_MODE_SYNCHRONOUS = "SYNCHRONOUS"
#: A constant which can be used with the api_call_mode property of a UpdateTaskFromRestTask.
#: This constant has a value of "ASYNC_OCI_WORKREQUEST"
API_CALL_MODE_ASYNC_OCI_WORKREQUEST = "ASYNC_OCI_WORKREQUEST"
#: A constant which can be used with the api_call_mode property of a UpdateTaskFromRestTask.
#: This constant has a value of "ASYNC_GENERIC"
API_CALL_MODE_ASYNC_GENERIC = "ASYNC_GENERIC"
#: A constant which can be used with the cancel_method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "GET"
CANCEL_METHOD_TYPE_GET = "GET"
#: A constant which can be used with the cancel_method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "POST"
CANCEL_METHOD_TYPE_POST = "POST"
#: A constant which can be used with the cancel_method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "PATCH"
CANCEL_METHOD_TYPE_PATCH = "PATCH"
#: A constant which can be used with the cancel_method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "DELETE"
CANCEL_METHOD_TYPE_DELETE = "DELETE"
#: A constant which can be used with the cancel_method_type property of a UpdateTaskFromRestTask.
#: This constant has a value of "PUT"
CANCEL_METHOD_TYPE_PUT = "PUT"
def __init__(self, **kwargs):
"""
Initializes a new UpdateTaskFromRestTask object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateTaskFromRestTask.model_type` attribute
of this class is ``REST_TASK`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this UpdateTaskFromRestTask.
Allowed values for this property are: "INTEGRATION_TASK", "DATA_LOADER_TASK", "PIPELINE_TASK", "SQL_TASK", "OCI_DATAFLOW_TASK", "REST_TASK"
:type model_type: str
:param key:
The value to assign to the key property of this UpdateTaskFromRestTask.
:type key: str
:param model_version:
The value to assign to the model_version property of this UpdateTaskFromRestTask.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this UpdateTaskFromRestTask.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this UpdateTaskFromRestTask.
:type name: str
:param description:
The value to assign to the description property of this UpdateTaskFromRestTask.
:type description: str
:param object_status:
The value to assign to the object_status property of this UpdateTaskFromRestTask.
:type object_status: int
:param object_version:
The value to assign to the object_version property of this UpdateTaskFromRestTask.
:type object_version: int
:param identifier:
The value to assign to the identifier property of this UpdateTaskFromRestTask.
:type identifier: str
:param input_ports:
The value to assign to the input_ports property of this UpdateTaskFromRestTask.
:type input_ports: list[oci.data_integration.models.InputPort]
:param output_ports:
The value to assign to the output_ports property of this UpdateTaskFromRestTask.
:type output_ports: list[oci.data_integration.models.OutputPort]
:param parameters:
The value to assign to the parameters property of this UpdateTaskFromRestTask.
:type parameters: list[oci.data_integration.models.Parameter]
:param op_config_values:
The value to assign to the op_config_values property of this UpdateTaskFromRestTask.
:type op_config_values: oci.data_integration.models.ConfigValues
:param config_provider_delegate:
The value to assign to the config_provider_delegate property of this UpdateTaskFromRestTask.
:type config_provider_delegate: oci.data_integration.models.ConfigProvider
:param registry_metadata:
The value to assign to the registry_metadata property of this UpdateTaskFromRestTask.
:type registry_metadata: oci.data_integration.models.RegistryMetadata
:param auth_details:
The value to assign to the auth_details property of this UpdateTaskFromRestTask.
:type auth_details: oci.data_integration.models.AuthDetails
:param auth_config:
The value to assign to the auth_config property of this UpdateTaskFromRestTask.
:type auth_config: oci.data_integration.models.AuthConfig
:param endpoint:
The value to assign to the endpoint property of this UpdateTaskFromRestTask.
:type endpoint: oci.data_integration.models.Expression
:param method_type:
The value to assign to the method_type property of this UpdateTaskFromRestTask.
Allowed values for this property are: "GET", "POST", "PATCH", "DELETE", "PUT"
:type method_type: str
:param headers:
The value to assign to the headers property of this UpdateTaskFromRestTask.
:type headers: object
:param additional_properties:
The value to assign to the additional_properties property of this UpdateTaskFromRestTask.
:type additional_properties: str
:param json_data:
The value to assign to the json_data property of this UpdateTaskFromRestTask.
:type json_data: str
:param api_call_mode:
The value to assign to the api_call_mode property of this UpdateTaskFromRestTask.
Allowed values for this property are: "SYNCHRONOUS", "ASYNC_OCI_WORKREQUEST", "ASYNC_GENERIC"
:type api_call_mode: str
:param cancel_endpoint:
The value to assign to the cancel_endpoint property of this UpdateTaskFromRestTask.
:type cancel_endpoint: oci.data_integration.models.Expression
:param cancel_method_type:
The value to assign to the cancel_method_type property of this UpdateTaskFromRestTask.
Allowed values for this property are: "GET", "POST", "PATCH", "DELETE", "PUT"
:type cancel_method_type: str
:param execute_rest_call_config:
The value to assign to the execute_rest_call_config property of this UpdateTaskFromRestTask.
:type execute_rest_call_config: oci.data_integration.models.ExecuteRestCallConfig
:param cancel_rest_call_config:
The value to assign to the cancel_rest_call_config property of this UpdateTaskFromRestTask.
:type cancel_rest_call_config: oci.data_integration.models.CancelRestCallConfig
:param poll_rest_call_config:
The value to assign to the poll_rest_call_config property of this UpdateTaskFromRestTask.
:type poll_rest_call_config: oci.data_integration.models.PollRestCallConfig
:param typed_expressions:
The value to assign to the typed_expressions property of this UpdateTaskFromRestTask.
:type typed_expressions: list[oci.data_integration.models.TypedExpression]
"""
self.swagger_types = {
'model_type': 'str',
'key': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_status': 'int',
'object_version': 'int',
'identifier': 'str',
'input_ports': 'list[InputPort]',
'output_ports': 'list[OutputPort]',
'parameters': 'list[Parameter]',
'op_config_values': 'ConfigValues',
'config_provider_delegate': 'ConfigProvider',
'registry_metadata': 'RegistryMetadata',
'auth_details': 'AuthDetails',
'auth_config': 'AuthConfig',
'endpoint': 'Expression',
'method_type': 'str',
'headers': 'object',
'additional_properties': 'str',
'json_data': 'str',
'api_call_mode': 'str',
'cancel_endpoint': 'Expression',
'cancel_method_type': 'str',
'execute_rest_call_config': 'ExecuteRestCallConfig',
'cancel_rest_call_config': 'CancelRestCallConfig',
'poll_rest_call_config': 'PollRestCallConfig',
'typed_expressions': 'list[TypedExpression]'
}
self.attribute_map = {
'model_type': 'modelType',
'key': 'key',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'name': 'name',
'description': 'description',
'object_status': 'objectStatus',
'object_version': 'objectVersion',
'identifier': 'identifier',
'input_ports': 'inputPorts',
'output_ports': 'outputPorts',
'parameters': 'parameters',
'op_config_values': 'opConfigValues',
'config_provider_delegate': 'configProviderDelegate',
'registry_metadata': 'registryMetadata',
'auth_details': 'authDetails',
'auth_config': 'authConfig',
'endpoint': 'endpoint',
'method_type': 'methodType',
'headers': 'headers',
'additional_properties': 'additionalProperties',
'json_data': 'jsonData',
'api_call_mode': 'apiCallMode',
'cancel_endpoint': 'cancelEndpoint',
'cancel_method_type': 'cancelMethodType',
'execute_rest_call_config': 'executeRestCallConfig',
'cancel_rest_call_config': 'cancelRestCallConfig',
'poll_rest_call_config': 'pollRestCallConfig',
'typed_expressions': 'typedExpressions'
}
self._model_type = None
self._key = None
self._model_version = None
self._parent_ref = None
self._name = None
self._description = None
self._object_status = None
self._object_version = None
self._identifier = None
self._input_ports = None
self._output_ports = None
self._parameters = None
self._op_config_values = None
self._config_provider_delegate = None
self._registry_metadata = None
self._auth_details = None
self._auth_config = None
self._endpoint = None
self._method_type = None
self._headers = None
self._additional_properties = None
self._json_data = None
self._api_call_mode = None
self._cancel_endpoint = None
self._cancel_method_type = None
self._execute_rest_call_config = None
self._cancel_rest_call_config = None
self._poll_rest_call_config = None
self._typed_expressions = None
self._model_type = 'REST_TASK'
@property
def auth_details(self):
"""
Gets the auth_details of this UpdateTaskFromRestTask.
:return: The auth_details of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.AuthDetails
"""
return self._auth_details
@auth_details.setter
def auth_details(self, auth_details):
"""
Sets the auth_details of this UpdateTaskFromRestTask.
:param auth_details: The auth_details of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.AuthDetails
"""
self._auth_details = auth_details
@property
def auth_config(self):
"""
Gets the auth_config of this UpdateTaskFromRestTask.
:return: The auth_config of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.AuthConfig
"""
return self._auth_config
@auth_config.setter
def auth_config(self, auth_config):
"""
Sets the auth_config of this UpdateTaskFromRestTask.
:param auth_config: The auth_config of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.AuthConfig
"""
self._auth_config = auth_config
@property
def endpoint(self):
"""
Gets the endpoint of this UpdateTaskFromRestTask.
:return: The endpoint of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.Expression
"""
return self._endpoint
@endpoint.setter
def endpoint(self, endpoint):
"""
Sets the endpoint of this UpdateTaskFromRestTask.
:param endpoint: The endpoint of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.Expression
"""
self._endpoint = endpoint
@property
def method_type(self):
"""
Gets the method_type of this UpdateTaskFromRestTask.
The REST method to use. This property is deprecated, use ExecuteRestCallConfig's methodType property instead.
Allowed values for this property are: "GET", "POST", "PATCH", "DELETE", "PUT"
:return: The method_type of this UpdateTaskFromRestTask.
:rtype: str
"""
return self._method_type
@method_type.setter
def method_type(self, method_type):
"""
Sets the method_type of this UpdateTaskFromRestTask.
The REST method to use. This property is deprecated, use ExecuteRestCallConfig's methodType property instead.
:param method_type: The method_type of this UpdateTaskFromRestTask.
:type: str
"""
allowed_values = ["GET", "POST", "PATCH", "DELETE", "PUT"]
if not value_allowed_none_or_none_sentinel(method_type, allowed_values):
raise ValueError(
"Invalid value for `method_type`, must be None or one of {0}"
.format(allowed_values)
)
self._method_type = method_type
@property
def headers(self):
"""
Gets the headers of this UpdateTaskFromRestTask.
:return: The headers of this UpdateTaskFromRestTask.
:rtype: object
"""
return self._headers
@headers.setter
def headers(self, headers):
"""
Sets the headers of this UpdateTaskFromRestTask.
:param headers: The headers of this UpdateTaskFromRestTask.
:type: object
"""
self._headers = headers
@property
def additional_properties(self):
"""
Gets the additional_properties of this UpdateTaskFromRestTask.
Header value.
:return: The additional_properties of this UpdateTaskFromRestTask.
:rtype: str
"""
return self._additional_properties
@additional_properties.setter
def additional_properties(self, additional_properties):
"""
Sets the additional_properties of this UpdateTaskFromRestTask.
Header value.
:param additional_properties: The additional_properties of this UpdateTaskFromRestTask.
:type: str
"""
self._additional_properties = additional_properties
@property
def json_data(self):
"""
Gets the json_data of this UpdateTaskFromRestTask.
JSON data for payload body. This property is deprecated, use ExecuteRestCallConfig's payload config param instead.
:return: The json_data of this UpdateTaskFromRestTask.
:rtype: str
"""
return self._json_data
@json_data.setter
def json_data(self, json_data):
"""
Sets the json_data of this UpdateTaskFromRestTask.
JSON data for payload body. This property is deprecated, use ExecuteRestCallConfig's payload config param instead.
:param json_data: The json_data of this UpdateTaskFromRestTask.
:type: str
"""
self._json_data = json_data
@property
def api_call_mode(self):
"""
Gets the api_call_mode of this UpdateTaskFromRestTask.
The REST invocation pattern to use. ASYNC_OCI_WORKREQUEST is being deprecated as well as cancelEndpoint/MethodType.
Allowed values for this property are: "SYNCHRONOUS", "ASYNC_OCI_WORKREQUEST", "ASYNC_GENERIC"
:return: The api_call_mode of this UpdateTaskFromRestTask.
:rtype: str
"""
return self._api_call_mode
@api_call_mode.setter
def api_call_mode(self, api_call_mode):
"""
Sets the api_call_mode of this UpdateTaskFromRestTask.
The REST invocation pattern to use. ASYNC_OCI_WORKREQUEST is being deprecated as well as cancelEndpoint/MethodType.
:param api_call_mode: The api_call_mode of this UpdateTaskFromRestTask.
:type: str
"""
allowed_values = ["SYNCHRONOUS", "ASYNC_OCI_WORKREQUEST", "ASYNC_GENERIC"]
if not value_allowed_none_or_none_sentinel(api_call_mode, allowed_values):
raise ValueError(
"Invalid value for `api_call_mode`, must be None or one of {0}"
.format(allowed_values)
)
self._api_call_mode = api_call_mode
@property
def cancel_endpoint(self):
"""
Gets the cancel_endpoint of this UpdateTaskFromRestTask.
:return: The cancel_endpoint of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.Expression
"""
return self._cancel_endpoint
@cancel_endpoint.setter
def cancel_endpoint(self, cancel_endpoint):
"""
Sets the cancel_endpoint of this UpdateTaskFromRestTask.
:param cancel_endpoint: The cancel_endpoint of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.Expression
"""
self._cancel_endpoint = cancel_endpoint
@property
def cancel_method_type(self):
"""
Gets the cancel_method_type of this UpdateTaskFromRestTask.
The REST method to use for canceling the original request.
Allowed values for this property are: "GET", "POST", "PATCH", "DELETE", "PUT"
:return: The cancel_method_type of this UpdateTaskFromRestTask.
:rtype: str
"""
return self._cancel_method_type
@cancel_method_type.setter
def cancel_method_type(self, cancel_method_type):
"""
Sets the cancel_method_type of this UpdateTaskFromRestTask.
The REST method to use for canceling the original request.
:param cancel_method_type: The cancel_method_type of this UpdateTaskFromRestTask.
:type: str
"""
allowed_values = ["GET", "POST", "PATCH", "DELETE", "PUT"]
if not value_allowed_none_or_none_sentinel(cancel_method_type, allowed_values):
raise ValueError(
"Invalid value for `cancel_method_type`, must be None or one of {0}"
.format(allowed_values)
)
self._cancel_method_type = cancel_method_type
@property
def execute_rest_call_config(self):
"""
Gets the execute_rest_call_config of this UpdateTaskFromRestTask.
:return: The execute_rest_call_config of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.ExecuteRestCallConfig
"""
return self._execute_rest_call_config
@execute_rest_call_config.setter
def execute_rest_call_config(self, execute_rest_call_config):
"""
Sets the execute_rest_call_config of this UpdateTaskFromRestTask.
:param execute_rest_call_config: The execute_rest_call_config of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.ExecuteRestCallConfig
"""
self._execute_rest_call_config = execute_rest_call_config
@property
def cancel_rest_call_config(self):
"""
Gets the cancel_rest_call_config of this UpdateTaskFromRestTask.
:return: The cancel_rest_call_config of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.CancelRestCallConfig
"""
return self._cancel_rest_call_config
@cancel_rest_call_config.setter
def cancel_rest_call_config(self, cancel_rest_call_config):
"""
Sets the cancel_rest_call_config of this UpdateTaskFromRestTask.
:param cancel_rest_call_config: The cancel_rest_call_config of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.CancelRestCallConfig
"""
self._cancel_rest_call_config = cancel_rest_call_config
@property
def poll_rest_call_config(self):
"""
Gets the poll_rest_call_config of this UpdateTaskFromRestTask.
:return: The poll_rest_call_config of this UpdateTaskFromRestTask.
:rtype: oci.data_integration.models.PollRestCallConfig
"""
return self._poll_rest_call_config
@poll_rest_call_config.setter
def poll_rest_call_config(self, poll_rest_call_config):
"""
Sets the poll_rest_call_config of this UpdateTaskFromRestTask.
:param poll_rest_call_config: The poll_rest_call_config of this UpdateTaskFromRestTask.
:type: oci.data_integration.models.PollRestCallConfig
"""
self._poll_rest_call_config = poll_rest_call_config
@property
def typed_expressions(self):
"""
Gets the typed_expressions of this UpdateTaskFromRestTask.
List of typed expressions.
:return: The typed_expressions of this UpdateTaskFromRestTask.
:rtype: list[oci.data_integration.models.TypedExpression]
"""
return self._typed_expressions
@typed_expressions.setter
def typed_expressions(self, typed_expressions):
"""
Sets the typed_expressions of this UpdateTaskFromRestTask.
List of typed expressions.
:param typed_expressions: The typed_expressions of this UpdateTaskFromRestTask.
:type: list[oci.data_integration.models.TypedExpression]
"""
self._typed_expressions = typed_expressions
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
avg_line_length: 38.691574 | max_line_length: 245 | alphanum_fraction: 0.677692
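A minimal construction sketch for the model above, using only keyword arguments documented in its `__init__` docstring; the import path is assumed from the record's location inside the SDK source tree, and the literal values are placeholders rather than anything from a real workspace.

```python
from oci.data_integration.models import UpdateTaskFromRestTask, ExecuteRestCallConfig

# model_type is fixed to 'REST_TASK' by the class itself; the kwargs below are a
# small subset of those listed in the __init__ docstring, with placeholder values.
task = UpdateTaskFromRestTask(
    key="example-task-key",
    name="nightly-data-pull",
    identifier="NIGHTLY_DATA_PULL",
    description="Generic REST task updated via the SDK",
    object_version=1,
    api_call_mode="SYNCHRONOUS",
    # Preferred over the deprecated endpoint/method_type properties:
    execute_rest_call_config=ExecuteRestCallConfig(),
)
print(task.model_type)      # -> 'REST_TASK'
print(task.api_call_mode)   # the setter rejects values outside the allowed list
```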
hexsha: dd7328ee221733720f8504161cc6fb9069ac8d5c | size: 7,680 | ext: py | lang: Python
max_stars repo_path: h/presenters/document_html.py | repo_name: julien-cheng/h | head_hexsha: 36c8ec044725720cf36f0986cdf025395aca8929 | licenses: ["BSD-2-Clause"] | max_stars_count: 2 (events 2019-08-04T07:22:11.000Z to 2020-07-17T05:01:41.000Z)
max_issues / max_forks repo_path: h/presenters/document_html.py | repo_name: 11-eleven-11/h | head_hexsha: 91c7a4504ad7471ed3e30246763a03e6c1cc531b | licenses: ["BSD-2-Clause"] | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import jinja2
from h._compat import text_type, urlparse, url_unquote
class DocumentHTMLPresenter(object):
"""Wraps Document model objects and adds some HTML properties."""
def __init__(self, document):
self.document = document
@property
def filename(self):
"""
Return the filename of this document, or ''.
If the document's URI is a file:// URI then return the filename part
of it, otherwise return ''.
The filename is escaped and safe to be rendered.
If it contains escaped characters then the filename will be a
Markup object so it won't be double-escaped.
"""
if self.uri.lower().startswith("file:///"):
return jinja2.escape(self.uri.split("/")[-1])
else:
return ""
@property
def href(self):
"""
Return an href for this document, or ''.
Returns a value suitable for use as the value of the href attribute in
an <a> element in an HTML document.
Returns an empty string if the document doesn't have an http(s):// URI.
The href is escaped and safe to be rendered.
If it contains escaped characters the returned value will be a
Markup object so that it doesn't get double-escaped.
"""
if self.document.web_uri:
return jinja2.escape(self.document.web_uri)
else:
return ""
@property
def hostname_or_filename(self):
"""
Return the hostname or filename of this document.
Returns the hostname part of the document's URI, e.g.
'www.example.com' for 'http://www.example.com/example.html'.
If the URI is a file:// URI then return the filename part of it
instead.
The returned hostname or filename is escaped and safe to be rendered.
If it contains escaped characters the returned value will be a Markup
object so that it doesn't get double-escaped.
"""
if self.filename:
return jinja2.escape(url_unquote(self.filename))
else:
hostname = urlparse.urlparse(self.uri).hostname
# urlparse()'s .hostname is sometimes None.
hostname = hostname or ""
return jinja2.escape(hostname)
@property
def link(self):
"""
Return a link to this document.
Returns HTML strings like:
<a href="{href}" title="{title}">{link_text}</a> {hostname}
<em>Local file:</em> {title}<br>{hostname}
where:
- {href} is the uri of the document, if it has an http(s):// uri
- {title} is the title of the document.
If the document has no title then its uri will be used instead.
If it's a local file:// uri then only the filename part is used,
not the full path.
- {link_text} is the same as {title}, but truncated with … if
it's too long
- {hostname} is the hostname name of the document's uri without
the scheme (http(s)://) and www parts, e.g. 'example.com'.
If it's a local file:// uri then the filename is used as the
hostname.
If the hostname is too long it is truncated with ….
The {hostname} part will be missing if it wouldn't be any different
from the {link_text} part.
The href="{href}" will be missing if there's no http(s) uri to link to
for this annotation's document.
User-supplied values are escaped so the string is safe for raw
rendering (the returned string is actually a Markup object and
won't be escaped by Jinja2 when rendering).
"""
return _format_document_link(
self.href, self.title, self.link_text, self.hostname_or_filename
)
@property
def link_text(self):
"""
Return some link text for this document.
Return a text representation of this document suitable for use as the
link text in a link like <a ...>{link_text}</a>.
Returns the document's title if it has one, or failing that uses part
of the document's URI if it has one.
The link text is escaped and safe for rendering.
If it contains escaped characters the returned value will be a
Markup object so it doesn't get double-escaped.
"""
title = jinja2.escape(self.title)
# Sometimes self.title is the annotated document's URI (if the document
# has no title). In those cases we want to remove the http(s):// from
# the front and unquote it for link text.
lower = title.lower()
if lower.startswith("http://") or lower.startswith("https://"):
parts = urlparse.urlparse(title)
return url_unquote(parts.netloc + parts.path)
else:
return title
@property
def title(self):
"""
Return a title for this document.
Return the document's title or if the document has no title then return
its filename (if it's a file:// URI) or its URI for non-file URIs.
The title is escaped and safe to be rendered.
If it contains escaped characters then the title will be a
Markup object, so that it won't be double-escaped.
"""
title = self.document.title
if title:
# Convert non-string titles into strings.
# We're assuming that title cannot be a byte string.
title = text_type(title)
return jinja2.escape(title)
if self.filename:
return jinja2.escape(url_unquote(self.filename))
else:
return jinja2.escape(url_unquote(self.uri))
@property
def uri(self):
if self.document.document_uris:
return jinja2.escape(self.document.document_uris[0].uri)
return ""
@property
def web_uri(self):
via_prefix = "https://via.hypothes.is/"
web_uri = self.document.web_uri
if web_uri and web_uri != via_prefix and web_uri.startswith(via_prefix):
web_uri = web_uri[len(via_prefix) :]
return web_uri
def _format_document_link(href, title, link_text, host_or_filename):
"""Return a document link for the given components.
Helper function for the .document_link property below.
:returns: A document link as an HTML string, escaped and safe for
rendering. The returned string is a Markup object so that it won't be
double-escaped.
"""
if href and host_or_filename and host_or_filename in link_text:
host_or_filename = ""
elif not href and title == host_or_filename:
title = ""
def truncate(content, length=55):
"""Truncate the given string to at most length chars."""
if len(content) <= length:
return content
else:
return content[:length] + jinja2.Markup("…")
host_or_filename = truncate(host_or_filename)
link_text = truncate(link_text)
if href and host_or_filename:
link = '<a href="{href}" title="{title}">{link_text}</a><br>{host_or_filename}'
elif href:
link = '<a href="{href}" title="{title}">{link_text}</a>'
else:
link = "<em>Local file:</em> {title}"
if host_or_filename:
link += "<br>{host_or_filename}"
link = link.format(
href=jinja2.escape(href),
title=jinja2.escape(title),
link_text=jinja2.escape(link_text),
host_or_filename=jinja2.escape(host_or_filename),
)
return jinja2.Markup(link)
avg_line_length: 32.133891 | max_line_length: 87 | alphanum_fraction: 0.61862
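As a clarifying sketch of the link formats described in the `link` docstring above, here is what `_format_document_link` produces for a web document and for a local file; the expected outputs are reconstructed from the format templates inside the function, and the example assumes the `h` package is importable.

```python
from h.presenters.document_html import _format_document_link

# Web document: href present, and the hostname is not contained in the link text.
print(_format_document_link(
    href="http://www.example.com/example.html",
    title="Example Domain",
    link_text="Example Domain",
    host_or_filename="www.example.com",
))
# -> <a href="http://www.example.com/example.html" title="Example Domain">Example Domain</a><br>www.example.com

# Local file: no href, so the "Local file:" template is used instead of an <a> tag.
print(_format_document_link(
    href="",
    title="Quarterly report",
    link_text="Quarterly report",
    host_or_filename="report.pdf",
))
# -> <em>Local file:</em> Quarterly report<br>report.pdf
```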
hexsha: 6ad185d08e311da32bd614a9c3f19eac735d45dc | size: 17,672 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: jupyter_pyfilesystem/contents.py | repo_name: manics/jupyter-pyfilesystem | head_hexsha: b5124aa894e075b5f3954ea80ea0a5e8691ce5cc | licenses: ["MIT"]
max_stars_count: 5 (events 2020-01-29T19:40:52.000Z to 2020-05-20T07:19:54.000Z) | max_issues_count: null | max_forks_count: null
content:
from notebook.services.contents.manager import ContentsManager
from notebook.services.contents.checkpoints import (
Checkpoints,
GenericCheckpointsMixin,
)
from traitlets import (
Bool,
default,
Instance,
Int,
TraitError,
Unicode,
)
from traitlets.config.configurable import LoggingConfigurable
from tornado.ioloop import PeriodicCallback
from tornado.web import HTTPError
from base64 import (
b64encode,
b64decode,
)
import atexit
from datetime import datetime
from functools import wraps
import mimetypes
import nbformat
import re
from fs import open_fs
from fs.base import FS
from fs.errors import (
DestinationExists,
IllegalBackReference,
ResourceNotFound,
ResourceReadOnly,
)
import fs.path as fspath
# https://github.com/quantopian/pgcontents/blob/5fad3f6840d82e6acde97f8e3abe835765fa824b/pgcontents/api_utils.py#L25
def _base_model(dirname, name):
return {
'name': name,
'path': (dirname + '/' + name).strip('/'),
'writable': True,
'last_modified': None,
'created': None,
'content': None,
'format': None,
'mimetype': None,
'size': 0,
'type': None,
}
DEFAULT_CREATED_DATE = datetime.utcfromtimestamp(0)
def _created_modified(details):
created = details.created or details.modified or DEFAULT_CREATED_DATE
modified = details.modified or details.created or DEFAULT_CREATED_DATE
return created, modified
def wrap_fs_errors(type=None):
"""
Decorator to convert fs.errors into HTTPErrors.
Wrapped method must have arguments `self` and `path`
as the first two arguments
"""
def wrap_fs_errors_with_type(func):
@wraps(func)
def check(self, path, *args, **kwargs):
t = (type + ' ') if type else ''
try:
return func(self, path, *args, **kwargs)
except (ResourceNotFound, IllegalBackReference) as e:
self.log.debug('Caught exception: %s', e)
raise HTTPError(404, '{}"{}" not found: {}'.format(t, path, e))
except DestinationExists as e:
self.log.debug('Caught exception: {}'.format(e))
raise HTTPError(409, '{}"{}" conflicts: {}'.format(t, path, e))
except ResourceReadOnly as e:
self.log.debug('Caught exception: %s', e)
raise HTTPError(409, '{}"{}" is read-only: {}'.format(
t, path, e))
return check
return wrap_fs_errors_with_type
class FilesystemHandle(LoggingConfigurable):
def __init__(self, fs_url, *, create, writeable, closeonexit, keepalive):
m = re.match(r'^([a-z][a-z0-9+\-.]*)://', fs_url)
if not m:
raise TraitError('Invalid fs_url: {}'.format(fs_url))
self.fs_url = fs_url
self.fsname = m.group()
self.log.debug('Opening filesystem %s', fs_url)
self.fs = open_fs(self.fs_url, writeable=writeable, create=create)
self.log.info('Opened filesystem %s', self.fsname)
self.keepalive_cb = None
if keepalive:
self.enable_keepalive(keepalive)
if closeonexit:
self.register_atexit()
def close(self):
self.log.debug('Closing filesystem %s', self.fs_url)
self.enable_keepalive(0)
self.fs.close()
self.log.info('Closed filesystem %s', self.fsname)
def keepalive(self):
d = self.fs.getdetails('/')
self.log.debug('keepalive: %s', d)
def enable_keepalive(self, interval):
self.log.debug('enable_keepalive(%s)', interval)
if self.keepalive_cb:
self.keepalive_cb.stop()
self.keepalive_cb = None
if interval > 0:
self.keepalive_cb = PeriodicCallback(
self.keepalive, interval * 1000)
self.keepalive_cb.start()
def register_atexit(self):
atexit.register(self.close)
class FsContentsManager(ContentsManager):
"""
https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html
https://github.com/jupyter/notebook/blob/6.0.1/notebook/services/contents/manager.py
https://github.com/jupyter/notebook/blob/6.0.1/notebook/services/contents/filemanager.py
https://github.com/quantopian/pgcontents/blob/master/pgcontents/pgmanager.py
"""
fs = Instance(FS)
@default('fs')
def _fs_default(self):
instance = FilesystemHandle(
self.fs_url, create=self.create, writeable=self.writeable,
closeonexit=self.closeonexit, keepalive=self.keepalive)
assert instance.fs_url == self.fs_url
return instance.fs
fs_url = Unicode(
allow_none=False,
help='FS URL',
config=True,
)
create = Bool(
default_value=True,
help='Create filesystem if necessary',
config=True,
)
writeable = Bool(
default_value=True,
help='Open filesystem for reading and writing',
config=True,
)
closeonexit = Bool(
default_value=True,
help='Register an atexit handler to close the filesystem',
config=True,
)
keepalive = Int(
default_value=0,
help='''Send keepalive at this interval (seconds), this might be needed
for remote filesystems''',
config=True,
)
@default('checkpoints_class')
def _checkpoints_class_default(self):
return FsCheckpoints
# https://github.com/quantopian/pgcontents/blob/5fad3f6840d82e6acde97f8e3abe835765fa824b/pgcontents/pgmanager.py#L115
def guess_type(self, path, allow_directory=True):
"""
Guess the type of a file.
If allow_directory is False, don't consider the possibility that the
file is a directory.
"""
if path.endswith('.ipynb'):
return 'notebook'
elif allow_directory and self.dir_exists(path):
return 'directory'
else:
return 'file'
def get(self, path, content=True, type=None, format=None):
self.log.debug('get(%s %s)', path, type)
if type is None:
type = self.guess_type(path)
try:
fn = {
'notebook': self._get_notebook,
'directory': self._get_directory,
'file': self._get_file,
}[type]
except KeyError:
raise ValueError("Unknown type passed: '{}'".format(type))
return fn(path=path, content=content, format=format, type=type)
@wrap_fs_errors('notebook')
def _get_notebook(self, path, content, format, *, type=None, trust=True):
self.log.debug('_get_notebook(%s)', path)
path = self.fs.validatepath(path)
model = self._get_file(path, content, format)
model['type'] = 'notebook'
if content:
nb = nbformat.reads(model['content'], as_version=4)
if trust:
self.mark_trusted_cells(nb, path)
model['content'] = nb
model['format'] = 'json'
if trust:
self.validate_notebook_model(model)
return model
@wrap_fs_errors('directory')
def _get_directory(self, path, content, format, *, type=None):
self.log.debug('_get_directory(%s)', path)
path = self.fs.validatepath(path)
d = self.fs.getdetails(path)
if not d.is_dir:
raise HTTPError(404, '"%s" not a directory', path)
model = _base_model(*fspath.split(path))
model['type'] = 'directory'
model['size'] = None
model['format'] = None
model['created'], model['last_modified'] = _created_modified(d)
if content:
model['content'] = []
model['format'] = 'json'
for item in self.fs.scandir(path, ['basic', 'details']):
child_path = fspath.join(path, item.name)
if item.is_dir:
model['content'].append(
self._get_directory(child_path, False, None))
if item.is_file:
model['content'].append(
self._get_file(child_path, False, format))
return model
@wrap_fs_errors('file')
def _get_file(self, path, content, format, *, type=None):
self.log.debug('_get_file(%s)', path)
path = self.fs.validatepath(path)
f = self.fs.getdetails(path)
if not f.is_file:
raise HTTPError(404, 'Not a file: {}'.format(path))
model = self._file_model(path, f, content, format)
if type:
model['type'] = type
return model
def _file_model(self, path, f, content, format):
model = _base_model(*fspath.split(path))
model['type'] = self.guess_type(path)
model['created'], model['last_modified'] = _created_modified(f)
model['size'] = f.size
if content:
model['content'], model['format'] = self._read_file(path, format)
model['mimetype'] = mimetypes.guess_type(model['path'])[0]
return model
@wrap_fs_errors('file')
def _read_file(self, path, format):
self.log.debug('_read_file(%s)', path)
"""
:param format:
- 'text': contents will be decoded as UTF-8.
- 'base64': raw bytes contents will be encoded as base64.
- None: try to decode as UTF-8, and fall back to base64
"""
with self.fs.openbin(path, 'r') as fo:
bcontent = fo.read()
if format is None or format == 'text':
try:
return bcontent.decode('utf8'), 'text'
except UnicodeError:
if format == 'text':
raise HTTPError(
400,
"{} is not UTF-8 encoded".format(path),
reason='bad format')
return b64encode(bcontent).decode('ascii'), 'base64'
def save(self, model, path):
self.log.debug('save(%s %s)', path, model['type'])
self.run_pre_save_hook(model=model, path=path)
if 'type' not in model or not model['type']:
raise HTTPError(400, 'No model type provided')
try:
fn = {
'notebook': self._save_notebook,
'directory': self._save_directory,
'file': self._save_file,
}[model['type']]
except KeyError:
raise ValueError("Unknown type passed: '{}'".format(type))
return fn(path, model)
@wrap_fs_errors('notebook')
def _save_notebook(self, path, model, sign=True):
self.log.debug('_save_notebook(%s)', path)
nb = nbformat.from_dict(model['content'])
if sign:
self.check_and_sign(nb, path)
model['content'] = nbformat.writes(nb)
model['format'] = 'text'
return self._save_file(path, model)
@wrap_fs_errors('directory')
def _save_directory(self, path, model):
self.log.debug('_save_directory(%s)', path)
self.fs.makedir(path, recreate=True)
model = self._get_directory(path, False, None)
return model
@wrap_fs_errors('file')
def _save_file(self, path, model):
self.log.debug('_save_file(%s)', path)
if 'content' not in model:
raise HTTPError(400, 'No file content provided')
if model.get('format') not in {'text', 'base64'}:
raise HTTPError(
400, "Format of file contents must be 'text' or 'base64'")
try:
if model['format'] == 'text':
bcontent = model['content'].encode('utf8')
else:
bcontent = b64decode(model['content'])
except Exception as e:
raise HTTPError(
400, 'Encoding error saving {}: {}'.format(model['path'], e))
with self.fs.openbin(path, 'w') as fo:
fo.write(bcontent)
return self._get_file(path, False, None)
@wrap_fs_errors('file')
def delete_file(self, path):
# TODO: This is also used to delete directories
self.log.debug('delete_file(%s)', path)
path = self.fs.validatepath(path)
if self.fs.isfile(path):
self.fs.remove(path)
elif self.fs.isdir(path):
self.fs.removedir(path)
else:
raise ResourceNotFound(path)
@wrap_fs_errors('file')
def rename_file(self, old_path, new_path):
self.log.debug('rename_file(%s %s)', old_path, new_path)
old_path = self.fs.validatepath(old_path)
new_path = self.fs.validatepath(new_path)
if old_path == '/':
raise HTTPError(409, 'Unable to rename root /')
if self.fs.isdir(old_path):
if self.fs.exists(new_path):
raise DestinationExists(new_path)
self.fs.movedir(old_path, new_path, create=True)
else:
self.fs.move(old_path, new_path)
@wrap_fs_errors(None)
def file_exists(self, path):
self.log.debug('file_exists(%s)', path)
path = self.fs.validatepath(path)
return self.fs.isfile(path)
@wrap_fs_errors(None)
def dir_exists(self, path):
self.log.debug('dir_exists(%s)', path)
path = self.fs.validatepath(path)
return self.fs.isdir(path)
@wrap_fs_errors(None)
def is_hidden(self, path):
self.log.debug('is_hidden(%s)', path)
path = self.fs.validatepath(path)
return fspath.basename(path).startswith('.')
# def _send_keep_alive(self):
# self.log.debug('Sending keepalive')
# self.conn.c.sf.keepAlive(None)
class FsCheckpoints(GenericCheckpointsMixin, Checkpoints):
checkpoint_dir = Unicode(
'.ipynb_checkpoints',
config=True,
help="""The directory name in which to keep file checkpoints
relative to the file's own directory""",
)
checkpoint_template = Unicode(
'{basename}-checkpoint{id}{ext}',
config=True,
help="""The prefix to add to checkpoint files.
`{basename}` is the filename with the extension, `{ext}` is the
extension including `.`, `{id}` will be replaced by the checkpoint id.
""",
)
def _checkpoint_path(self, checkpoint_id, path):
"""find the path to a checkpoint"""
path = self.parent.fs.validatepath(path)
parent, name = fspath.split(path)
basename, ext = fspath.splitext(name)
cp_path = fspath.join(
parent, self.checkpoint_dir, self.checkpoint_template.format(
basename=basename, id=checkpoint_id, ext=ext))
return cp_path
def _checkpoint_model(self, checkpoint_id, f):
"""construct the info dict for a given checkpoint"""
info = {'id': str(checkpoint_id)}
if isinstance(f, dict):
info['last_modified'] = f['last_modified']
else:
info['last_modified'] = f.modified
return info
def _ensure_checkpoint_dir(self, cp_path):
dirname, basename = fspath.split(cp_path)
if not self.parent.dir_exists(dirname):
self.parent._save_directory(dirname, None)
def create_file_checkpoint(self, content, format, path):
self.log.debug('create_file_checkpoint(%s)', path)
cp_path = self._checkpoint_path(0, path)
self._ensure_checkpoint_dir(cp_path)
model = _base_model(*fspath.split(cp_path))
model['content'] = content
model['format'] = format
f = self.parent._save_file(cp_path, model)
return self._checkpoint_model(0, f)
def create_notebook_checkpoint(self, nb, path):
self.log.debug('create_notebook_checkpoint(%s)', path)
cp_path = self._checkpoint_path(0, path)
self._ensure_checkpoint_dir(cp_path)
model = _base_model(*fspath.split(cp_path))
model['content'] = nb
f = self.parent._save_notebook(cp_path, model, False)
return self._checkpoint_model(0, f)
def get_file_checkpoint(self, checkpoint_id, path):
# -> {'type': 'file', 'content': <str>, 'format': {'text', 'base64'}}
self.log.debug('get_file_checkpoint(%s %s)', checkpoint_id, path)
cp_path = self._checkpoint_path(checkpoint_id, path)
return self.parent._get_file(cp_path, True, None)
def get_notebook_checkpoint(self, checkpoint_id, path):
# -> {'type': 'notebook', 'content': <output of nbformat.read>}
self.log.debug('get_notebook_checkpoint(%s %s)', checkpoint_id, path)
cp_path = self._checkpoint_path(checkpoint_id, path)
return self.parent._get_notebook(cp_path, True, 'text', trust=False)
def delete_checkpoint(self, checkpoint_id, path):
self.log.debug('delete_checkpoint(%s %s)', checkpoint_id, path)
cp_path = self._checkpoint_path(checkpoint_id, path)
self.parent.delete_file(cp_path)
def list_checkpoints(self, path):
self.log.debug('list_checkpoints(%s)', path)
cp_path = self._checkpoint_path(0, path)
if self.parent.file_exists(cp_path):
f = self.parent._get_file(cp_path, False, None)
return [self._checkpoint_model(0, f)]
return []
def rename_checkpoint(self, checkpoint_id, old_path, new_path):
self.log.debug(
'rename_checkpoint(%s %s %s)', checkpoint_id, old_path, new_path)
cp_path_old = self._checkpoint_path(checkpoint_id, old_path)
cp_path_new = self._checkpoint_path(checkpoint_id, new_path)
self._ensure_checkpoint_dir(cp_path_new)
self.parent.rename_file(cp_path_old, cp_path_new)
avg_line_length: 35.629032 | max_line_length: 121 | alphanum_fraction: 0.605534
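A minimal configuration sketch for wiring `FsContentsManager` into the classic Jupyter Notebook server, based on the `config=True` traits defined above (`fs_url`, `create`, `writeable`, `keepalive`); the filesystem URL and paths are example values only.

```python
# jupyter_notebook_config.py -- illustrative values, assuming jupyter_pyfilesystem
# is installed alongside the notebook server.
c = get_config()  # noqa: F821  (provided by Jupyter when loading the config file)

from jupyter_pyfilesystem.contents import FsContentsManager

c.NotebookApp.contents_manager_class = FsContentsManager

# Any PyFilesystem2 FS URL is accepted; a local OSFS directory is the simplest case.
c.FsContentsManager.fs_url = "osfs:///tmp/notebooks"
c.FsContentsManager.create = True        # create the filesystem if it does not exist
c.FsContentsManager.writeable = True
c.FsContentsManager.keepalive = 0        # seconds; >0 may help with remote filesystems

# Checkpoint layout can be tuned as well (defaults shown in the classes above).
c.FsCheckpoints.checkpoint_dir = ".ipynb_checkpoints"
```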
hexsha: 05587fa9096f3e1a1fad48f1ad0a31f5095a1d5a | size: 8,905 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: autotest/gdrivers/wcs.py | repo_name: HongqiangWei/gdal | head_hexsha: f7c427926438cc39d31e4459fa6401321f8e62f0 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test WCS client support.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
import array
import gdal
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Verify we have the driver.
def wcs_1():
# Disable wcs tests till we have a more reliable test server.
gdaltest.wcs_drv = None
try:
gdaltest.wcs_drv = gdal.GetDriverByName( 'WCS' )
except:
gdaltest.wcs_drv = None
# NOTE - mloskot:
# This is a dirty hack checking if remote WCS service is online.
# Nothing genuine but helps to keep the buildbot waterfall green.
srv = 'http://demo.opengeo.org/geoserver/wcs?'
if gdaltest.gdalurlopen(srv) is None:
gdaltest.wcs_drv = None
gdaltest.wcs_ds = None
if gdaltest.wcs_drv is None:
return 'skip'
else:
return 'success'
###############################################################################
# Open the GeoServer WCS service.
def wcs_2():
if gdaltest.wcs_drv is None:
return 'skip'
# first, copy to tmp directory.
open('tmp/geoserver.wcs','w').write(open('data/geoserver.wcs').read())
gdaltest.wcs_ds = None
gdaltest.wcs_ds = gdal.Open( 'tmp/geoserver.wcs' )
if gdaltest.wcs_ds is not None:
return 'success'
else:
gdaltest.post_reason( 'open failed.' )
return 'fail'
###############################################################################
# Check various things about the configuration.
def wcs_3():
if gdaltest.wcs_drv is None or gdaltest.wcs_ds is None:
return 'skip'
if gdaltest.wcs_ds.RasterXSize != 983 \
or gdaltest.wcs_ds.RasterYSize != 598 \
or gdaltest.wcs_ds.RasterCount != 3:
gdaltest.post_reason( 'wrong size or bands' )
print(gdaltest.wcs_ds.RasterXSize)
print(gdaltest.wcs_ds.RasterYSize)
print(gdaltest.wcs_ds.RasterCount)
return 'fail'
wkt = gdaltest.wcs_ds.GetProjectionRef()
if wkt[:14] != 'GEOGCS["WGS 84':
gdaltest.post_reason( 'Got wrong SRS: ' + wkt )
return 'fail'
gt = gdaltest.wcs_ds.GetGeoTransform()
expected_gt = (-130.85167999999999, 0.070036907426246159, 0.0, 54.114100000000001, 0.0, -0.055867725752508368)
for i in range(6):
if abs(gt[i]- expected_gt[i]) > 0.00001:
gdaltest.post_reason( 'wrong geotransform' )
print(gt)
return 'fail'
if gdaltest.wcs_ds.GetRasterBand(1).GetOverviewCount() < 1:
gdaltest.post_reason( 'no overviews!' )
return 'fail'
if gdaltest.wcs_ds.GetRasterBand(1).DataType != gdal.GDT_Byte:
gdaltest.post_reason( 'wrong band data type' )
return 'fail'
return 'success'
###############################################################################
# Check checksum
def wcs_4():
if gdaltest.wcs_drv is None or gdaltest.wcs_ds is None:
return 'skip'
cs = gdaltest.wcs_ds.GetRasterBand(1).Checksum()
if cs != 58765:
gdaltest.post_reason( 'Wrong checksum: ' + str(cs) )
return 'fail'
return 'success'
###############################################################################
# Open the service using XML as filename.
def wcs_5():
if gdaltest.wcs_drv is None:
return 'skip'
fn = """<WCS_GDAL>
<ServiceURL>http://demo.opengeo.org/geoserver/wcs?</ServiceURL>
<CoverageName>Img_Sample</CoverageName>
</WCS_GDAL>
"""
ds = gdal.Open( fn )
if ds is None:
gdaltest.post_reason( 'open failed.' )
return 'fail'
if ds.RasterXSize != 983 \
or ds.RasterYSize != 598 \
or ds.RasterCount != 3:
gdaltest.post_reason( 'wrong size or bands' )
print(ds.RasterXSize)
print(ds.RasterYSize)
print(ds.RasterCount)
return 'fail'
ds = None
return 'success'
###############################################################################
# Open the srtm plus service.
def old_wcs_2():
if gdaltest.wcs_drv is None:
return 'skip'
# first, copy to tmp directory.
open('tmp/srtmplus.wcs','w').write(open('data/srtmplus.wcs').read())
gdaltest.wcs_ds = None
gdaltest.wcs_ds = gdal.Open( 'tmp/srtmplus.wcs' )
if gdaltest.wcs_ds is not None:
return 'success'
else:
gdaltest.post_reason( 'open failed.' )
return 'fail'
###############################################################################
# Check various things about the configuration.
def old_wcs_3():
if gdaltest.wcs_drv is None or gdaltest.wcs_ds is None:
return 'skip'
if gdaltest.wcs_ds.RasterXSize != 43200 \
or gdaltest.wcs_ds.RasterYSize != 21600 \
or gdaltest.wcs_ds.RasterCount != 1:
gdaltest.post_reason( 'wrong size or bands' )
return 'fail'
wkt = gdaltest.wcs_ds.GetProjectionRef()
if wkt[:12] != 'GEOGCS["NAD8':
gdaltest.post_reason( 'Got wrong SRS: ' + wkt )
return 'fail'
gt = gdaltest.wcs_ds.GetGeoTransform()
if abs(gt[0]- -180.0041667) > 0.00001 \
or abs(gt[3]- 90.004167) > 0.00001 \
or abs(gt[1] - 0.00833333) > 0.00001 \
or abs(gt[2] - 0) > 0.00001 \
or abs(gt[5] - -0.00833333) > 0.00001 \
or abs(gt[4] - 0) > 0.00001:
gdaltest.post_reason( 'wrong geotransform' )
print(gt)
return 'fail'
if gdaltest.wcs_ds.GetRasterBand(1).GetOverviewCount() < 1:
gdaltest.post_reason( 'no overviews!' )
return 'fail'
if gdaltest.wcs_ds.GetRasterBand(1).DataType < gdal.GDT_Int16:
gdaltest.post_reason( 'wrong band data type' )
return 'fail'
return 'success'
###############################################################################
# Check checksum for a small region.
def old_wcs_4():
if gdaltest.wcs_drv is None or gdaltest.wcs_ds is None:
return 'skip'
cs = gdaltest.wcs_ds.GetRasterBand(1).Checksum( 0, 0, 100, 100 )
if cs != 10469:
gdaltest.post_reason( 'Wrong checksum: ' + str(cs) )
return 'fail'
return 'success'
###############################################################################
# Open the srtm plus service using XML as filename.
def old_wcs_5():
if gdaltest.wcs_drv is None:
return 'skip'
fn = '<WCS_GDAL><ServiceURL>http://geodata.telascience.org/cgi-bin/mapserv_dem?</ServiceURL><CoverageName>srtmplus_raw</CoverageName><Timeout>75</Timeout></WCS_GDAL>'
ds = gdal.Open( fn )
if ds is None:
gdaltest.post_reason( 'open failed.' )
return 'fail'
if ds.RasterXSize != 43200 \
or ds.RasterYSize != 21600 \
or ds.RasterCount != 1:
gdaltest.post_reason( 'wrong size or bands' )
return 'fail'
ds = None
return 'success'
###############################################################################
def wcs_cleanup():
gdaltest.wcs_drv = None
gdaltest.wcs_ds = None
try:
os.remove( 'tmp/geoserver.wcs' )
except:
pass
return 'success'
gdaltest_list = [
wcs_1,
#wcs_2, #FIXME: reenable after adapting test
wcs_3,
wcs_4,
#wcs_5, #FIXME: reenable after adapting test
wcs_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'wcs' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
avg_line_length: 29.196721 | max_line_length: 170 | alphanum_fraction: 0.571926
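The tests above copy local `.wcs` service description files (`data/geoserver.wcs`, `data/srtmplus.wcs`) whose contents are not part of this record. Based on the inline XML used by `wcs_5` and `old_wcs_5`, such a description plausibly looks like the sketch below; the values are assumptions, not the actual test data shipped with GDAL.

```python
from osgeo import gdal

# Hypothetical service description modelled on the inline XML in wcs_5/old_wcs_5.
service_description = """<WCS_GDAL>
  <ServiceURL>http://demo.opengeo.org/geoserver/wcs?</ServiceURL>
  <CoverageName>Img_Sample</CoverageName>
  <Timeout>75</Timeout>
</WCS_GDAL>"""

# The WCS driver accepts either a path to such a file or the XML itself as the "filename".
ds = gdal.Open(service_description)
if ds is not None:
    print(ds.RasterXSize, ds.RasterYSize, ds.RasterCount)
```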
hexsha: 76dd7157723a2dc005196e2cad8ac4312a5c62b7 | size: 3,579 | ext: py | lang: Python
max_stars / max_issues / max_forks repo_path: data/results/help.py | repo_name: ksboy/finance_negative_entity | head_hexsha: 926200dda0f56380b6a6eb42afb3b616e4498d12 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# encoding: utf-8
"""
@author: banifeng
@contact: [email protected]
@version: 1.0
@file: help.py
@time: 2019-08-18 09:22
Write the description and notes about this file starting from this line.
"""
from collections import defaultdict
def func():
datas = open('result_test.csv').read().splitlines()[1:]
output = []
for data in datas:
split_data = data.split(',')
assert len(split_data) == 9
output.append([split_data[i] for i in range(len(split_data)) if i in [0, 1, 4, 7, 8]])
index = 0
result = []
for i in range(1, 2238):
flag = False
while index < len(output) and output[index][0] == str(i):
flag = True
output[index][3], output[index][4] = output[index][4], output[index][3]
result.append(output[index])
index += 1
if not flag:
result.append([str(i), '_', '_', '_', '_'])
outputs = []
for i, data in enumerate(result):
if i < 1 or data != result[i-1]:
outputs.append(data)
result = outputs
with open('Result.csv', 'w') as f:
# f.write("id,AspectTerms,A_start,A_end,OpinionTerms,O_start,O_end,Categories,Polarities\n")
for i, obj in enumerate(result):
# for i in [2, 3, 5, 6]:
# if isinstance(obj[i], int):
# obj[i] = str(obj[i])
f.write(','.join(obj))
if i != len(result)-1:
f.write("\n")
pass
def check_result():
results = open('Result.csv').read().splitlines()
id_labels_map = defaultdict(list)
for r in results:
id, label = r.split(',',1)
id_labels_map[id].append(label)
for id, values in id_labels_map.items():
aspect_opinion_map = defaultdict(list)
for v in values:
split_label = v.split(',')
aspect_opinion_map[split_label[0]].append(split_label[1])
for key,opinions in aspect_opinion_map.items():
if len(opinions)>1 and '_' in opinions:
print(id, values)
def check_gold_labels():
results = open('../TRAIN/Train_labels.csv').read().splitlines()[1:]
id_labels_map = defaultdict(list)
for r in results:
id, label = r.split(',',1)
id_labels_map[id].append(label)
for id, values in id_labels_map.items():
aspect_opinion_map = defaultdict(list)
for v in values:
split_label = v.split(',')
aspect_opinion_map[split_label[0]].append(split_label[3])
for key,opinions in aspect_opinion_map.items():
if len(opinions)>1 and '_' in opinions:
print(id, values)
for id, values in id_labels_map.items():
opinion_aspect_map = defaultdict(list)
for v in values:
split_label = v.split(',')
opinion_aspect_map[split_label[3]].append(split_label[0])
# print(opinion_aspect_map)
for key, aspects in opinion_aspect_map.items():
if len(aspects) > 1 and '_' in aspects:
print(id, values)
def compare_result_file():
hit_data = open('Result.csv', encoding='utf-8').readlines()
zju_data = open('Result_zju.csv', encoding='utf-8').readlines()
h_set = set(hit_data)
z_set = set(zju_data)
print("h_set - z_set")
print(h_set-z_set)
print("z_set - h_set")
print(z_set - h_set)
print("\n\n\n")
print("len(h_set&z_set)", len(h_set&z_set))
print("len(h_set)==", len(h_set))
print("len(z_set)==", len(z_set))
if __name__ == '__main__':
compare_result_file()
# func()
# check_result()
# check_gold_labels()
| 34.085714 | 100 | 0.578933 |
73bc9dcafb57f31bcdfe1915f1b05c3aef5a85fb
| 3,734 |
py
|
Python
|
py365/auth/msal_connection.py
|
dudil/py365
|
5f350470e3930ff1c1ae09e8cea917fee17836fa
|
[
"MIT"
] | 1 |
2020-04-16T13:28:29.000Z
|
2020-04-16T13:28:29.000Z
|
py365/auth/msal_connection.py
|
dudil/py365
|
5f350470e3930ff1c1ae09e8cea917fee17836fa
|
[
"MIT"
] | 4 |
2020-03-24T17:00:21.000Z
|
2021-02-11T09:16:22.000Z
|
py365/auth/msal_connection.py
|
dudil/py365
|
5f350470e3930ff1c1ae09e8cea917fee17836fa
|
[
"MIT"
] | null | null | null |
import logging
from typing import Optional
from msal import ConfidentialClientApplication, PublicClientApplication
from .app_connection import AppConnection
DEFAULT_API_VER = "v1.0"
DEFAULT_RESOURCE = "https://graph.microsoft.com/"
DEFAULT_SCOPES = ["https://graph.microsoft.com/.default"]
class MsalConnection(AppConnection):
""" Implementation of the Connection class using msal """
def __init__(self, app_id: str, tenant_id: str
, app_secret: str = None, username: str = None, password: str = None
, resource: str = DEFAULT_RESOURCE, api_ver: str = DEFAULT_API_VER):
super().__init__(app_id=app_id, app_secret=app_secret
, tenant_id=tenant_id, resource=resource, api_ver=api_ver)
self.username = username
self.password = password
self.scopes = DEFAULT_SCOPES
self.app = None
def getAccessToken(self) -> Optional[str]:
if self.app_secret:
return self.getConfidentialClientAccessToken()
return self.getPublicAccessToken()
def getConfidentialClientAccessToken(self) -> Optional[str]:
        # Initialise the app if it does not already exist
if not self.app:
logging.info("Initialise msal connection app")
self.app = ConfidentialClientApplication(client_id=self.app_id, authority=self.authority
, client_credential=self.app_secret)
        # try to get the token from the cache if one already exists
result = self.app.acquire_token_silent(scopes=self.scopes, account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = self.app.acquire_token_for_client(scopes=self.scopes)
if "access_token" in result:
return result["access_token"]
return None
def getDeviceFlowAccessToken(self):
if not self.app:
            # the app must be initialised before calling this method
print("App must be initialised before calling getDeviceFlowAccessToken")
return None
flow = self.app.initiate_device_flow(scopes=self.scopes)
print(flow["message"])
print(flow["verification_uri"])
print(flow["user_code"])
return self.app.acquire_token_by_device_flow(flow)
def getUsernamePasswordAccessToken(self):
if not self.app:
            # the app must be initialised before calling this method
print("App must be initialised before calling getUsernamePasswordAccessToken")
return None
return self.app.acquire_token_by_username_password(self.username, self.password, scopes=self.scopes)
def getPublicAccessToken(self) -> Optional[str]:
        # Initialise the app if it does not already exist
if not self.app:
print("Initialise msal connection app")
self.app = PublicClientApplication(client_id=self.app_id, authority=self.authority)
result = None
accounts = self.app.get_accounts()
if accounts:
# TODO: need to pick the relevant account to proceed
chosen = accounts[0]
            # try to get the token from the cache if one already exists
result = self.app.acquire_token_silent(scopes=self.scopes, account=chosen)
if not result:
print("No suitable token exists in cache. Let's get a new one from AAD.")
if self.username and self.password:
result = self.getUsernamePasswordAccessToken()
else:
result = self.getDeviceFlowAccessToken()
if "access_token" in result:
return result["access_token"]
return None
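# --- Editor's usage sketch (hedged; not part of the original module) ---------
# Shows how MsalConnection is typically driven: construct it with the Azure AD
# app registration values and ask for a bearer token.  The IDs and secret
# below are placeholders, not real credentials.
if __name__ == "__main__":
    conn = MsalConnection(
        app_id="00000000-0000-0000-0000-000000000000",   # placeholder client id
        tenant_id="contoso.onmicrosoft.com",             # placeholder tenant
        app_secret="client-secret-from-a-key-vault",     # confidential client flow
    )
    token = conn.getAccessToken()
    print("token acquired" if token else "token acquisition failed")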
| 41.032967 | 108 | 0.654794 |
a8569dface172c145c3ffa996774800922c2b7a6
| 3,734 |
py
|
Python
|
nanobrok/tests/test_deviceInfo_model.py
|
retr0-13/nanobroK
|
6e01e385c6c0c7c231609faedb76c0337de90dc0
|
[
"Apache-2.0"
] | 142 |
2021-09-18T11:25:28.000Z
|
2022-03-30T13:44:58.000Z
|
nanobrok/tests/test_deviceInfo_model.py
|
retr0-13/nanobroK
|
6e01e385c6c0c7c231609faedb76c0337de90dc0
|
[
"Apache-2.0"
] | 1 |
2021-09-19T14:31:17.000Z
|
2021-09-21T00:47:04.000Z
|
nanobrok/tests/test_deviceInfo_model.py
|
retr0-13/nanobroK
|
6e01e385c6c0c7c231609faedb76c0337de90dc0
|
[
"Apache-2.0"
] | 31 |
2021-09-19T03:52:13.000Z
|
2022-03-31T14:19:12.000Z
|
import unittest, json
from nanobrok.models import (
DeviceInfo,
DeviceInfoSchema,
PacketType,
PacketDataSchema,
PacketData,
Event,
)
from pprint import pprint
from marshmallow import ValidationError
from nanobrok.ext.database import db
from datetime import datetime
from nanobrok.blueprints.webui.utils import remove_key_from_dict
# This file is part of the Nanobrok Open Source Project.
# nanobrok is licensed under the Apache 2.0.
# Copyright 2021 p0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TestDeviceInfoSchema(unittest.TestCase):
def setUp(self):
pass
def test_only_deviceInfo(self):
app_device_packet = '{"upTime": 182428741, \
"isRunningOnEmulator": false, "hasSdCard" : false, \
"batteryVoltage": 3963, "isBatteryPresent" : false, \
"totalInternalMemory" : 35678318016, \
"aInternalMemory": 34719318016, \
"totalRAM" : 3907751936, "isDeviceCharging": false, "batteryTechnology": "Litio", \
"deviceRingerMode": "Normal", "iPv4Address": "10.0.0.106", \
"iPv6Address": "FE80::201F:281B:9E88:7ED6", "networkType" : "WIFI/WIFIMAX"}'
app_device_packet = json.loads(app_device_packet)
schema_device = DeviceInfoSchema()
try:
result_deviceInfo = schema_device.load(app_device_packet)
except ValidationError as err:
print(err.messages)
print(err.valid_data)
obj_deviceInfo = DeviceInfo(**result_deviceInfo)
self.assertEqual(obj_deviceInfo.upTime, 182428741)
self.assertEqual(obj_deviceInfo.totalRAM, 3907751936)
def test_packet_data_with_deviceInfo(self):
app_device_packet = '{"command": "", "event": "e921f8fab42fdbb2", \
"registred_at" : 1508484583259, "packet_type": 1, "data": '
app_device_packet += '{"upTime": 182428741, \
"isRunningOnEmulator": false, "hasSdCard" : false, \
"batteryVoltage": 3963, "isBatteryPresent" : false, \
"totalInternalMemory" : 35678318016, \
"aInternalMemory": 34719318016, \
"totalRAM" : 3907751936, "isDeviceCharging": false, "batteryTechnology": "Litio", \
"deviceRingerMode": "Normal", "iPv4Address": "10.0.0.106", \
"iPv6Address": "FE80::201F:281B:9E88:7ED6", "networkType" : "WIFI/WIFIMAX"}}'
app_device_packet = json.loads(app_device_packet)
schema_packet_data = PacketDataSchema()
        schema_deviceInfo = DeviceInfoSchema()
try:
result_packet_data = schema_packet_data.load(
remove_key_from_dict(app_device_packet, {"data"})
)
            result_deviceInfo = schema_deviceInfo.load(app_device_packet.get("data"))
except ValidationError as err:
print(err.messages)
print(err.valid_data)
obj_packet_data = PacketData(**result_packet_data)
obj_deviceInfo = DeviceInfo(**result_deviceInfo)
self.assertEqual(obj_packet_data.event, Event.GET_DEVICEINFO_CODE.name)
self.assertEqual(obj_deviceInfo.upTime, 182428741)
self.assertEqual(obj_deviceInfo.totalRAM, 3907751936)
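# --- Editor's sketch (standalone; not part of the nanobrok test suite) -------
# The tests above follow the usual marshmallow pattern: declare a Schema,
# call load(), and catch ValidationError for malformed payloads.  The toy
# schema below shows that pattern in isolation; its field names only mirror
# the packet keys used above.
def _marshmallow_pattern_sketch():
    from marshmallow import Schema, fields
    class ToyPacket(Schema):
        event = fields.Str(required=True)
        registred_at = fields.Int(required=True)
    try:
        # Returns the validated dict on success.
        return ToyPacket().load({"event": "e921f8fab42fdbb2", "registred_at": 1508484583259})
    except ValidationError as err:
        # Returns the per-field error messages on failure.
        return err.messages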
if __name__ == "__main__":
unittest.main()
| 39.305263 | 91 | 0.687467 |
3dbf4dad3446e51da65796c8b59a3629c530a11d
| 2,067 |
py
|
Python
|
Python/phonenumbers/data/region_TW.py
|
skykisl/uberbruns2
|
26933efce04dba700d93cc75c7b74e069fb02d26
|
[
"Unlicense"
] | 5 |
2015-04-27T20:10:56.000Z
|
2018-06-14T18:19:09.000Z
|
python/phonenumbers/data/region_TW.py
|
vemel/python-phonenumbers
|
595c322bf12106a3b95e3f202e948a7c6b6c15b8
|
[
"Apache-2.0"
] | 2 |
2017-06-08T16:11:13.000Z
|
2018-05-07T11:50:13.000Z
|
python/phonenumbers/data/region_TW.py
|
vemel/python-phonenumbers
|
595c322bf12106a3b95e3f202e948a7c6b6c15b8
|
[
"Apache-2.0"
] | 6 |
2015-02-19T11:11:04.000Z
|
2022-03-15T19:38:31.000Z
|
"""Auto-generated file, do not edit by hand. TW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TW = PhoneMetadata(id='TW', country_code=886, international_prefix='0(?:0[25679]|19)',
general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{7,8}', possible_number_pattern='\\d{8,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='[2-8]\\d{7,8}', possible_number_pattern='\\d{8,9}', example_number='21234567'),
mobile=PhoneNumberDesc(national_number_pattern='9\\d{8}', possible_number_pattern='\\d{9}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}', possible_number_pattern='\\d{9}', example_number='800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{6}', possible_number_pattern='\\d{9}', example_number='900123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='11[029]', possible_number_pattern='\\d{3}', example_number='110'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
preferred_extn_prefix='#',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([2-8])(\\d{3,4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[2-7]|8[1-9]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='([89]\\d{2})(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['80|9'], national_prefix_formatting_rule=u'0\\1')])
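# --- Editor's sketch (illustrative only; not generated content) --------------
# This metadata is consumed through the public phonenumbers API, e.g. parsing
# a Taiwanese mobile number and formatting it as E.164.  The number below is
# an arbitrary example matching the mobile pattern above.
def _tw_usage_sketch():
    import phonenumbers
    n = phonenumbers.parse("0912345678", "TW")
    return phonenumbers.is_valid_number(n), phonenumbers.format_number(
        n, phonenumbers.PhoneNumberFormat.E164)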
| 89.869565 | 174 | 0.752298 |
f62632a41ab0fec5c2f2c0a26b2f0c25482f576e
| 38,719 |
py
|
Python
|
readthedocs/builds/models.py
|
comradekingu/readthedocs.org
|
b657dd5bc721db7357b71363494cf814b1a4785a
|
[
"MIT"
] | null | null | null |
readthedocs/builds/models.py
|
comradekingu/readthedocs.org
|
b657dd5bc721db7357b71363494cf814b1a4785a
|
[
"MIT"
] | null | null | null |
readthedocs/builds/models.py
|
comradekingu/readthedocs.org
|
b657dd5bc721db7357b71363494cf814b1a4785a
|
[
"MIT"
] | null | null | null |
"""Models for the builds app."""
import datetime
import logging
import os.path
import re
from shutil import rmtree
import regex
from django.conf import settings
from django.db import models
from django.db.models import F
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from jsonfield import JSONField
from polymorphic.models import PolymorphicModel
import readthedocs.builds.automation_actions as actions
from readthedocs.builds.constants import (
BRANCH,
BUILD_STATE,
BUILD_STATE_FINISHED,
BUILD_STATE_TRIGGERED,
BUILD_TYPES,
EXTERNAL,
GENERIC_EXTERNAL_VERSION_NAME,
GITHUB_EXTERNAL_VERSION_NAME,
GITLAB_EXTERNAL_VERSION_NAME,
INTERNAL,
LATEST,
NON_REPOSITORY_VERSIONS,
PREDEFINED_MATCH_ARGS,
PREDEFINED_MATCH_ARGS_VALUES,
STABLE,
TAG,
VERSION_TYPES,
)
from readthedocs.builds.managers import (
BuildManager,
ExternalBuildManager,
ExternalVersionManager,
InternalBuildManager,
InternalVersionManager,
VersionAutomationRuleManager,
VersionManager,
)
from readthedocs.builds.querysets import (
BuildQuerySet,
RelatedBuildQuerySet,
VersionQuerySet,
)
from readthedocs.builds.utils import (
get_bitbucket_username_repo,
get_github_username_repo,
get_gitlab_username_repo,
)
from readthedocs.builds.version_slug import VersionSlugField
from readthedocs.config import LATEST_CONFIGURATION_VERSION
from readthedocs.core.utils import broadcast
from readthedocs.projects.constants import (
BITBUCKET_COMMIT_URL,
BITBUCKET_URL,
GITHUB_BRAND,
GITHUB_COMMIT_URL,
GITHUB_PULL_REQUEST_COMMIT_URL,
GITHUB_PULL_REQUEST_URL,
GITHUB_URL,
GITLAB_BRAND,
GITLAB_COMMIT_URL,
DOCUMENTATION_CHOICES,
GITLAB_MERGE_REQUEST_COMMIT_URL,
GITLAB_MERGE_REQUEST_URL,
GITLAB_URL,
MEDIA_TYPES,
PRIVACY_CHOICES,
PRIVATE,
)
from readthedocs.projects.models import APIProject, Project
from readthedocs.projects.version_handling import determine_stable_version
log = logging.getLogger(__name__)
class Version(models.Model):
"""Version of a ``Project``."""
project = models.ForeignKey(
Project,
verbose_name=_('Project'),
related_name='versions',
on_delete=models.CASCADE,
)
type = models.CharField(
_('Type'),
max_length=20,
choices=VERSION_TYPES,
default='unknown',
)
# used by the vcs backend
#: The identifier is the ID for the revision this is version is for. This
#: might be the revision number (e.g. in SVN), or the commit hash (e.g. in
    #: Git). If this version is pointing to a branch, then ``identifier``
#: will contain the branch name.
identifier = models.CharField(_('Identifier'), max_length=255)
#: This is the actual name that we got for the commit stored in
#: ``identifier``. This might be the tag or branch name like ``"v1.0.4"``.
#: However this might also hold special version names like ``"latest"``
#: and ``"stable"``.
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
#: The slug is the slugified version of ``verbose_name`` that can be used
#: in the URL to identify this version in a project. It's also used in the
#: filesystem to determine how the paths for this version are called. It
#: must not be used for any other identifying purposes.
slug = VersionSlugField(
_('Slug'),
max_length=255,
populate_from='verbose_name',
)
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'),
max_length=20,
choices=PRIVACY_CHOICES,
default=settings.DEFAULT_VERSION_PRIVACY_LEVEL,
help_text=_('Level of privacy for this Version.'),
)
machine = models.BooleanField(_('Machine Created'), default=False)
# Whether the latest successful build for this version contains certain media types
has_pdf = models.BooleanField(_('Has PDF'), default=False)
has_epub = models.BooleanField(_('Has ePub'), default=False)
has_htmlzip = models.BooleanField(_('Has HTML Zip'), default=False)
documentation_type = models.CharField(
_('Documentation type'),
max_length=20,
choices=DOCUMENTATION_CHOICES,
default='sphinx',
help_text=_(
'Type of documentation the version was built with.'
),
)
objects = VersionManager.from_queryset(VersionQuerySet)()
# Only include BRANCH, TAG, UNKNOWN type Versions.
internal = InternalVersionManager.from_queryset(VersionQuerySet)()
# Only include EXTERNAL type Versions.
external = ExternalVersionManager.from_queryset(VersionQuerySet)()
class Meta:
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
def __str__(self):
return ugettext(
'Version {version} of {project} ({pk})'.format(
version=self.verbose_name,
project=self.project,
pk=self.pk,
),
)
@property
def ref(self):
if self.slug == STABLE:
stable = determine_stable_version(
self.project.versions(manager=INTERNAL).all()
)
if stable:
return stable.slug
@property
def vcs_url(self):
"""
Generate VCS (github, gitlab, bitbucket) URL for this version.
Example: https://github.com/rtfd/readthedocs.org/tree/3.4.2/.
External Version Example: https://github.com/rtfd/readthedocs.org/pull/99/.
"""
if self.type == EXTERNAL:
if 'github' in self.project.repo:
user, repo = get_github_username_repo(self.project.repo)
return GITHUB_PULL_REQUEST_URL.format(
user=user,
repo=repo,
number=self.verbose_name,
)
if 'gitlab' in self.project.repo:
user, repo = get_gitlab_username_repo(self.project.repo)
return GITLAB_MERGE_REQUEST_URL.format(
user=user,
repo=repo,
number=self.verbose_name,
)
# TODO: Add VCS URL for BitBucket.
return ''
url = ''
if self.slug == STABLE:
slug_url = self.ref
elif self.slug == LATEST:
slug_url = self.project.get_default_branch()
else:
slug_url = self.slug
if ('github' in self.project.repo) or ('gitlab' in self.project.repo):
url = f'/tree/{slug_url}/'
if 'bitbucket' in self.project.repo:
slug_url = self.identifier
url = f'/src/{slug_url}'
# TODO: improve this replacing
return self.project.repo.replace('git://', 'https://').replace('.git', '') + url
@property
def last_build(self):
return self.builds.order_by('-date').first()
@property
def config(self):
"""
Proxy to the configuration of the build.
:returns: The configuration used in the last successful build.
:rtype: dict
"""
last_build = (
self.builds(manager=INTERNAL).filter(
state=BUILD_STATE_FINISHED,
success=True,
).order_by('-date')
.only('_config')
.first()
)
return last_build.config
@property
def commit_name(self):
"""
Return the branch name, the tag name or the revision identifier.
The result could be used as ref in a git repo, e.g. for linking to
GitHub, Bitbucket or GitLab.
"""
# LATEST is special as it is usually a branch but does not contain the
# name in verbose_name.
if self.slug == LATEST:
return self.project.get_default_branch()
if self.slug == STABLE:
if self.type == BRANCH:
# Special case, as we do not store the original branch name
# that the stable version works on. We can only interpolate the
# name from the commit identifier, but it's hacky.
# TODO: Refactor ``Version`` to store more actual info about
# the underlying commits.
if self.identifier.startswith('origin/'):
return self.identifier[len('origin/'):]
return self.identifier
# By now we must have handled all special versions.
if self.slug in NON_REPOSITORY_VERSIONS:
raise Exception('All special versions must be handled by now.')
if self.type in (BRANCH, TAG):
# If this version is a branch or a tag, the verbose_name will
# contain the actual name. We cannot use identifier as this might
# include the "origin/..." part in the case of a branch. A tag
# would contain the hash in identifier, which is not as pretty as
# the actual tag name.
return self.verbose_name
if self.type == EXTERNAL:
# If this version is a EXTERNAL version, the identifier will
# contain the actual commit hash. which we can use to
# generate url for a given file name
return self.identifier
# If we came that far it's not a special version
# nor a branch, tag or EXTERNAL version.
# Therefore just return the identifier to make a safe guess.
log.debug(
'TODO: Raise an exception here. Testing what cases it happens',
)
return self.identifier
def get_absolute_url(self):
"""Get absolute url to the docs of the version."""
if not self.built and not self.uploaded:
return reverse(
'project_version_detail',
kwargs={
'project_slug': self.project.slug,
'version_slug': self.slug,
},
)
private = self.privacy_level == PRIVATE
external = self.type == EXTERNAL
return self.project.get_docs_url(
version_slug=self.slug,
private=private,
external=external,
)
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Add permissions to the Version for all owners on save."""
from readthedocs.projects import tasks
obj = super().save(*args, **kwargs)
if not self.project.has_feature(feature_id='skip_sync'):
broadcast(
type='app',
task=tasks.symlink_project,
args=[self.project.pk],
)
return obj
def delete(self, *args, **kwargs): # pylint: disable=arguments-differ
from readthedocs.projects import tasks
log.info('Removing files for version %s', self.slug)
has_skip_sync = self.project.has_feature(feature_id='skip_sync')
if not has_skip_sync:
broadcast(
type='app',
task=tasks.remove_dirs,
args=[self.get_artifact_paths()],
)
# Remove resources if the version is not external
if self.type != EXTERNAL:
tasks.clean_project_resources(self.project, self)
project_pk = self.project.pk
super().delete(*args, **kwargs)
if not has_skip_sync:
broadcast(
type='app',
task=tasks.symlink_project,
args=[project_pk],
)
@property
def identifier_friendly(self):
"""Return display friendly identifier."""
if re.match(r'^[0-9a-f]{40}$', self.identifier, re.I):
return self.identifier[:8]
return self.identifier
@property
def is_editable(self):
return self.type == BRANCH
@property
def supports_wipe(self):
"""Return True if version is not external."""
return not self.type == EXTERNAL
def get_subdomain_url(self):
private = self.privacy_level == PRIVATE
external = self.type == EXTERNAL
return self.project.get_docs_url(
version_slug=self.slug,
lang_slug=self.project.language,
private=private,
external=external,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
def prettify(k):
return k if pretty else k.lower()
if self.has_pdf:
data[prettify('PDF')] = project.get_production_media_url(
'pdf',
self.slug,
)
if self.has_htmlzip:
data[prettify('HTML')] = project.get_production_media_url(
'htmlzip',
self.slug,
)
if self.has_epub:
data[prettify('Epub')] = project.get_production_media_url(
'epub',
self.slug,
)
return data
def get_conf_py_path(self):
conf_py_path = self.project.conf_dir(self.slug)
checkout_prefix = self.project.checkout_path(self.slug)
conf_py_path = os.path.relpath(conf_py_path, checkout_prefix)
return conf_py_path
def get_build_path(self):
"""Return version build path if path exists, otherwise `None`."""
path = self.project.checkout_path(version=self.slug)
if os.path.exists(path):
return path
return None
def get_artifact_paths(self):
"""
Return a list of all production artifacts/media path for this version.
:rtype: list
"""
paths = []
for type_ in ('pdf', 'epub', 'htmlzip'):
paths.append(
self.project
.get_production_media_path(type_=type_, version_slug=self.slug),
)
paths.append(self.project.rtd_build_path(version=self.slug))
return paths
def get_storage_paths(self):
"""
Return a list of all build artifact storage paths for this version.
:rtype: list
"""
paths = []
for type_ in MEDIA_TYPES:
paths.append(
self.project.get_storage_path(
type_=type_,
version_slug=self.slug,
include_file=False,
version_type=self.type,
)
)
return paths
def clean_build_path(self):
"""
Clean build path for project version.
Ensure build path is clean for project version. Used to ensure stale
build checkouts for each project version are removed.
"""
try:
path = self.get_build_path()
if path is not None:
log.debug('Removing build path %s for %s', path, self)
rmtree(path)
except OSError:
log.exception('Build path cleanup failed')
def get_github_url(
self,
docroot,
filename,
source_suffix='.rst',
action='view',
):
"""
Return a GitHub URL for a given filename.
:param docroot: Location of documentation in repository
:param filename: Name of file
:param source_suffix: File suffix of documentation format
:param action: `view` (default) or `edit`
"""
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
# Normalize /docroot/
docroot = '/' + docroot.strip('/') + '/'
if action == 'view':
action_string = 'blob'
elif action == 'edit':
action_string = 'edit'
user, repo = get_github_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
if not filename:
# If there isn't a filename, we don't need a suffix
source_suffix = ''
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
action=action_string,
)
def get_gitlab_url(
self,
docroot,
filename,
source_suffix='.rst',
action='view',
):
repo_url = self.project.repo
if 'gitlab' not in repo_url:
return ''
if not docroot:
return ''
# Normalize /docroot/
docroot = '/' + docroot.strip('/') + '/'
if action == 'view':
action_string = 'blob'
elif action == 'edit':
action_string = 'edit'
user, repo = get_gitlab_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
if not filename:
# If there isn't a filename, we don't need a suffix
source_suffix = ''
return GITLAB_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
action=action_string,
)
def get_bitbucket_url(self, docroot, filename, source_suffix='.rst'):
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
# Normalize /docroot/
docroot = '/' + docroot.strip('/') + '/'
user, repo = get_bitbucket_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
if not filename:
# If there isn't a filename, we don't need a suffix
source_suffix = ''
return BITBUCKET_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
)
class APIVersion(Version):
"""
Version proxy model for API data deserialization.
This replaces the pattern where API data was deserialized into a mocked
:py:class:`Version` object.
This pattern was confusing, as it was not explicit
as to what form of object you were working with -- API backed or database
backed.
This model preserves the Version model methods, allowing for overrides on
model field differences. This model pattern will generally only be used on
builder instances, where we are interacting solely with API data.
"""
project = None
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
self.project = APIProject(**kwargs.pop('project', {}))
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
for key in ['resource_uri', 'absolute_url', 'downloads']:
try:
del kwargs[key]
except KeyError:
pass
super().__init__(*args, **kwargs)
def save(self, *args, **kwargs):
return 0
class Build(models.Model):
"""Build data."""
project = models.ForeignKey(
Project,
verbose_name=_('Project'),
related_name='builds',
on_delete=models.CASCADE,
)
version = models.ForeignKey(
Version,
verbose_name=_('Version'),
null=True,
related_name='builds',
on_delete=models.CASCADE,
)
type = models.CharField(
_('Type'),
max_length=55,
choices=BUILD_TYPES,
default='html',
)
state = models.CharField(
_('State'),
max_length=55,
choices=BUILD_STATE,
default='finished',
)
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'), default=True)
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), null=True, blank=True)
commit = models.CharField(
_('Commit'),
max_length=255,
null=True,
blank=True,
)
_config = JSONField(_('Configuration used in the build'), default=dict)
length = models.IntegerField(_('Build Length'), null=True, blank=True)
builder = models.CharField(
_('Builder'),
max_length=255,
null=True,
blank=True,
)
cold_storage = models.NullBooleanField(
_('Cold Storage'),
help_text='Build steps stored outside the database.',
)
# Managers
objects = BuildManager.from_queryset(BuildQuerySet)()
# Only include BRANCH, TAG, UNKNOWN type Version builds.
internal = InternalBuildManager.from_queryset(BuildQuerySet)()
# Only include EXTERNAL type Version builds.
external = ExternalBuildManager.from_queryset(BuildQuerySet)()
CONFIG_KEY = '__config'
class Meta:
ordering = ['-date']
get_latest_by = 'date'
index_together = [['version', 'state', 'type']]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._config_changed = False
@property
def previous(self):
"""
Returns the previous build to the current one.
Matching the project and version.
"""
date = self.date or timezone.now()
if self.project is not None and self.version is not None:
return (
Build.objects.filter(
project=self.project,
version=self.version,
date__lt=date,
).order_by('-date').first()
)
return None
@property
def config(self):
"""
Get the config used for this build.
Since we are saving the config into the JSON field only when it differs
from the previous one, this helper returns the correct JSON used in this
Build object (it could be stored in this object or one of the previous
ones).
"""
if self.CONFIG_KEY in self._config:
return (
Build.objects
.only('_config')
.get(pk=self._config[self.CONFIG_KEY])
._config
)
return self._config
@config.setter
def config(self, value):
"""
Set `_config` to value.
`_config` should never be set directly from outside the class.
"""
self._config = value
self._config_changed = True
def save(self, *args, **kwargs): # noqa
"""
Save object.
To save space on the db we only save the config if it's different
from the previous one.
If the config is the same, we save the pk of the object
that has the **real** config under the `CONFIG_KEY` key.
"""
if self.pk is None or self._config_changed:
previous = self.previous
# yapf: disable
if (
previous is not None and self._config and
self._config == previous.config
):
# yapf: enable
previous_pk = previous._config.get(self.CONFIG_KEY, previous.pk)
self._config = {self.CONFIG_KEY: previous_pk}
super().save(*args, **kwargs)
self._config_changed = False
def __str__(self):
return ugettext(
'Build {project} for {usernames} ({pk})'.format(
project=self.project,
usernames=' '.join(
self.project.users.all().values_list('username', flat=True),
),
pk=self.pk,
),
)
def get_absolute_url(self):
return reverse('builds_detail', args=[self.project.slug, self.pk])
def get_full_url(self):
"""
Get full url of the build including domain.
Example: https://readthedocs.org/projects/pip/builds/99999999/
"""
scheme = 'http' if settings.DEBUG else 'https'
full_url = '{scheme}://{domain}{absolute_url}'.format(
scheme=scheme,
domain=settings.PRODUCTION_DOMAIN,
absolute_url=self.get_absolute_url()
)
return full_url
def get_commit_url(self):
"""Return the commit URL."""
repo_url = self.project.repo
if self.is_external:
if 'github' in repo_url:
user, repo = get_github_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITHUB_PULL_REQUEST_COMMIT_URL.format(
user=user,
repo=repo,
number=self.version.verbose_name,
commit=self.commit
)
if 'gitlab' in repo_url:
user, repo = get_gitlab_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITLAB_MERGE_REQUEST_COMMIT_URL.format(
user=user,
repo=repo,
number=self.version.verbose_name,
commit=self.commit
)
# TODO: Add External Version Commit URL for BitBucket.
else:
if 'github' in repo_url:
user, repo = get_github_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITHUB_COMMIT_URL.format(
user=user,
repo=repo,
commit=self.commit
)
if 'gitlab' in repo_url:
user, repo = get_gitlab_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITLAB_COMMIT_URL.format(
user=user,
repo=repo,
commit=self.commit
)
if 'bitbucket' in repo_url:
user, repo = get_bitbucket_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return BITBUCKET_COMMIT_URL.format(
user=user,
repo=repo,
commit=self.commit
)
return None
@property
def finished(self):
"""Return if build has a finished state."""
return self.state == BUILD_STATE_FINISHED
@property
def is_stale(self):
"""Return if build state is triggered & date more than 5m ago."""
mins_ago = timezone.now() - datetime.timedelta(minutes=5)
return self.state == BUILD_STATE_TRIGGERED and self.date < mins_ago
@property
def is_external(self):
return self.version.type == EXTERNAL
@property
def external_version_name(self):
if self.is_external:
if self.project.git_provider_name == GITHUB_BRAND:
return GITHUB_EXTERNAL_VERSION_NAME
if self.project.git_provider_name == GITLAB_BRAND:
return GITLAB_EXTERNAL_VERSION_NAME
# TODO: Add External Version Name for BitBucket.
return GENERIC_EXTERNAL_VERSION_NAME
return None
def using_latest_config(self):
return int(self.config.get('version', '1')) == LATEST_CONFIGURATION_VERSION
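# --- Editor's sketch (pure Python; not used by the application) --------------
# ``Build.config``/``Build.save`` above de-duplicate identical configs by
# storing ``{Build.CONFIG_KEY: <pk of the build holding the real config>}``.
# The toy below mirrors that pointer scheme with plain dicts so the mechanism
# can be read without a database; all names here are hypothetical.
def _config_dedup_sketch():
    CONFIG_KEY = '__config'
    stored = {}  # pk -> persisted _config
    def save(pk, config, previous_pk=None):
        if previous_pk is not None:
            prev = stored[previous_pk]
            # Resolve a pointer to the build that really holds the config.
            real = stored[prev[CONFIG_KEY]] if CONFIG_KEY in prev else prev
            if real == config:
                stored[pk] = {CONFIG_KEY: prev.get(CONFIG_KEY, previous_pk)}
                return
        stored[pk] = config
    def load(pk):
        value = stored[pk]
        return stored[value[CONFIG_KEY]] if CONFIG_KEY in value else value
    save(1, {'version': 2})
    save(2, {'version': 2}, previous_pk=1)  # identical -> stored as a pointer
    save(3, {'version': 2}, previous_pk=2)  # pointer still targets build 1
    assert load(3) == {'version': 2} and stored[3] == {CONFIG_KEY: 1}
    return stored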
class BuildCommandResultMixin:
"""
Mixin for common command result methods/properties.
Shared methods between the database model :py:class:`BuildCommandResult` and
    non-model representations of build command results from the API
"""
@property
def successful(self):
"""Did the command exit with a successful exit code."""
return self.exit_code == 0
@property
def failed(self):
"""
Did the command exit with a failing exit code.
Helper for inverse of :py:meth:`successful`
"""
return not self.successful
class BuildCommandResult(BuildCommandResultMixin, models.Model):
"""Build command for a ``Build``."""
build = models.ForeignKey(
Build,
verbose_name=_('Build'),
related_name='commands',
on_delete=models.CASCADE,
)
command = models.TextField(_('Command'))
description = models.TextField(_('Description'), blank=True)
output = models.TextField(_('Command output'), blank=True)
exit_code = models.IntegerField(_('Command exit code'))
start_time = models.DateTimeField(_('Start time'))
end_time = models.DateTimeField(_('End time'))
class Meta:
ordering = ['start_time']
get_latest_by = 'start_time'
objects = RelatedBuildQuerySet.as_manager()
def __str__(self):
return (
ugettext('Build command {pk} for build {build}')
.format(pk=self.pk, build=self.build)
)
@property
def run_time(self):
"""Total command runtime in seconds."""
if self.start_time is not None and self.end_time is not None:
diff = self.end_time - self.start_time
return diff.seconds
class VersionAutomationRule(PolymorphicModel, TimeStampedModel):
"""Versions automation rules for projects."""
ACTIVATE_VERSION_ACTION = 'activate-version'
SET_DEFAULT_VERSION_ACTION = 'set-default-version'
ACTIONS = (
(ACTIVATE_VERSION_ACTION, _('Activate version')),
(SET_DEFAULT_VERSION_ACTION, _('Set version as default')),
)
project = models.ForeignKey(
Project,
related_name='automation_rules',
on_delete=models.CASCADE,
)
priority = models.IntegerField(
_('Rule priority'),
help_text=_('A lower number (0) means a higher priority'),
)
description = models.CharField(
_('Description'),
max_length=255,
null=True,
blank=True,
)
match_arg = models.CharField(
_('Match argument'),
help_text=_('Value used for the rule to match the version'),
max_length=255,
)
predefined_match_arg = models.CharField(
_('Predefined match argument'),
help_text=_(
            'Match argument defined by us; it is used if it is not None, '
'otherwise match_arg will be used.'
),
max_length=255,
choices=PREDEFINED_MATCH_ARGS,
null=True,
blank=True,
default=None,
)
action = models.CharField(
_('Action'),
help_text=_('Action to apply to matching versions'),
max_length=32,
choices=ACTIONS,
)
action_arg = models.CharField(
_('Action argument'),
        help_text=_('Value used for the action to perform an operation'),
max_length=255,
null=True,
blank=True,
)
version_type = models.CharField(
_('Version type'),
help_text=_('Type of version the rule should be applied to'),
max_length=32,
choices=VERSION_TYPES,
)
objects = VersionAutomationRuleManager()
class Meta:
unique_together = (('project', 'priority'),)
ordering = ('priority', '-modified', '-created')
def get_match_arg(self):
"""Get the match arg defined for `predefined_match_arg` or the match from user."""
match_arg = PREDEFINED_MATCH_ARGS_VALUES.get(
self.predefined_match_arg,
)
return match_arg or self.match_arg
def run(self, version, *args, **kwargs):
"""
Run an action if `version` matches the rule.
:type version: readthedocs.builds.models.Version
:returns: True if the action was performed
"""
if version.type == self.version_type:
match, result = self.match(version, self.get_match_arg())
if match:
self.apply_action(version, result)
return True
return False
def match(self, version, match_arg):
"""
Returns True and the match result if the version matches the rule.
:type version: readthedocs.builds.models.Version
:param str match_arg: Additional argument to perform the match
        :returns: A tuple of (boolean, match_result).
The result will be passed to `apply_action`.
"""
return False, None
def apply_action(self, version, match_result):
"""
Apply the action from allowed_actions.
:type version: readthedocs.builds.models.Version
:param any match_result: Additional context from the match operation
:raises: NotImplementedError if the action
isn't implemented or supported for this rule.
"""
action = self.allowed_actions.get(self.action)
if action is None:
raise NotImplementedError
action(version, match_result, self.action_arg)
def move(self, steps):
"""
Change the priority of this Automation Rule.
This is done by moving it ``n`` steps,
relative to the other priority rules.
        The priorities of the other rules are updated too.
:param steps: Number of steps to be moved
(it can be negative)
:returns: True if the priority was changed
"""
total = self.project.automation_rules.count()
current_priority = self.priority
new_priority = (current_priority + steps) % total
if current_priority == new_priority:
return False
# Move other's priority
if new_priority > current_priority:
# It was moved down
rules = (
self.project.automation_rules
.filter(priority__gt=current_priority, priority__lte=new_priority)
# We sort the queryset in asc order
# to be updated in that order
# to avoid hitting the unique constraint (project, priority).
.order_by('priority')
)
expression = F('priority') - 1
else:
# It was moved up
rules = (
self.project.automation_rules
.filter(priority__lt=current_priority, priority__gte=new_priority)
.exclude(pk=self.pk)
# We sort the queryset in desc order
# to be updated in that order
# to avoid hitting the unique constraint (project, priority).
.order_by('-priority')
)
expression = F('priority') + 1
        # Put an impossible priority to avoid
# the unique constraint (project, priority)
# while updating.
self.priority = total + 99
self.save()
# We update each object one by one to
# avoid hitting the unique constraint (project, priority).
for rule in rules:
rule.priority = expression
rule.save()
# Put back new priority
self.priority = new_priority
self.save()
return True
def delete(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Override method to update the other priorities after delete."""
current_priority = self.priority
project = self.project
super().delete(*args, **kwargs)
rules = (
project.automation_rules
.filter(priority__gte=current_priority)
# We sort the queryset in asc order
# to be updated in that order
# to avoid hitting the unique constraint (project, priority).
.order_by('priority')
)
# We update each object one by one to
# avoid hitting the unique constraint (project, priority).
for rule in rules:
rule.priority = F('priority') - 1
rule.save()
def get_description(self):
if self.description:
return self.description
return f'{self.get_action_display()}'
def get_edit_url(self):
raise NotImplementedError
def __str__(self):
class_name = self.__class__.__name__
return (
f'({self.priority}) '
f'{class_name}/{self.get_action_display()} '
f'for {self.project.slug}:{self.get_version_type_display()}'
)
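# --- Editor's sketch (pure Python; not imported anywhere) --------------------
# ``VersionAutomationRule.move`` above shifts a rule ``steps`` positions with
# wrap-around and slides the rules in between by one.  On a plain list that is
# just a pop/insert at ``(index + steps) % total``; the helper below exists
# only to make that arithmetic easy to check; its name is hypothetical.
def _move_priority_sketch(rule_ids, index, steps):
    total = len(rule_ids)
    new_index = (index + steps) % total
    # Remove the rule and re-insert it at its new position; the rules in
    # between shift by one, exactly like the DB priority updates above.
    rule_ids.insert(new_index, rule_ids.pop(index))
    return rule_ids
# e.g. _move_priority_sketch(['a', 'b', 'c', 'd'], 0, -1) -> ['b', 'c', 'd', 'a']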
class RegexAutomationRule(VersionAutomationRule):
TIMEOUT = 1 # timeout in seconds
allowed_actions = {
VersionAutomationRule.ACTIVATE_VERSION_ACTION: actions.activate_version,
VersionAutomationRule.SET_DEFAULT_VERSION_ACTION: actions.set_default_version,
}
class Meta:
proxy = True
def match(self, version, match_arg):
"""
Find a match using regex.search.
.. note::
We use the regex module with the timeout
arg to avoid ReDoS.
We could use a finite state machine type of regex too,
            but there isn't a stable library at the time of writing this code.
"""
try:
match = regex.search(
match_arg,
version.verbose_name,
# Compatible with the re module
flags=regex.VERSION0,
timeout=self.TIMEOUT,
)
return bool(match), match
except TimeoutError:
log.warning(
'Timeout while parsing regex. pattern=%s, input=%s',
match_arg, version.verbose_name,
)
except Exception as e:
log.info('Error parsing regex: %s', e)
return False, None
def get_edit_url(self):
return reverse(
'projects_automation_rule_regex_edit',
args=[self.project.slug, self.pk],
)
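# --- Editor's sketch (not called by the application) -------------------------
# ``RegexAutomationRule.match`` bounds pattern evaluation with the ``regex``
# module's ``timeout`` argument to avoid ReDoS.  The helper below shows that
# guard in isolation; the default pattern and text are arbitrary examples.
def _regex_timeout_sketch(pattern=r'^v(\d+)\.(\d+)$', text='v1.2', timeout=1):
    try:
        match = regex.search(pattern, text, flags=regex.VERSION0, timeout=timeout)
        return bool(match)
    except TimeoutError:
        # The pattern took too long to evaluate against this input.
        return False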
| 31.736885 | 90 | 0.584545 |
2a06ca4b909d5dc2b9b674bcec3bbf7873e7812f
| 2,831 |
py
|
Python
|
tools/build/v2/test/build_dir.py
|
juslee/boost-svn
|
6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb
|
[
"BSL-1.0"
] | 1 |
2018-12-15T19:55:56.000Z
|
2018-12-15T19:55:56.000Z
|
tools/build/v2/test/build_dir.py
|
smart-make/boost
|
46509a094f8a844eefd5bb8a0030b739a04d79e1
|
[
"BSL-1.0"
] | null | null | null |
tools/build/v2/test/build_dir.py
|
smart-make/boost
|
46509a094f8a844eefd5bb8a0030b739a04d79e1
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that we can change build directory using the 'build-dir' project
# attribute.
import BoostBuild
import string
import os
t = BoostBuild.Tester()
# Test that top-level project can affect build dir.
t.write("jamroot.jam", "import gcc ;")
t.write("jamfile.jam", """\
project : build-dir build ;
exe a : a.cpp ;
build-project src ;
""")
t.write("a.cpp", "int main() {}\n")
t.write("src/jamfile.jam", "exe b : b.cpp ; ")
t.write("src/b.cpp", "int main() {}\n")
t.run_build_system()
t.expect_addition(["build/$toolset/debug/a.exe",
"build/src/$toolset/debug/b.exe"])
# Test that building from child projects work.
t.run_build_system(subdir='src')
t.ignore("build/config.log")
t.expect_nothing_more()
# Test that project can override build dir.
t.write("jamfile.jam", """\
exe a : a.cpp ;
build-project src ;
""")
t.write("src/jamfile.jam", """\
project : build-dir build ;
exe b : b.cpp ;
""")
t.run_build_system()
t.expect_addition(["bin/$toolset/debug/a.exe",
"src/build/$toolset/debug/b.exe"])
# Now test the '--build-dir' option.
t.rm(".")
t.write("jamroot.jam", "")
# Test that we get an error when no project id is specified.
t.run_build_system(["--build-dir=foo"])
t.fail_test(string.find(t.stdout(),
"warning: the --build-dir option will be ignored") == -1)
t.write("jamroot.jam", """\
project foo ;
exe a : a.cpp ;
build-project sub ;
""")
t.write("a.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", "exe b : b.cpp ;\n")
t.write("sub/b.cpp", "int main() {}\n")
t.run_build_system(["--build-dir=build"])
t.expect_addition(["build/foo/$toolset/debug/a.exe",
"build/foo/sub/$toolset/debug/b.exe"])
t.write("jamroot.jam", """\
project foo : build-dir bin.v2 ;
exe a : a.cpp ;
build-project sub ;
""")
t.run_build_system(["--build-dir=build"])
t.expect_addition(["build/foo/bin.v2/$toolset/debug/a.exe",
"build/foo/bin.v2/sub/$toolset/debug/b.exe"])
# Try building in subdir. We expect that the entire build tree will be in
# 'sub/build'. Today, I am not sure if this is what the user expects, but let
# it be.
t.rm('build')
t.run_build_system(["--build-dir=build"], subdir="sub")
t.expect_addition(["sub/build/foo/bin.v2/sub/$toolset/debug/b.exe"])
t.write("jamroot.jam", """\
project foo : build-dir %s ;
exe a : a.cpp ;
build-project sub ;
""" % string.replace(os.getcwd(), '\\', '\\\\'))
t.run_build_system(["--build-dir=build"], status=1)
t.fail_test(string.find(t.stdout(),
"Absolute directory specified via 'build-dir' project attribute") == -1)
t.cleanup()
| 26.457944 | 81 | 0.653833 |
f75a05cfb70187274d9492393de25937bbfe2bc0
| 12,228 |
py
|
Python
|
zaza/openstack/utilities/openstack_upgrade.py
|
freyes/zaza-openstack-tests
|
c9834315f996966aaedd95d712a991df7a449eb8
|
[
"ECL-2.0",
"Apache-2.0"
] | 5 |
2019-08-09T02:39:12.000Z
|
2021-05-18T14:19:51.000Z
|
zaza/openstack/utilities/openstack_upgrade.py
|
freyes/zaza-openstack-tests
|
c9834315f996966aaedd95d712a991df7a449eb8
|
[
"ECL-2.0",
"Apache-2.0"
] | 350 |
2019-05-13T10:28:33.000Z
|
2022-03-30T13:35:16.000Z
|
zaza/openstack/utilities/openstack_upgrade.py
|
freyes/zaza-openstack-tests
|
c9834315f996966aaedd95d712a991df7a449eb8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72 |
2019-04-18T06:05:01.000Z
|
2022-03-29T05:41:40.000Z
|
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for performing OpenStack upgrades.
This module contains a number of functions for upgrading OpenStack.
"""
import logging
import zaza.openstack.utilities.juju as juju_utils
import zaza.model
from zaza import sync_wrapper
from zaza.openstack.utilities.upgrade_utils import (
get_upgrade_groups,
)
async def async_pause_units(units, model_name=None):
"""Pause all units in unit list.
Pause all units in unit list. Wait for pause action
to complete.
:param units: List of unit names.
:type units: []
:param model_name: Name of model to query.
:type model_name: str
:rtype: juju.action.Action
:raises: zaza.model.ActionFailed
"""
logging.info("Pausing {}".format(', '.join(units)))
await zaza.model.async_run_action_on_units(
units,
'pause',
model_name=model_name,
raise_on_failure=True)
pause_units = sync_wrapper(async_pause_units)
async def async_resume_units(units, model_name=None):
"""Resume all units in unit list.
Resume all units in unit list. Wait for resume action
to complete.
:param units: List of unit names.
:type units: []
:param model_name: Name of model to query.
:type model_name: str
:rtype: juju.action.Action
:raises: zaza.model.ActionFailed
"""
logging.info("Resuming {}".format(', '.join(units)))
await zaza.model.async_run_action_on_units(
units,
'resume',
model_name=model_name,
raise_on_failure=True)
resume_units = sync_wrapper(async_resume_units)
async def async_action_unit_upgrade(units, model_name=None):
"""Run openstack-upgrade on all units in unit list.
Upgrade payload on all units in unit list. Wait for action
to complete.
:param units: List of unit names.
:type units: []
:param model_name: Name of model to query.
:type model_name: str
:rtype: juju.action.Action
:raises: zaza.model.ActionFailed
"""
logging.info("Upgrading {}".format(', '.join(units)))
await zaza.model.async_run_action_on_units(
units,
'openstack-upgrade',
model_name=model_name,
raise_on_failure=True)
action_unit_upgrade = sync_wrapper(async_action_unit_upgrade)
def action_upgrade_apps(applications, model_name=None):
"""Upgrade units in the applications using action managed upgrades.
Upgrade all units of the given applications using action managed upgrades.
This involves the following process:
1) Take a unit from each application which has not been upgraded yet.
    2) Pause all hacluster units associated with units to be upgraded.
3) Pause target units.
4) Upgrade target units.
5) Resume target units.
6) Resume hacluster units paused in step 2.
7) Repeat until all units are upgraded.
:param applications: List of application names.
:type applications: []
:param model_name: Name of model to query.
:type model_name: str
"""
status = zaza.model.get_status(model_name=model_name)
done = []
while True:
target = []
for app in applications:
for unit in zaza.model.get_units(app, model_name=model_name):
if unit.entity_id not in done:
target.append(unit.entity_id)
break
else:
logging.info("All units of {} upgraded".format(app))
if not target:
break
hacluster_units = juju_utils.get_subordinate_units(
target,
'hacluster',
status=status,
model_name=model_name)
# NOTE(lourot): we're more likely to time out while waiting for the
# action's result if we launch an action while the model is still
# executing. Thus it's safer to wait for the model to settle between
# actions.
zaza.model.block_until_all_units_idle(model_name)
pause_units(hacluster_units, model_name=model_name)
zaza.model.block_until_all_units_idle(model_name)
pause_units(target, model_name=model_name)
zaza.model.block_until_all_units_idle(model_name)
action_unit_upgrade(target, model_name=model_name)
zaza.model.block_until_all_units_idle(model_name)
resume_units(target, model_name=model_name)
zaza.model.block_until_all_units_idle(model_name)
resume_units(hacluster_units, model_name=model_name)
done.extend(target)
# Ensure that mysql-innodb-cluster has at least one R/W group (it can get
# into a state where all are R/O whilst it is sorting itself out after an
    # openstack_upgrade)
if "mysql-innodb-cluster" in applications:
block_until_mysql_innodb_cluster_has_rw(model_name)
# Now we need to wait for the model to go back to idle.
zaza.model.block_until_all_units_idle(model_name)
async def async_block_until_mysql_innodb_cluster_has_rw(model=None,
timeout=None):
"""Block until the mysql-innodb-cluster is in a healthy state.
Curiously, after a series of pauses and restarts (e.g. during an upgrade)
the mysql-innodb-cluster charms may not yet have agreed which one is the
R/W node; i.e. they are all R/O. Anyway, eventually they sort it out and
one jumps to the front and says "it's me!". This is detected, externally,
by the status line including R/W in the output.
This function blocks until that happens so that no charm attempts to have a
chat with the mysql server before it has settled, thus breaking the whole
test.
"""
async def async_check_workload_messages_for_rw(model=None):
"""Return True if a least one work message contains R/W."""
status = await zaza.model.async_get_status()
app_status = status.applications.get("mysql-innodb-cluster")
units_data = app_status.units.values()
workload_statuses = [d.workload_status.info for d in units_data]
return any("R/W" in s for s in workload_statuses)
await zaza.model.async_block_until(async_check_workload_messages_for_rw,
timeout=timeout)
block_until_mysql_innodb_cluster_has_rw = sync_wrapper(
async_block_until_mysql_innodb_cluster_has_rw)
def set_upgrade_application_config(applications, new_source,
action_managed=True, model_name=None):
"""Set the charm config for upgrade.
Set the charm config for upgrade.
:param applications: List of application names.
:type applications: List[str]
:param new_source: New package origin.
:type new_source: str
:param action_managed: Whether to set action-managed-upgrade config option.
:type action_managed: bool
:param model_name: Name of model to query.
:type model_name: str
"""
for app in applications:
src_option = 'openstack-origin'
charm_options = zaza.model.get_application_config(
app, model_name=model_name)
try:
charm_options[src_option]
except KeyError:
src_option = 'source'
config = {
src_option: new_source}
if action_managed:
config['action-managed-upgrade'] = 'True'
logging.info("Setting config for {} to {}".format(app, config))
zaza.model.set_application_config(
app,
config,
model_name=model_name)
def is_action_upgradable(app, model_name=None):
"""Can application be upgraded using action managed upgrade method.
:param app: The application to check
:type app: str
:param model_name: Name of model to query.
:type model_name: str
    :returns: Whether the app can be upgraded using the action managed upgrade method.
:rtype: bool
"""
config = zaza.model.get_application_config(app, model_name=model_name)
try:
config['action-managed-upgrade']
supported = True
except KeyError:
supported = False
return supported
def is_already_upgraded(app, new_src, model_name=None):
"""Return True if the app has already been upgraded.
:param app: The application to check
:type app: str
:param new_src: the new source (distro, cloud:x-y, etc.)
:type new_src: str
:param model_name: Name of model to query.
:type model_name: str
    :returns: Whether the app has already been upgraded to the new source.
:rtype: bool
"""
config = zaza.model.get_application_config(app, model_name=model_name)
try:
src = config['openstack-origin']['value']
key_was = 'openstack-origin'
except KeyError:
src = config['source']['value']
key_was = 'source'
logging.info("origin for {} is {}={}".format(app, key_was, src))
return src == new_src
def run_action_upgrades(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group using action upgrades.
:param apps: List of applications to upgrade.
:type apps: List[str]
:param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
set_upgrade_application_config(apps, new_source, model_name=model_name)
action_upgrade_apps(apps, model_name=model_name)
def run_all_in_one_upgrades(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group using all-in-one method.
:param apps: List of applications to upgrade.
:type apps: List[str]
    :param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
set_upgrade_application_config(
apps,
new_source,
model_name=model_name,
action_managed=False)
zaza.model.block_until_all_units_idle()
def run_upgrade_on_apps(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group.
    Upgrade apps using action managed upgrades where possible and fall back to
all_in_one method.
:param apps: List of applications to upgrade.
:type apps: []
:param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
action_upgrades = []
all_in_one_upgrades = []
for app in apps:
if is_already_upgraded(app, new_source, model_name=model_name):
logging.info("Application '%s' is already upgraded. Skipping.",
app)
continue
if is_action_upgradable(app, model_name=model_name):
action_upgrades.append(app)
else:
all_in_one_upgrades.append(app)
if all_in_one_upgrades:
run_all_in_one_upgrades(
all_in_one_upgrades,
new_source,
model_name=model_name)
if action_upgrades:
run_action_upgrades(
action_upgrades,
new_source,
model_name=model_name)
def run_upgrade_tests(new_source, model_name=None):
"""Upgrade payload of all applications in model.
    This is the most basic upgrade test. It should be adapted to add/remove
elements from the environment and add tests at intermediate stages.
:param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
groups = get_upgrade_groups(model_name=model_name)
for name, apps in groups:
logging.info("Performing upgrade of %s", name)
run_upgrade_on_apps(apps, new_source, model_name=model_name)
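# --- Editor's usage sketch (hedged) -------------------------------------------
# A typical driver for the helpers above, run against an already-deployed and
# bootstrapped model; the cloud archive pocket below is only an example value.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    run_upgrade_tests('cloud:bionic-train')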
| 34.44507 | 79 | 0.680651 |
431acfda8e8b6596b6264025d1cf502a925de516
| 13,924 |
py
|
Python
|
src/pytorch-project/siamese/siamese_baseline.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | 5 |
2021-01-07T10:11:57.000Z
|
2022-01-16T04:57:51.000Z
|
src/pytorch-project/siamese/siamese_baseline.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | null | null | null |
src/pytorch-project/siamese/siamese_baseline.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | 1 |
2021-08-05T07:34:16.000Z
|
2021-08-05T07:34:16.000Z
|
import os
import sys
import errno
import random
import pickle
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import DatasetFolder
from torchvision import transforms
import torch.nn.functional as F
from torch import nn
from torch import optim
from torchsummary import summary
import matplotlib.pyplot as plt
import torch.optim as optim
###############################################################################################################
# server
###############################################################################################################
# sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/')
# root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/'
###############################################################################################################
# HP computer
###############################################################################################################
sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing')
root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/'
ADNI_MODEL_EXTENSIONS = ('.pkl')
# 1 pickle loader (load one sample)
def pickle_loader(path_file):
dir_name = os.path.dirname(path_file)
with open(path_file, 'rb') as f:
model_adni = pickle.load(f)
return model_adni
# to check if the file type is allowed
def has_file_allowed_extension(filename, extensions):
return filename.lower().endswith(extensions)
def is_image_file(filename):
return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS)
# function
def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None):
images = []
dir = os.path.expanduser(dir)
if not ((extensions is None) ^ (is_valid_file is None)):
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = (path, class_to_idx[target])
images.append(item)
return images
# 2 Class Datafolder
class Dataset_ADNI_Folder(DatasetFolder):
# Methodes
def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None):
self.root = root
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
"Supported extensions are: " + ",".join(extensions)))
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.transform = transforms.Compose([transforms.ToTensor()])
self.targets = [s[1] for s in samples]
# __getitem__
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
# if self.transform is not None:
# sample = self.transform(sample)
# if self.target_transform is not None:
# target = self.target_transform(target)
# sample is objet instance from HippModel (L, R, V, Label)
return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target)
# __len__
def __len__(self):
return len(self.samples)
# _find_classes
def _find_classes(self, dir):
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
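# Assumed on-disk layout (sketch inferred from root_path and _find_classes;
# the class folder names below are an assumption based on the 'AD-NC' suffix
# of root_path, not something verified against the dataset itself):
#
#   <root>/train/AD/*.pkl    <root>/train/NC/*.pkl
#   <root>/valid/AD/*.pkl    <root>/valid/NC/*.pkl
#   <root>/test/AD/*.pkl     <root>/test/NC/*.pkl
#
# Each .pkl holds a pickled HippModel-like object exposing hippLeft,
# hippRight and hippMetaDataVector, as unpacked in __getitem__ above.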
# one stream network
class OneStreamNet(nn.Module):
def __init__(self):
super(OneStreamNet, self).__init__()
self.conv1 = nn.Conv3d(1, 32, kernel_size=3 ,stride=1, padding=0)
self.conv2 = nn.Conv3d(32, 64, kernel_size=3 ,stride=1, padding=0)
self.pool1 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0)
self.pool2 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
# Defining the fully connected layers
self.fc1 = nn.Linear(30000, 1024)
self.fc2 = nn.Linear(1024, 2)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.relu2(x)
        # Flatten the conv feature maps before the fully connected layers;
        # fc1's in_features (30000 here) must match the flattened size of the
        # conv output for the chosen input volume.
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        return x
# 3D HIPP
class HIPP3D(nn.Module):
def __init__(self):
super(HIPP3D, self).__init__()
self.conv3d1 = nn.Conv3d(1, 32, kernel_size=(4,4,4), stride=1, padding=1)
self.conv3d2 = nn.Conv3d(32, 64, kernel_size=(2,2,2), stride=1, padding=0)
self.fc1 = nn.Linear(64*7*7*7, 120)
# added by me
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(120, 2)
def forward(self, x):
x = F.max_pool3d(F.relu(self.conv3d1(x)), kernel_size=(3,3,3), stride=2, padding=0)
x = F.max_pool3d(F.relu(self.conv3d2(x)), kernel_size=(2,2,2), stride=2, padding=1)
x = x.view(-1, self.num_flat_features(x))
# x = self.dropout(F.relu(self.fc1(x)))
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
# Siamese 3D HIPP
class SiameseHipp3D(nn.Module):
# init
def __init__(self):
super(SiameseHipp3D, self).__init__()
self.conv3d1 = nn.Conv3d(1, 32, kernel_size=(4,4,4), stride=1, padding=1)
self.conv3d2 = nn.Conv3d(32, 64, kernel_size=(2,2,2), stride=1, padding=0)
self.fc1 = nn.Linear(64*7*7*7, 120)
# added by me
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(120, 40)
self.fc3 = nn.Linear(40, 12)
# concatenate
self.fc_conc = nn.Linear(24,2)
# forward_once
def forward_once(self, x):
x = F.max_pool3d(F.relu(self.conv3d1(x)), kernel_size=(3,3,3), stride=2, padding=0)
x = F.max_pool3d(F.relu(self.conv3d2(x)), kernel_size=(2,2,2), stride=2, padding=1)
x = x.view(-1, self.num_flat_features(x))
# x = self.dropout(F.relu(self.fc1(x)))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
    # forward siamese
def forward(self, x_1, x_2):
o_1 = self.forward_once(x_1)
o_2 = self.forward_once(x_2)
y = torch.cat((o_1, o_2), dim=1)
# print("y (4):", y[1])
y = F.relu(self.fc_conc(y))
# print("y (2):", y[1])
return y
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
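# Shape sanity-check sketch (not called anywhere; assumes 28x28x28 inputs as
# implied by the 64*7*7*7 size of fc1). Run it manually to verify the wiring.
def _smoke_test_siamese_hipp3d(batch_size=4):
    model = SiameseHipp3D()
    left = torch.randn(batch_size, 1, 28, 28, 28)
    right = torch.randn(batch_size, 1, 28, 28, 28)
    out = model(left, right)
    # Two 12-dim embeddings are concatenated and mapped to 2 logits.
    assert out.shape == (batch_size, 2)
    return out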
# Train function
def train(model, device, train_loader, epoch, optimizer):
pass
# Test function
def test(model, device, test_loader):
pass
#==========================================================================
# Function: Main definition
#==========================================================================
def main():
# parames for data
params_num_workers = 4
batch_size = 64
num_classes = 2
save_frequency = 2
learning_rate = 0.0001
num_epochs = 1
weight_decay = 0.0001
steps = 0
train_losses, test_losses = [], []
running_loss = 0
print_every = 10
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
print("using device :", device)
model = SiameseHipp3D().to(device)
# DataFolder
train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None)
valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None)
test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None)
# Dataloader
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
# net = SiameseHipp3D()
# summary(model, (1, 28, 28, 28))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
# Train the model
total_step = len(train_loader)
loss_list = []
acc_list = []
running_loss = 0.0
for epoch in range(num_epochs):
for i, (d1, d2, v, labels) in enumerate(train_loader):
# print(i)
# zero the parameter gradients
optimizer.zero_grad()
# # forward + backward + optimize
d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float)
d2 = torch.unsqueeze(d2, 1).to(device, dtype=torch.float)
labels = labels.to(device)
outputs = model(d1, d2)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Track the accuracy
total = labels.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
acc_list.append(correct / total)
if (i + 1) % 10 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), (correct / total) * 100))
# # print statistics
# running_loss += loss.item()
# if i % 2000 == 1999: # print every 2000 mini-batches
# print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
# running_loss = 0.0
print('Finished Training')
# for i, (d1, d2, v, labels) in enumerate(train_loader):
# print(i)
# # Run the forward pass
# d1 = torch.unsqueeze(d1, 0).to(device, dtype=torch.float)
# outputs = model(d1)
# loss = criterion(outputs, labels)
# loss_list.append(loss.item())
# # Backprop and perform Adam optimisation
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# # Track the accuracy
# total = labels.size(0)
# _, predicted = torch.max(outputs.data, 1)
# correct = (predicted == labels).sum().item()
# acc_list.append(correct / total)
# if (i + 1) % 100 == 0:
# print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
# .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
# (correct / total) * 100))
# model = OneStreamNet().to(device)
# summary(model, (1, 28, 28, 28))
# index = 0
# for d1, d2, v, labels in valid_loader:
# # print("key: {} : Left {} : Right {} : Vect {} : label {}".format(index, d1.size(), d2.size(), v, labels.size()))
# print("key: {} - Left {} : Right {} - Vect {} : label {}".format(index, d1.size(), d2.size(), len(v), labels.size()))
# index+= 1
#==========================================================================
# Start : __Main__
#==========================================================================
if __name__ == '__main__':
main()
| 30.669604 | 127 | 0.547328 |
c0a66e9df2fd8a4d9079af4c55679416055b602d
| 4,396 |
py
|
Python
|
tests/test_dataloaders.py
|
McMasterAI/RadiologyandAI-MedicalZooPytorch
|
606a1654f08b8bae7c265608694d55fecc1001ed
|
[
"MIT"
] | 995 |
2019-07-23T11:34:22.000Z
|
2022-03-30T21:10:52.000Z
|
tests/test_dataloaders.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 18 |
2020-04-27T03:38:22.000Z
|
2022-01-18T20:55:20.000Z
|
tests/test_dataloaders.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 209 |
2019-08-21T13:41:13.000Z
|
2022-03-30T08:01:52.000Z
|
# Python libraries
import argparse
# Lib files
import lib.utils as utils
import lib.medloaders as medical_loaders
class TestDataLoaders:
def __init__(self, batch=1, dim=64, classes=10):
self.batch = batch
self.dim = dim
self.classes = classes
self.binary_classes = 2
self.args = self.get_arguments()
def MRBRAINS_4_class(self):
self.args.dataset_name = "mrbrains"
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(self.args,
path='.././datasets')
print("mrbrains 4 OK!", len(training_generator), len(val_generator))
def MRBRAINS_9_class(self):
self.args.classes = 9
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(self.args,
path='.././datasets')
print("mrbrains 8 OK!", len(training_generator), len(val_generator))
def ISEG2017(self):
self.args.inChannels = 2
self.args.inModalities = 2
self.args.dataset_name = "iseg2017"
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(self.args,
path='.././datasets')
print("iseg OK! ", len(training_generator), len(val_generator))
def brats2018(self):
self.args.inChannels = 4
self.args.inModalities = 4
self.args.classes = 5
self.args.dataset_name = "brats2018"
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(self.args,
path='.././datasets')
print("brats2018 OK!", len(training_generator), len(val_generator))
def miccai2019(self):
self.args.dim = (64, 64)
self.args.inChannels = 3
self.args.inModalities = 1
self.args.classes = 7
self.args.dataset_name = "miccai2019"
training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(self.args,
path='.././datasets')
print("miccai2019 OK!", len(training_generator), len(val_generator))
def ixi(self):
self.args.inChannels = 2
self.args.inModalities = 2
self.args.dim = (1, 1, 1)
self.args.dataset_name = "ixi"
generator, affine = medical_loaders.generate_datasets(self.args, path='.././datasets')
print("ixi OK!", len(generator))
def get_arguments(self):
parser = argparse.ArgumentParser()
parser.add_argument('--batchSz', type=int, default=1)
parser.add_argument('--dataset_name', type=str, default="mrbrains")
parser.add_argument('--dim', nargs="+", type=int, default=(16, 16, 16))
parser.add_argument('--nEpochs', type=int, default=300)
parser.add_argument('--inChannels', type=int, default=3)
parser.add_argument('--inModalities', type=int, default=3)
parser.add_argument('--samples_train', type=int, default=10)
parser.add_argument('--samples_val', type=int, default=10)
parser.add_argument('--classes', type=int, default=4)
parser.add_argument('--fold_id', default='1', type=str, help='Select subject for fold validation')
parser.add_argument('--lr', default=1e-3, type=float,
help='learning rate (default: 1e-3)')
parser.add_argument('--cuda', action='store_true', default=False)
parser.add_argument('--model', type=str, default='UNET3D',
choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET'))
parser.add_argument('--opt', type=str, default='sgd',
choices=('sgd', 'adam', 'rmsprop'))
args = parser.parse_args()
return args
test_obj = TestDataLoaders(batch=1, dim=64, classes=10)
test_obj.MRBRAINS_4_class()
test_obj.MRBRAINS_9_class()
test_obj.ISEG2017()
test_obj.brats2018()
test_obj.miccai2019()
# test_obj.ixi()
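# Example invocation (sketch): running the file exercises each loader via the
# module-level calls above; the argparse flags defined in get_arguments can be
# overridden from the command line, e.g.
#   python tests/test_dataloaders.py --dim 32 32 32 --samples_train 5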
| 43.524752 | 120 | 0.579618 |
030b11c9438dfe15669ab07d72fabd0d755d1ee8
| 3,540 |
py
|
Python
|
bindings/python/ensmallen/datasets/string/vavraiaculicissubspfloridensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5 |
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/vavraiaculicissubspfloridensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18 |
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/vavraiaculicissubspfloridensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3 |
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Vavraia culicis subsp. floridensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def VavraiaCulicisSubspFloridensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Vavraia culicis subsp. floridensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Vavraia culicis subsp. floridensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="VavraiaCulicisSubspFloridensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
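# Usage sketch (not part of the generated module; requires network access to
# the STRING mirrors on the first call, after which the local cache is used):
def _example_load(directed: bool = False) -> Graph:
    """Retrieve the graph with default settings and return it."""
    return VavraiaCulicisSubspFloridensis(directed=directed, verbose=2)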
| 33.714286 | 223 | 0.681356 |
392c01cc9ccc3844d7865b0fa269068bd077b337
| 9,338 |
py
|
Python
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_schemas_operations.py
|
RAY-316/azure-sdk-for-python
|
4f7790deaf46c6f4e965f099f36eb73a7954ad5b
|
[
"MIT"
] | 2 |
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_schemas_operations.py
|
RSidea/azure-sdk-for-python
|
8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef
|
[
"MIT"
] | null | null | null |
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_schemas_operations.py
|
RSidea/azure-sdk-for-python
|
8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef
|
[
"MIT"
] | 1 |
2021-12-18T20:01:22.000Z
|
2021-12-18T20:01:22.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlPoolSchemasOperations:
"""SqlPoolSchemasOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
filter: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.SqlPoolSchemaListResult"]:
"""Gets schemas of a given SQL pool.
Gets schemas of a given SQL pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param filter: An OData filter expression that filters elements in the collection.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlPoolSchemaListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.SqlPoolSchemaListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlPoolSchemaListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SqlPoolSchemaListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas'} # type: ignore
async def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
schema_name: str,
**kwargs
) -> "_models.Resource":
"""Get Sql Pool schema.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param schema_name: The name of the schema.
:type schema_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Resource, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.Resource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Resource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'schemaName': self._serialize.url("schema_name", schema_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Resource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}'} # type: ignore
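# Usage sketch (assumption, not part of the generated code): this operation
# group is normally reached through the service client rather than directly,
# roughly along these lines; the `sql_pool_schemas` attribute name is assumed
# from the operation-group naming convention used by this SDK:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.synapse.aio import SynapseManagementClient
#
#     client = SynapseManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     async for schema in client.sql_pool_schemas.list("<rg>", "<workspace>", "<pool>"):
#         print(schema.name)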
| 48.134021 | 211 | 0.658171 |
12e5dddd5dedcc55c1d4e66dba281afe2f3aa92b
| 2,841 |
py
|
Python
|
TrialPathfinder/shapley_computation.py
|
cshukai/TrialPathfinder
|
c46b7652171f5abb535d06c3a0276e6b891bae02
|
[
"MIT"
] | 27 |
2021-04-07T16:53:10.000Z
|
2022-03-02T19:55:49.000Z
|
TrialPathfinder/shapley_computation.py
|
AprilCCC/TrialPathfinder
|
61649369b99e7d43afdadebf4c36753b62010a7f
|
[
"MIT"
] | null | null | null |
TrialPathfinder/shapley_computation.py
|
AprilCCC/TrialPathfinder
|
61649369b99e7d43afdadebf4c36753b62010a7f
|
[
"MIT"
] | 13 |
2021-04-20T22:26:10.000Z
|
2022-03-29T17:18:04.000Z
|
from .utils import *
def shapley_computation(cohort, features, drug_treatment, drug_control, name_rules, tolerance=0.001, iter_max=1000, covariates_cont=[], covariates_cat=[], thresh_censor=None, name_DrugName='DrugName', name_StartDate='StartDate', name_OutcomeDate='OutcomeDate', name_LastVisitDate='LastVisitDate', indicator_miss='Missing', random_seed=1001, verbose=0):
'''
    Compute Shapley values of the eligibility criteria by Monte Carlo sampling
    of rule permutations: for each random ordering, the marginal change in the
    hazard ratio (HR) contributed by each rule is recorded, and the per-rule
    averages are returned once their Monte Carlo standard error falls below
    `tolerance` (or `iter_max` is reached).
'''
np.random.seed(random_seed)
def get_HR(name_rules, data_survival):
'''Return HR given criteria and survival data'''
data_cox = generate_trial_cox(cohort, data_survival, drug_treatment, drug_control,
name_rules=name_rules, name_DrugName=name_DrugName,
covariates_cont=covariates_cont, covariates_cat=covariates_cat)
HR, _ = cox(data_cox)
return HR
def compute_SEM(dHRs):
'''Compute the standard error of the Monte Carlo mean'''
dHRs = np.array(dHRs)
SEM = np.mean([np.std(dHRs[:, i])/np.sqrt(dHRs.shape[0]) for i in range(dHRs.shape[1])])
return SEM
# Generate survival information.
data_survival = generate_survival_data(features.copy(), cohort.name_PatientID,
covariates=covariates_cont+covariates_cat, thresh_censor=thresh_censor,
name_DrugName=name_DrugName, name_StartDate=name_StartDate,
name_OutcomeDate=name_OutcomeDate, name_LastVisitDate=name_LastVisitDate,
indicator_miss=indicator_miss)
# HR for Empty set and full set
HR_empty = get_HR([], data_survival)
HR_full = get_HR(name_rules, data_survival)
dHRs = []
# Shapley Computation
n_rules = len(name_rules)
name_rules = np.array(name_rules)
for m in range(iter_max):
dHR = np.zeros([n_rules])
idx = np.random.permutation(n_rules)
HRs = [HR_empty]
for i_rule in range(1, n_rules):
name_rules_subset = name_rules[idx][:i_rule]
HR = get_HR(name_rules_subset, data_survival)
HRs.append(HR)
HRs.append(HR_full)
dHR[idx] = np.array([HRs[i]-HRs[i-1] for i in range(1, len(HRs))])
dHRs.append(dHR)
# Convergence checking
SEM = compute_SEM(dHRs)
if verbose:
print('Shapley Computation Iteration %d | SEM = %.4f' % (m, SEM))
if (m>0) and (SEM < tolerance):
print('Stopping criteria satisfied!')
break
if m == (iter_max-1):
print('Maximum iteration reached!')
shapley_value = np.mean(dHRs, axis=0)
return shapley_value
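# Usage sketch (assumption: `cohort` and `features` follow the same data
# conventions as the rest of TrialPathfinder; the drug and rule names below
# are placeholders, not values taken from this repository):
#
#     shapley = shapley_computation(
#         cohort, features,
#         drug_treatment=['drug_A'], drug_control=['drug_B'],
#         name_rules=['age', 'ecog', 'histology'],
#         covariates_cat=['gender'], verbose=1)
#     for rule, value in zip(['age', 'ecog', 'histology'], shapley):
#         print(rule, round(value, 3))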
| 45.822581 | 352 | 0.611756 |
ad395a27912ae38cf68c862babe432a3c0548dfa
| 16,349 |
py
|
Python
|
third_party/logilab/astroid/brain/brain_stdlib.py
|
stdft112/depot_tools
|
52c7211807930272424213ff6127c209de790eca
|
[
"BSD-3-Clause"
] | 35 |
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
third_party/logilab/astroid/brain/brain_stdlib.py
|
stdft112/depot_tools
|
52c7211807930272424213ff6127c209de790eca
|
[
"BSD-3-Clause"
] | 28 |
2020-03-04T22:01:48.000Z
|
2022-03-12T00:59:47.000Z
|
third_party/logilab/astroid/brain/brain_stdlib.py
|
stdft112/depot_tools
|
52c7211807930272424213ff6127c209de790eca
|
[
"BSD-3-Clause"
] | 88 |
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
"""Astroid hooks for the Python 2 standard library.
Currently helps understanding of:
* hashlib.md5 and hashlib.sha1
"""
import functools
import sys
from textwrap import dedent
from astroid import (
MANAGER, UseInferenceDefault, inference_tip, BoundMethod,
InferenceError, register_module_extender)
from astroid import exceptions
from astroid import nodes
from astroid.builder import AstroidBuilder
from astroid import util
from astroid import test_utils
PY3K = sys.version_info > (3, 0)
PY33 = sys.version_info >= (3, 3)
PY34 = sys.version_info >= (3, 4)
# general function
def infer_func_form(node, base_type, context=None, enum=False):
"""Specific inference function for namedtuple or Python 3 enum. """
def infer_first(node):
if node is util.YES:
raise UseInferenceDefault
try:
value = next(node.infer(context=context))
if value is util.YES:
raise UseInferenceDefault()
else:
return value
except StopIteration:
raise InferenceError()
# node is a Call node, class name as first argument and generated class
# attributes as second argument
if len(node.args) != 2:
# something weird here, go back to class implementation
raise UseInferenceDefault()
    # The namedtuple or enum attributes can be given as a list of strings or a
    # whitespace-separated string
try:
name = infer_first(node.args[0]).value
names = infer_first(node.args[1])
try:
attributes = names.value.replace(',', ' ').split()
except AttributeError:
if not enum:
attributes = [infer_first(const).value for const in names.elts]
else:
# Enums supports either iterator of (name, value) pairs
# or mappings.
# TODO: support only list, tuples and mappings.
if hasattr(names, 'items') and isinstance(names.items, list):
attributes = [infer_first(const[0]).value
for const in names.items
if isinstance(const[0], nodes.Const)]
elif hasattr(names, 'elts'):
# Enums can support either ["a", "b", "c"]
# or [("a", 1), ("b", 2), ...], but they can't
# be mixed.
if all(isinstance(const, nodes.Tuple)
for const in names.elts):
attributes = [infer_first(const.elts[0]).value
for const in names.elts
if isinstance(const, nodes.Tuple)]
else:
attributes = [infer_first(const).value
for const in names.elts]
else:
raise AttributeError
if not attributes:
raise AttributeError
except (AttributeError, exceptions.InferenceError):
raise UseInferenceDefault()
    # If we can't infer the name of the class, don't crash; up to this point
# we know it is a namedtuple anyway.
name = name or 'Uninferable'
# we want to return a Class node instance with proper attributes set
class_node = nodes.ClassDef(name, 'docstring')
class_node.parent = node.parent
# set base class=tuple
class_node.bases.append(base_type)
# XXX add __init__(*attributes) method
for attr in attributes:
fake_node = nodes.EmptyNode()
fake_node.parent = class_node
fake_node.attrname = attr
class_node._instance_attrs[attr] = [fake_node]
return class_node, name, attributes
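# For reference, both of these call shapes are handled by infer_func_form
# above (the examples are illustrative only):
#   namedtuple('Point', 'x y')                   # whitespace-separated field string
#   Enum('Color', [('RED', 1), ('GREEN', 2)])    # iterable of (name, value) pairs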
# module specific transformation functions #####################################
def hashlib_transform():
template = '''
class %(name)s(object):
def __init__(self, value=''): pass
def digest(self):
return %(digest)s
def copy(self):
return self
def update(self, value): pass
def hexdigest(self):
return ''
@property
def name(self):
return %(name)r
@property
def block_size(self):
return 1
@property
def digest_size(self):
return 1
'''
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
classes = "".join(
template % {'name': hashfunc, 'digest': 'b""' if PY3K else '""'}
for hashfunc in algorithms)
return AstroidBuilder(MANAGER).string_build(classes)
def collections_transform():
return AstroidBuilder(MANAGER).string_build('''
class defaultdict(dict):
default_factory = None
def __missing__(self, key): pass
class deque(object):
maxlen = 0
def __init__(self, iterable=None, maxlen=None):
self.iterable = iterable
def append(self, x): pass
def appendleft(self, x): pass
def clear(self): pass
def count(self, x): return 0
def extend(self, iterable): pass
def extendleft(self, iterable): pass
def pop(self): pass
def popleft(self): pass
def remove(self, value): pass
def reverse(self): pass
def rotate(self, n): pass
def __iter__(self): return self
def __reversed__(self): return self.iterable[::-1]
def __getitem__(self, index): pass
def __setitem__(self, index, value): pass
def __delitem__(self, index): pass
''')
def pkg_resources_transform():
return AstroidBuilder(MANAGER).string_build('''
def require(*requirements):
return pkg_resources.working_set.require(*requirements)
def run_script(requires, script_name):
return pkg_resources.working_set.run_script(requires, script_name)
def iter_entry_points(group, name=None):
return pkg_resources.working_set.iter_entry_points(group, name)
def resource_exists(package_or_requirement, resource_name):
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(package_or_requirement, resource_name):
return get_provider(package_or_requirement).resource_isdir(
resource_name)
def resource_filename(package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name)
def resource_stream(package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name)
def resource_string(package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_string(
self, resource_name)
def resource_listdir(package_or_requirement, resource_name):
return get_provider(package_or_requirement).resource_listdir(
resource_name)
def extraction_error():
pass
def get_cache_path(archive_name, names=()):
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
return target_path
def postprocess(tempname, filename):
pass
def set_extraction_path(path):
pass
def cleanup_resources(force=False):
pass
''')
def subprocess_transform():
if PY3K:
communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
"""
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33:
wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if PY3K:
ctx_manager = '''
def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
code = dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(init)s
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'init': init,
'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
return AstroidBuilder(MANAGER).string_build(code)
# namedtuple support ###########################################################
def _looks_like(node, name):
func = node.func
if isinstance(func, nodes.Attribute):
return func.attrname == name
if isinstance(func, nodes.Name):
return func.name == name
return False
_looks_like_namedtuple = functools.partial(_looks_like, name='namedtuple')
_looks_like_enum = functools.partial(_looks_like, name='Enum')
def infer_named_tuple(node, context=None):
"""Specific inference function for namedtuple Call node"""
class_node, name, attributes = infer_func_form(node, nodes.Tuple._proxied,
context=context)
fake = AstroidBuilder(MANAGER).string_build('''
class %(name)s(tuple):
_fields = %(fields)r
def _asdict(self):
return self.__dict__
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
return new(cls, iterable)
def _replace(self, **kwds):
return self
''' % {'name': name, 'fields': attributes})
class_node._locals['_asdict'] = fake.body[0]._locals['_asdict']
class_node._locals['_make'] = fake.body[0]._locals['_make']
class_node._locals['_replace'] = fake.body[0]._locals['_replace']
class_node._locals['_fields'] = fake.body[0]._locals['_fields']
# we use UseInferenceDefault, we can't be a generator so return an iterator
return iter([class_node])
def infer_enum(node, context=None):
""" Specific inference function for enum Call node. """
enum_meta = test_utils.extract_node('''
class EnumMeta(object):
'docstring'
def __call__(self, node):
class EnumAttribute(object):
name = ''
value = 0
return EnumAttribute()
''')
class_node = infer_func_form(node, enum_meta,
context=context, enum=True)[0]
return iter([class_node.instantiate_class()])
def infer_enum_class(node):
""" Specific inference for enums. """
names = set(('Enum', 'IntEnum', 'enum.Enum', 'enum.IntEnum'))
for basename in node.basenames:
# TODO: doesn't handle subclasses yet. This implementation
# is a hack to support enums.
if basename not in names:
continue
if node.root().name == 'enum':
# Skip if the class is directly from enum module.
break
for local, values in node._locals.items():
if any(not isinstance(value, nodes.AssignName)
for value in values):
continue
stmt = values[0].statement()
if isinstance(stmt.targets[0], nodes.Tuple):
targets = stmt.targets[0].itered()
else:
targets = stmt.targets
new_targets = []
for target in targets:
# Replace all the assignments with our mocked class.
classdef = dedent('''
class %(name)s(%(types)s):
@property
def value(self):
# Not the best return.
return None
@property
def name(self):
return %(name)r
''' % {'name': target.name, 'types': ', '.join(node.basenames)})
fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name]
fake.parent = target.parent
for method in node.mymethods():
fake._locals[method.name] = [method]
new_targets.append(fake.instantiate_class())
node._locals[local] = new_targets
break
return node
def multiprocessing_transform():
module = AstroidBuilder(MANAGER).string_build(dedent('''
from multiprocessing.managers import SyncManager
def Manager():
return SyncManager()
'''))
if not PY34:
return module
# On Python 3.4, multiprocessing uses a getattr lookup inside contexts,
# in order to get the attributes they need. Since it's extremely
# dynamic, we use this approach to fake it.
node = AstroidBuilder(MANAGER).string_build(dedent('''
from multiprocessing.context import DefaultContext, BaseContext
default = DefaultContext()
base = BaseContext()
'''))
try:
context = next(node['default'].infer())
base = next(node['base'].infer())
except InferenceError:
return module
for node in (context, base):
for key, value in node._locals.items():
if key.startswith("_"):
continue
value = value[0]
if isinstance(value, nodes.FunctionDef):
# We need to rebound this, since otherwise
# it will have an extra argument (self).
value = BoundMethod(value, node)
module[key] = value
return module
def multiprocessing_managers_transform():
return AstroidBuilder(MANAGER).string_build(dedent('''
import array
import threading
import multiprocessing.pool as pool
import six
class Namespace(object):
pass
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class SyncManager(object):
Queue = JoinableQueue = six.moves.queue.Queue
Event = threading.Event
RLock = threading.RLock
BoundedSemaphore = threading.BoundedSemaphore
Condition = threading.Condition
Barrier = threading.Barrier
Pool = pool.Pool
list = list
dict = dict
Value = Value
Array = Array
Namespace = Namespace
__enter__ = lambda self: self
__exit__ = lambda *args: args
def start(self, initializer=None, initargs=None):
pass
def shutdown(self):
pass
'''))
MANAGER.register_transform(nodes.Call, inference_tip(infer_named_tuple),
_looks_like_namedtuple)
MANAGER.register_transform(nodes.Call, inference_tip(infer_enum),
_looks_like_enum)
MANAGER.register_transform(nodes.ClassDef, infer_enum_class)
register_module_extender(MANAGER, 'hashlib', hashlib_transform)
register_module_extender(MANAGER, 'collections', collections_transform)
register_module_extender(MANAGER, 'pkg_resources', pkg_resources_transform)
register_module_extender(MANAGER, 'subprocess', subprocess_transform)
register_module_extender(MANAGER, 'multiprocessing.managers',
multiprocessing_managers_transform)
register_module_extender(MANAGER, 'multiprocessing', multiprocessing_transform)
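# Inference sanity-check sketch (not executed at import time). It reuses the
# test_utils.extract_node helper already imported above; whether a string
# built this way has the transforms applied depends on the astroid version,
# so treat this as an illustration rather than a guaranteed test.
def _demo_namedtuple_inference():
    node = test_utils.extract_node('''
    import collections
    collections.namedtuple('Point', 'x y')  #@
    ''')
    inferred = next(node.infer())
    # Expected: a synthesized ClassDef named 'Point' carrying the declared fields.
    return inferred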
| 34.491561 | 82 | 0.614472 |
329e56d98585b04d5e78583f1b8d8d96fef74504
| 14,503 |
py
|
Python
|
resnet/ADV_ResNet.py
|
tropicalwzc/ice_sudoku.github.io
|
5dddc1b49c1090aec211b6c4e08a771be54deefe
|
[
"MIT"
] | 73 |
2019-11-27T08:10:32.000Z
|
2022-03-25T07:12:47.000Z
|
resnet/ADV_ResNet.py
|
tropicalwzc/ice_sudoku.github.io
|
5dddc1b49c1090aec211b6c4e08a771be54deefe
|
[
"MIT"
] | 7 |
2020-06-27T09:46:27.000Z
|
2022-03-31T02:54:35.000Z
|
resnet/ADV_ResNet.py
|
tropicalwzc/ice_sudoku.github.io
|
5dddc1b49c1090aec211b6c4e08a771be54deefe
|
[
"MIT"
] | 13 |
2020-08-24T04:47:57.000Z
|
2022-01-05T01:02:44.000Z
|
import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.dropout1 = nn.Dropout(0.5)
self.dropout2_hard = nn.Dropout2d(0.5)
self.dropout2_mid = nn.Dropout2d(0.48)
self.dropout2_light = nn.Dropout2d(0.46)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.dropout2_light(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.dropout2_light(x)
x = self.layer2(x)
x = self.dropout2_light(x)
x = self.layer3(x)
x = self.dropout2_mid(x)
x = self.layer4(x)
x = self.dropout2_hard(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.dropout1(x)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
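# --- Hedged usage sketch (editor addition, not part of the upstream file). ---
# It assumes the usual torchvision-style BasicBlock/Bottleneck/_resnet helpers
# defined earlier in this module and that `torch` is installed.
if __name__ == "__main__":
    import torch
    model = resnet50(pretrained=False)     # random weights; pretrained=True downloads them
    dummy = torch.randn(1, 3, 224, 224)    # a single fake RGB image
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)                    # torch.Size([1, 1000]) for the ImageNet head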
| 40.853521 | 107 | 0.6338 |
96219f3cc021ad526eabcd2fc3b8224f87b3cdbc
| 220 |
py
|
Python
|
kdezero/datasets/__init__.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/datasets/__init__.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/datasets/__init__.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
from kdezero.datasets.core import Dataset
from kdezero.datasets.imagenet import ImageNet
from kdezero.datasets.mnist import MNIST
from kdezero.datasets.sincurve import SinCurve
from kdezero.datasets.spiral import Spiral
| 36.666667 | 46 | 0.863636 |
f61420fbca467ad030e0228f3d5b60fdabdc5837
| 5,788 |
py
|
Python
|
trivia_game.py
|
amm042/pytrivia-server
|
7ae295899d23d1140a2649d8aff4e1859813f34f
|
[
"MIT"
] | null | null | null |
trivia_game.py
|
amm042/pytrivia-server
|
7ae295899d23d1140a2649d8aff4e1859813f34f
|
[
"MIT"
] | null | null | null |
trivia_game.py
|
amm042/pytrivia-server
|
7ae295899d23d1140a2649d8aff4e1859813f34f
|
[
"MIT"
] | null | null | null |
import os
import random
import json
import datetime
import string
import logging
import os.path
from Crypto.PublicKey import RSA
log = logging.getLogger(__name__)
def cleanstr(s, n=20):
"clean string to printable chars with max length n"
    if s is None:
return "NONE"
try:
s = s.decode()
except AttributeError:
pass
try:
q = ''.join(x for x in s[:n] if x in string.printable[:64])
except TypeError:
q = "TypeError"
return q
class TriviaClient:
def __init__(self, client_socket, client_dir='./user_dat'):
self.skt = client_socket
self.connect_at = datetime.datetime.now()
self.current_q = None
self.question_sent = None
self.handle = self.authenticate
self.score = 0
self.counters = [0,0,0,0] # auth, correct, incorrect, invalid
self.username = None
self.client_dir = client_dir
self.authenticated = False
log.info("New client {}".format(self.skt.getpeername()))
def authenticate(self, msg):
"the default handler until authenticated"
msg = msg.decode()
self.username = cleanstr(msg.strip())[5:]
log.info("Auth from {}".format(self.username))
#send nonce
self.nonce = "{}".format(random.random())
self.skt.send(self.nonce.encode())
self.handle = self.authenticate_response
def authenticate_response(self, msg):
pubkey = os.path.join(
os.path.expanduser('~'+self.username),
"id_rsa.pub")
if os.path.exists(pubkey):
try:
with open(pubkey, 'r') as f:
rsa = RSA.importKey(f.read())
# log.info("got key {}".format(rsa))
# log.info("encrypt {}".format(rsa.can_encrypt()))
# log.info("has_private {}".format(rsa.has_private()))
try:
resp = rsa.encrypt(msg, 32)[0].decode()
except Exception as x:
log.error(x)
resp = None
# log.info("auth resp {}, wanted {}".format(resp, self.nonce))
if resp == self.nonce:
self.authenticated = True
self.skt.send(b"AUTHORIZED")
self.restore()
self.counters[0] += 1
# authenticated, set game handler
self.handle = self.play
else:
self.skt.send(b"NOTAUTHORIZED")
self.handle = self.authenticate
except PermissionError:
self.skt.send("NOPERMS {}".format(pubkey).encode())
self.handle = self.authenticate
else:
self.skt.send("NOKEY {}".format(pubkey).encode())
self.handle = self.authenticate
def save(self):
"write score history for this player"
filename = os.path.join(self.client_dir, self.username)
with open(filename, 'w') as f:
json.dump({
'score': self.score,
'counters': self.counters
}, f)
def restore(self):
filename = os.path.join(self.client_dir, self.username)
if os.path.exists(filename):
with open(filename, 'r') as f:
h = json.load(f)
self.score = h['score']
self.counters = h['counters']
def play(self, msg):
"when a msg is recieved on this game"
msg = msg.decode()
log.info("{} play: {}".format(self.skt.getpeername(), msg))
if self.current_q:
resp_time = datetime.datetime.now() - self.question_sent
if msg in self.current_q['choices']:
r = ""
if msg == self.current_q['answer']:
self.score += max(60, 100 - resp_time.total_seconds())
r = "CORRECT SCORE {}".format(int(self.score))
self.counters[1] += 1
else:
self.score -= (100 / len(self.current_q['choices']))
r = "INCORRECT SCORE {}".format(int(self.score))
self.counters[2] += 1
self.skt.send(r.encode())
self.save()
else:
self.counters[3] += 1
self.skt.send(b"INVALID")
# invalidate the question
self.current_q = None
else:
self.skt.send(b"NOQUESTION")
def send_question(self, question):
if self.authenticated:
self.current_q = question
log.info("{} -> SendQ: {}".format(
self.skt.getpeername(),
question['question']))
msg = [question['question']] + question['choices']
self.skt.send('\n'.join(msg).encode())
self.question_sent = datetime.datetime.now()
""" question format is a dict:
{
'question': 'A “face mask” is a common penalty in what sport?',
'choices': ['FOOTBALL', 'ILLEGAL', 'PLAYER', 'MASK', 'FACE', 'HELMET'],
'answer': 'FOOTBALL',
'created': '2019-04-24T21:21:57.664294'}
"""
class TriviaServer:
def __init__(self, trivia_dir='./trivia'):
self.questions = list(
map(lambda x: os.path.join(trivia_dir, x),
os.listdir(trivia_dir))
)
def get_question(self):
qf = random.choice(self.questions)
with open(qf, 'r') as f:
return json.load(f)
if __name__ == "__main__":
ts = TriviaServer()
print ("have {} questions".format(len(ts.questions)))
for i in range(10):
print ("\t{}".format(ts.get_question()))
| 31.977901 | 82 | 0.521596 |
da4096cf718cb81a7f147d72d0e5fdce7b3487fb
| 5,845 |
py
|
Python
|
grr/test_lib/fixture_test_lib.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/test_lib/fixture_test_lib.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/test_lib/fixture_test_lib.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Client fixture-related test classes."""
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server import artifact
from grr.server.grr_response_server import client_fixture
from grr.server.grr_response_server import client_index
from grr.server.grr_response_server import data_migration
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server.aff4_objects import aff4_grr
from grr.test_lib import test_lib
# Make the fixture appear to be 1 week old.
FIXTURE_TIME = test_lib.FIXED_TIME
class LegacyClientFixture(object):
"""A tool to create a client fixture.
This will populate the AFF4 object tree in the data store with a mock client
filesystem, including various objects. This allows us to test various
end-to-end aspects (e.g. GUI).
"""
def __init__(self, client_id, token=None, fixture=None, age=None, **kwargs):
"""Constructor.
Args:
client_id: The unique id for the new client.
token: An instance of access_control.ACLToken security token.
fixture: An optional fixture to install. If not provided we use
client_fixture.VFS.
age: Create the fixture at this timestamp. If None we use FIXTURE_TIME.
**kwargs: Any other parameters which need to be interpolated by the
fixture.
"""
self.args = kwargs
self.token = token
self.age = age or FIXTURE_TIME.AsSecondsSinceEpoch()
self.client_id = rdf_client.ClientURN(client_id)
self.args["client_id"] = self.client_id.Basename()
self.args["age"] = self.age
self.CreateClientObject(fixture or client_fixture.VFS)
def CreateClientObject(self, vfs_fixture):
"""Make a new client object."""
# First remove the old fixture just in case its still there.
aff4.FACTORY.Delete(self.client_id, token=self.token)
# Create the fixture at a fixed time.
with test_lib.FakeTime(self.age):
for path, (aff4_type, attributes) in vfs_fixture:
path %= self.args
aff4_object = aff4.FACTORY.Create(
self.client_id.Add(path), aff4_type, mode="rw", token=self.token)
for attribute_name, value in attributes.items():
attribute = aff4.Attribute.PREDICATES[attribute_name]
if isinstance(value, (str, unicode)):
# Interpolate the value
value %= self.args
# Is this supposed to be an RDFValue array?
if aff4.issubclass(attribute.attribute_type,
rdf_protodict.RDFValueArray):
rdfvalue_object = attribute()
for item in value:
new_object = rdfvalue_object.rdf_type.FromTextFormat(
utils.SmartStr(item))
rdfvalue_object.Append(new_object)
# It is a text serialized protobuf.
elif aff4.issubclass(attribute.attribute_type,
rdf_structs.RDFProtoStruct):
# Use the alternate constructor - we always write protobufs in
# textual form:
rdfvalue_object = attribute.attribute_type.FromTextFormat(
utils.SmartStr(value))
elif aff4.issubclass(attribute.attribute_type, rdfvalue.RDFInteger):
rdfvalue_object = attribute(int(value))
else:
rdfvalue_object = attribute(value)
# If we don't already have a pathspec, try and get one from the stat.
if aff4_object.Get(aff4_object.Schema.PATHSPEC) is None:
# If the attribute was a stat, it has a pathspec nested in it.
# We should add that pathspec as an attribute.
if attribute.attribute_type == rdf_client.StatEntry:
stat_object = attribute.attribute_type.FromTextFormat(
utils.SmartStr(value))
if stat_object.pathspec:
pathspec_attribute = aff4.Attribute(
"aff4:pathspec", rdf_paths.PathSpec,
"The pathspec used to retrieve "
"this object from the client.", "pathspec")
aff4_object.AddAttribute(pathspec_attribute,
stat_object.pathspec)
          if attribute == "aff4:content":
            # For AFF4MemoryStreams we need to call Write() instead of
            # directly setting the contents.
aff4_object.Write(rdfvalue_object)
else:
aff4_object.AddAttribute(attribute, rdfvalue_object)
# Populate the KB from the client attributes.
if aff4_type == aff4_grr.VFSGRRClient:
kb = rdf_client.KnowledgeBase()
artifact.SetCoreGRRKnowledgeBaseValues(kb, aff4_object)
aff4_object.Set(aff4_object.Schema.KNOWLEDGE_BASE, kb)
# Make sure we do not actually close the object here - we only want to
# sync back its attributes, not run any finalization code.
aff4_object.Flush()
if aff4_type == aff4_grr.VFSGRRClient:
index = client_index.CreateClientIndex(token=self.token)
index.AddClient(aff4_object)
def ClientFixture(client_id, token=None, age=None):
"""Creates a client fixture with a predefined VFS tree."""
if hasattr(client_id, "Basename"):
client_id = client_id.Basename()
LegacyClientFixture(client_id, age=age, token=token)
if not data_store.RelationalDBReadEnabled():
return
data_migration.Migrate(thread_count=1)
db_client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
client_index.ClientIndex().AddClient(db_client_snapshot)
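# Hedged usage note (editor addition): inside a test, a populated mock client is
# typically created with a call along the lines of
#
#   ClientFixture("C.1000000000000000", token=token)
#
# which installs the default client_fixture.VFS tree for that client id; the
# client id shown here is only an example.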
| 40.310345 | 79 | 0.679555 |
4e927233ae938e935d8b769b60e381946466051a
| 310 |
py
|
Python
|
Exercícios/Ex.49.py
|
mattheuslima/Projetos-Curso_Python
|
ab4cab98fe69b70245b5bcf41edd0febe823ac6a
|
[
"MIT"
] | null | null | null |
Exercícios/Ex.49.py
|
mattheuslima/Projetos-Curso_Python
|
ab4cab98fe69b70245b5bcf41edd0febe823ac6a
|
[
"MIT"
] | null | null | null |
Exercícios/Ex.49.py
|
mattheuslima/Projetos-Curso_Python
|
ab4cab98fe69b70245b5bcf41edd0febe823ac6a
|
[
"MIT"
] | null | null | null |
# Redo challenge 9, showing the multiplication table of the number the user chooses, using a for loop
print('-='*10)
print('{:=^20}'.format('Challenge 49'))
print('-='*10)
num = int(input('Which number would you like the multiplication table for? '))
for c in range(1, 11):
    multi = num*c
    print('{} x {} = {}'.format(num, c, multi))
print('End')
| 23.846154 | 85 | 0.63871 |
1ce8193af3aec26d79b3c2ed8d72fb1bd574795e
| 501 |
py
|
Python
|
0674_longest_continuous_increasing_subsequence.py
|
subwaymatch/leetcode
|
2592ba2e55682fd54d0060c5b1ff1b8469ba7916
|
[
"MIT"
] | null | null | null |
0674_longest_continuous_increasing_subsequence.py
|
subwaymatch/leetcode
|
2592ba2e55682fd54d0060c5b1ff1b8469ba7916
|
[
"MIT"
] | null | null | null |
0674_longest_continuous_increasing_subsequence.py
|
subwaymatch/leetcode
|
2592ba2e55682fd54d0060c5b1ff1b8469ba7916
|
[
"MIT"
] | null | null | null |
class Solution:
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
LCIS = 1
current_max = 1
for index, num in enumerate(nums[1:]):
if num > nums[index]:
current_max += 1
LCIS = max(current_max, LCIS)
else:
current_max = 1
return LCIS
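# Hedged sanity checks (editor addition, not part of the submitted solution):
#
#   Solution().findLengthOfLCIS([1, 3, 5, 4, 7])  # -> 3, from the run 1, 3, 5
#   Solution().findLengthOfLCIS([2, 2, 2, 2])     # -> 1, no strictly increasing run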
| 22.772727 | 46 | 0.39521 |
7bb45017fcb5e8bbcc78a244559d6370f2680dcf
| 880 |
py
|
Python
|
redisun/models/setmodel.py
|
limen/pyredisun
|
11b9590968a18abbc15a4195737077360c638241
|
[
"MIT"
] | null | null | null |
redisun/models/setmodel.py
|
limen/pyredisun
|
11b9590968a18abbc15a4195737077360c638241
|
[
"MIT"
] | null | null | null |
redisun/models/setmodel.py
|
limen/pyredisun
|
11b9590968a18abbc15a4195737077360c638241
|
[
"MIT"
] | null | null | null |
from redisun.models.vectormodel import VectorModel
from redisun.querybuilder import QueryBuilder
class SetModel(VectorModel):
def _init_query_builder(self):
self._query_builder = QueryBuilder(('class', 'id', 'members'), ('id',), ':')
def create_xx(self, value, ttl=0):
pass
def create_nx(self, value, ttl=0):
pass
def update(self, value, ttl=0):
return self.create_xx(value, ttl)
def put(self, elements):
pass
def pull(self, elements):
pass
def first(self, with_ttl=False):
pass
def last(self, with_ttl=False):
pass
def all(self, with_ttl=False):
pass
def randone(self, with_ttl=False):
pass
def getset_one(self, members, ttl=0):
pass
def getset_all(self, members, ttl=0):
pass
| 20.952381 | 84 | 0.582955 |
1841e80f6cf04f3c4bc3402fe09228d6e1a812c7
| 673 |
py
|
Python
|
manage.py
|
stephken/Hierarchical_assessment
|
537219903357d97d1354a8f262badba9729fb5e0
|
[
"MIT"
] | null | null | null |
manage.py
|
stephken/Hierarchical_assessment
|
537219903357d97d1354a8f262badba9729fb5e0
|
[
"MIT"
] | null | null | null |
manage.py
|
stephken/Hierarchical_assessment
|
537219903357d97d1354a8f262badba9729fb5e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hierarchical_data.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.26087 | 81 | 0.683507 |
7dee89e99216208cb78b402042807b0ea516db08
| 25,974 |
py
|
Python
|
scripts/automation/trex_control_plane/server/trex_server.py
|
git-root/trex-core
|
82280f7c87fabed60d83643bd9ec2c79cac34668
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/server/trex_server.py
|
git-root/trex-core
|
82280f7c87fabed60d83643bd9ec2c79cac34668
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/server/trex_server.py
|
git-root/trex-core
|
82280f7c87fabed60d83643bd9ec2c79cac34668
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import stat
import sys
import time
import outer_packages
import zmq
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import jsonrpclib
from jsonrpclib import Fault
import binascii
import socket
import errno
import signal
from common.trex_status_e import TRexStatus
from common.trex_exceptions import *
import subprocess
from random import randrange
import logging
import threading
import CCustomLogger
from trex_launch_thread import AsynchronousTRexSession
from zmq_monitor_thread import ZmqMonitorSession
from argparse import ArgumentParser, RawTextHelpFormatter
from json import JSONEncoder
# setup the logger
CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
class CTRexServer(object):
"""This class defines the server side of the RESTfull interaction with TRex"""
DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
TREX_START_CMD = './t-rex-64'
DEFAULT_FILE_PATH = '/tmp/trex_files/'
def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500):
"""
Parameters
----------
trex_host : str
a string of the t-rex ip address or hostname.
default value: machine hostname as fetched from socket.gethostname()
trex_daemon_port : int
the port number on which the trex-daemon server can be reached
default value: 8090
trex_zmq_port : int
the port number on which trex's zmq module will interact with daemon server
default value: 4500
        Instantiates a TRex server object and connects it to the listening daemon-server.
"""
self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
self.__check_trex_path_validity()
self.__check_files_path_validity()
self.trex = CTRex()
self.trex_version = None
self.trex_host = trex_host
self.trex_daemon_port = trex_daemon_port
self.trex_zmq_port = trex_zmq_port
self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
self.start_lock = threading.Lock()
self.__reservation = None
        self.zmq_monitor = ZmqMonitorSession(self.trex, self.trex_zmq_port) # initiate single ZMQ monitor thread for server usage
def add(self, x, y):
print "server function add ",x,y
logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
return x + y
# return Fault(-10, "")
def push_file (self, filename, bin_data):
logger.info("Processing push_file() command.")
try:
filepath = os.path.abspath(os.path.join(self.trex_files_path, filename))
with open(filepath, 'wb') as f:
f.write(binascii.a2b_base64(bin_data))
logger.info("push_file() command finished. `{name}` was saved at {fpath}".format( name = filename, fpath = self.trex_files_path))
return True
except IOError as inst:
logger.error("push_file method failed. " + str(inst))
return False
def connectivity_check (self):
logger.info("Processing connectivity_check function.")
return True
def start(self):
"""This method fires up the daemon server based on initialized parameters of the class"""
# initialize the server instance with given resources
try:
print "Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
logger.info("current working dir is: {0}".format(self.TREX_PATH) )
logger.info("current files dir is : {0}".format(self.trex_files_path) )
logger.debug("Starting TRex server. Registering methods to process.")
logger.info(self.get_trex_version(base64 = False))
self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
except socket.error as e:
if e.errno == errno.EADDRINUSE:
logger.error("TRex server requested address already in use. Aborting server launching.")
print "TRex server requested address already in use. Aborting server launching."
raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. "
"Server launch aborted. Please make sure no other process is "
"using the desired server properties.")
elif isinstance(e, socket.gaierror) and e.errno == -3:
# handling Temporary failure in name resolution exception
raise socket.gaierror(-3, "Temporary failure in name resolution.\n"
"Make sure provided hostname has DNS resolving.")
else:
raise
# set further functionality and peripherals to server instance
try:
self.server.register_function(self.add)
self.server.register_function(self.get_trex_log)
self.server.register_function(self.get_trex_daemon_log)
self.server.register_function(self.get_trex_version)
self.server.register_function(self.connectivity_check)
self.server.register_function(self.start_trex)
self.server.register_function(self.stop_trex)
self.server.register_function(self.wait_until_kickoff_finish)
self.server.register_function(self.get_running_status)
self.server.register_function(self.is_running)
self.server.register_function(self.get_running_info)
self.server.register_function(self.is_reserved)
self.server.register_function(self.get_files_path)
self.server.register_function(self.push_file)
self.server.register_function(self.reserve_trex)
self.server.register_function(self.cancel_reservation)
self.server.register_function(self.force_trex_kill)
signal.signal(signal.SIGTSTP, self.stop_handler)
signal.signal(signal.SIGTERM, self.stop_handler)
self.zmq_monitor.start()
self.server.serve_forever()
except KeyboardInterrupt:
logger.info("Daemon shutdown request detected." )
finally:
self.zmq_monitor.join() # close ZMQ monitor thread resources
self.server.shutdown()
pass
# get files from Trex server and return their content (mainly for logs)
@staticmethod
def _pull_file(filepath):
try:
with open(filepath, 'rb') as f:
file_content = f.read()
return binascii.b2a_base64(file_content)
except Exception as e:
err_str = "Can't get requested file: {0}, possibly due to TRex that did not run".format(filepath)
logger.error('{0}, error: {1}'.format(err_str, e))
return Fault(-33, err_str)
# get Trex log /tmp/trex.txt
def get_trex_log(self):
logger.info("Processing get_trex_log() command.")
return self._pull_file('/tmp/trex.txt')
# get daemon log /var/log/trex/trex_daemon_server.log
def get_trex_daemon_log (self):
logger.info("Processing get_trex_daemon_log() command.")
return self._pull_file('/var/log/trex/trex_daemon_server.log')
# get Trex version from ./t-rex-64 --help (last 4 lines)
def get_trex_version (self, base64 = True):
try:
logger.info("Processing get_trex_version() command.")
if not self.trex_version:
help_print = subprocess.Popen(['./t-rex-64', '--help'], cwd = self.TREX_PATH, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
help_print.wait()
help_print_stdout = help_print.stdout.read()
self.trex_version = binascii.b2a_base64('\n'.join(help_print_stdout.split('\n')[-5:-1]))
if base64:
return self.trex_version
else:
return binascii.a2b_base64(self.trex_version)
except Exception as e:
err_str = "Can't get trex version, error: {0}".format(e)
logger.error(err_str)
return Fault(-33, err_str)
def stop_handler (self, signum, frame):
logger.info("Daemon STOP request detected.")
if self.is_running():
# in case TRex process is currently running, stop it before terminating server process
self.stop_trex(self.trex.get_seq())
sys.exit(0)
def is_running (self):
run_status = self.trex.get_status()
logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
if run_status==TRexStatus.Running:
return True
else:
return False
def is_reserved (self):
logger.info("Processing is_reserved() command.")
return bool(self.__reservation)
def get_running_status (self):
run_status = self.trex.get_status()
logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
def get_files_path (self):
logger.info("Processing get_files_path() command." )
return self.trex_files_path
def reserve_trex (self, user):
if user == "":
logger.info("TRex reservation cannot apply to empty string user. Request denied.")
return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")
with self.start_lock:
logger.info("Processing reserve_trex() command.")
if self.is_reserved():
if user == self.__reservation['user']:
                    # return True if the same user is asking and already has the reservation
                    logger.info("The same user is asking and already has the reservation. Re-reserving TRex.")
return True
logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
res_user = self.__reservation['user']) ) # raise at client TRexInUseError
elif self.trex.get_status() != TRexStatus.Idle:
logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.') # raise at client TRexInUseError
else:
logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
self.__reservation = {'user' : user, 'since' : time.ctime()}
logger.debug("Reservation details: "+ str(self.__reservation))
return True
def cancel_reservation (self, user):
with self.start_lock:
logger.info("Processing cancel_reservation() command.")
if self.is_reserved():
if self.__reservation['user'] == user:
logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
self.__reservation = None
return True
else:
logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
else:
logger.info("TRex is not reserved to anyone. No need to cancel anything")
assert(self.__reservation is None)
return False
def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 30):
with self.start_lock:
logger.info("Processing start_trex() command.")
if self.is_reserved():
# check if this is not the user to which TRex is reserved
if self.__reservation['user'] != user:
logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
elif self.trex.get_status() != TRexStatus.Idle:
logger.info("TRex is already taken, cannot create another run until done.")
return Fault(-13, '') # raise at client TRexInUseError
try:
server_cmd_data = self.generate_run_cmd(**trex_cmd_options)
self.zmq_monitor.first_dump = True
self.trex.start_trex(self.TREX_PATH, server_cmd_data)
logger.info("TRex session has been successfully initiated.")
if block_to_success:
# delay server response until TRex is at 'Running' state.
start_time = time.time()
trex_state = None
while (time.time() - start_time) < timeout :
trex_state = self.trex.get_status()
if trex_state != TRexStatus.Starting:
break
else:
time.sleep(0.5)
# check for TRex run started normally
if trex_state == TRexStatus.Starting: # reached timeout
logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
elif trex_state == TRexStatus.Idle:
return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
# reach here only if TRex is at 'Running' state
self.trex.gen_seq()
return self.trex.get_seq() # return unique seq number to client
except TypeError as e:
logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
def stop_trex(self, seq):
logger.info("Processing stop_trex() command.")
if self.trex.get_seq()== seq:
logger.debug("Abort request legit since seq# match")
return self.trex.stop_trex()
else:
if self.trex.get_status() != TRexStatus.Idle:
logger.warning("Abort request is only allowed to process initiated the run. Request denied.")
return Fault(-33, 'Abort request is only allowed to process initiated the run. Request denied.') # raise at client TRexRequestDenied
else:
return False
def force_trex_kill (self):
logger.info("Processing force_trex_kill() command. --> Killing TRex session indiscriminately.")
return self.trex.stop_trex()
def wait_until_kickoff_finish (self, timeout = 40):
# block until TRex exits Starting state
logger.info("Processing wait_until_kickoff_finish() command.")
trex_state = None
start_time = time.time()
while (time.time() - start_time) < timeout :
trex_state = self.trex.get_status()
if trex_state != TRexStatus.Starting:
return
return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
def get_running_info (self):
logger.info("Processing get_running_info() command.")
return self.trex.get_running_info()
def generate_run_cmd (self, f, d, iom = 0, export_path="/tmp/trex.txt", **kwargs):
""" generate_run_cmd(self, trex_cmd_options, export_path) -> str
Generates a custom running command for the kick-off of the TRex traffic generator.
Returns a tuple of command (string) and export path (string) to be issued on the trex server
Parameters
----------
trex_cmd_options : str
Defines the exact command to run on the t-rex
Example: "-c 2 -m 0.500000 -d 100 -f cap2/sfr.yaml --nc -p -l 1000"
export_path : str
a full system path to which the results of the trex-run will be logged.
"""
if 'results_file_path' in kwargs:
export_path = kwargs['results_file_path']
del kwargs['results_file_path']
# adding additional options to the command
trex_cmd_options = ''
for key, value in kwargs.iteritems():
tmp_key = key.replace('_','-')
dash = ' -' if (len(key)==1) else ' --'
            if (value == True) and (str(value) != '1'): # also check str(value) to avoid the case where the int 1 is treated by Python as True
trex_cmd_options += (dash + tmp_key)
else:
trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
cmd = "{run_command} -f {gen_file} -d {duration} --iom {io} {cmd_options} --no-key > {export}".format( # -- iom 0 disables the periodic log to the screen (not needed)
run_command = self.TREX_START_CMD,
gen_file = f,
duration = d,
cmd_options = trex_cmd_options,
io = iom,
export = export_path )
logger.info("TREX FULL COMMAND: {command}".format(command = cmd) )
return (cmd, export_path, long(d))
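    # Hedged illustration (editor addition): with f='cap2/sfr.yaml', d=100 and no
    # extra keyword arguments, the method above builds roughly
    #
    #   ./t-rex-64 -f cap2/sfr.yaml -d 100 --iom 0  --no-key > /tmp/trex.txt
    #
    # Additional kwargs become dashed flags, e.g. c=4 turns into ' -c 4' and
    # nc=True into ' --nc' (flag order follows dict iteration order).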
def __check_trex_path_validity(self):
        # check for executable existence
        if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
            print "The provided TRex path does not contain an executable TRex file.\nPlease check the path and retry."
            logger.error("The provided TRex path does not contain an executable TRex file")
exit(-1)
# check for executable permissions
st = os.stat(self.TREX_PATH+'/t-rex-64')
if not bool(st.st_mode & (stat.S_IXUSR ) ):
print "The provided TRex path do not contain an TRex file with execution privileges.\nPlease check the files permissions and retry."
logger.error("The provided TRex path do not contain an TRex file with execution privileges")
exit(-1)
else:
return
def __check_files_path_validity(self):
        # first, check for path existence; otherwise, try creating it with appropriate credentials
if not os.path.exists(self.trex_files_path):
try:
os.makedirs(self.trex_files_path, 0660)
return
except os.error as inst:
print "The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry."
logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
exit(-1)
elif os.access(self.trex_files_path, os.W_OK):
return
else:
print "The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry."
logger.error("The provided files path has insufficient access credentials for root user")
exit(-1)
class CTRex(object):
def __init__(self):
self.status = TRexStatus.Idle
self.verbose_status = 'TRex is Idle'
self.errcode = None
self.session = None
self.zmq_monitor = None
self.zmq_dump = None
self.seq = None
self.expect_trex = threading.Event()
self.encoder = JSONEncoder()
def get_status(self):
return self.status
def set_status(self, new_status):
self.status = new_status
def get_verbose_status(self):
return self.verbose_status
def set_verbose_status(self, new_status):
self.verbose_status = new_status
def gen_seq (self):
self.seq = randrange(1,1000)
def get_seq (self):
return self.seq
def get_running_info (self):
if self.status == TRexStatus.Running:
return self.encoder.encode(self.zmq_dump)
else:
logger.info("TRex isn't running. Running information isn't available.")
if self.status == TRexStatus.Idle:
                if self.errcode is not None: # some error occurred
                    logger.info("TRex is in Idle state, with errors. Returning fault")
                    return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occurred
else:
logger.info("TRex is in Idle state, no errors. returning {}")
return u'{}'
return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
def stop_trex(self):
if self.status == TRexStatus.Idle:
# t-rex isn't running, nothing to abort
logger.info("TRex isn't running. No need to stop anything.")
if self.errcode is not None: # some error occurred, notify client despite TRex already stopped
                return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occurred
return False
else:
# handle stopping t-rex's run
self.session.join()
logger.info("TRex session has been successfully aborted.")
return True
def start_trex(self, trex_launch_path, trex_cmd):
self.set_status(TRexStatus.Starting)
logger.info("TRex running state changed to 'Starting'.")
self.set_verbose_status('TRex is starting (data is not available yet)')
self.errcode = None
self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
self.session.start()
self.expect_trex.set()
# self.zmq_monitor= ZmqMonitorSession(self, zmq_port)
# self.zmq_monitor.start()
def generate_trex_parser ():
default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
formatter_class = RawTextHelpFormatter,
usage = """
trex_daemon_server [options]
""" )
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
action="store", help="Select port on which the ZMQ module listens to TRex.\nDefault port is 4500.", metavar="PORT",
default = 4500)
parser.add_argument("-t", "--trex-path", dest="trex_path",
action="store", help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
metavar="PATH", default = default_path )
parser.add_argument("-f", "--files-path", dest="files_path",
action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
metavar="PATH", default = default_files_path )
parser.add_argument("--trex-host", dest="trex_host",
action="store", help="Specify a hostname to be registered as the TRex server.\n"
"Default is to bind all IPs using '0.0.0.0'.",
metavar="HOST", default = '0.0.0.0')
return parser
trex_parser = generate_trex_parser()
def do_main_program ():
args = trex_parser.parse_args()
server = CTRexServer(trex_path = args.trex_path, trex_files_path = args.files_path,
trex_host = args.trex_host, trex_daemon_port = args.daemon_port,
trex_zmq_port = args.zmq_port)
server.start()
if __name__ == "__main__":
do_main_program()
| 49.66348 | 236 | 0.625356 |
3be979445dd55b7ed8473ed0bdf2eb3c9fd14076
| 1,644 |
py
|
Python
|
src/codemetrics_report/cli.py
|
lpereira95/codemetrics-report
|
8061f7da060c618a8e1347bd4325cd664163230d
|
[
"MIT"
] | null | null | null |
src/codemetrics_report/cli.py
|
lpereira95/codemetrics-report
|
8061f7da060c618a8e1347bd4325cd664163230d
|
[
"MIT"
] | null | null | null |
src/codemetrics_report/cli.py
|
lpereira95/codemetrics-report
|
8061f7da060c618a8e1347bd4325cd664163230d
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import click
import codemetrics as cm
from codemetrics_report.report import gather_report_info
from codemetrics_report.report import create_html_report
from codemetrics_report.report import altair2json
from codemetrics_report.vis import create_loc_chart
from codemetrics_report.vis import create_age_chart
from codemetrics_report.vis import create_age_loc_chart
from codemetrics_report.vis import create_hotspots_chart
from codemetrics_report.vis import create_coupling_chart
@click.command()
@click.argument('repo_path', nargs=1, type=str)
@click.option('--weeks', '-w', type=int, default=52)
def generate_codemetrics_report(repo_path, weeks):
repo_path = convert_dirname_to_path(repo_path)
project_name = repo_path.name
# repo
repo = cm.GitProject(repo_path)
# get info
log, loc, ages, hotspots = gather_report_info(repo)
# create charts
charts_json = {
'loc': altair2json(create_loc_chart(loc)),
'age': altair2json(create_age_chart(ages, weeks=weeks)),
'loc_age': create_age_loc_chart(ages),
'hotspots': create_hotspots_chart(hotspots),
'coupling': create_coupling_chart(loc, log)
}
filename = f'codemetrics_{project_name}.html'
create_html_report(project_name, charts_json, filename=filename)
print(f'\nCreated {filename}')
def convert_dirname_to_path(dir_name):
"""
Notes:
Handles definition of home with `~`.
"""
dir_name_ls = dir_name.split('/')
if dir_name_ls[0] == '~':
path = Path.home() / '/'.join(dir_name_ls[1:])
else:
path = Path(dir_name)
return path
| 28.842105 | 68 | 0.72871 |
e1b6ebd37b97bc9b109f511037c684ea5fa2de9b
| 225 |
py
|
Python
|
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | 1 |
2018-06-23T17:52:20.000Z
|
2018-06-23T17:52:20.000Z
|
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | null | null | null |
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | null | null | null |
# defaults.py: contains the built-in variables, events and methods
# used for scripting the C program
import event
events = {}
_event_names = ["on_start", "on_exit"]
for evt in _event_names:
events[evt] = event.Event()
| 22.5 | 66 | 0.724444 |
8ff48bed0ffe3bbd9640f35051ef3e5e06f97cb6
| 1,682 |
py
|
Python
|
model/train-model.py
|
axiom-data-science/coral-spawning-detector
|
2672a978a3e9bdb38565eec9fdf70a63c5b4e15a
|
[
"MIT"
] | 1 |
2021-06-14T18:55:41.000Z
|
2021-06-14T18:55:41.000Z
|
model/train-model.py
|
axiom-data-science/coral-spawning-detector
|
2672a978a3e9bdb38565eec9fdf70a63c5b4e15a
|
[
"MIT"
] | null | null | null |
model/train-model.py
|
axiom-data-science/coral-spawning-detector
|
2672a978a3e9bdb38565eec9fdf70a63c5b4e15a
|
[
"MIT"
] | null | null | null |
#!python
"""Build coral spawning detection model.
Script to recreate, or train, another coral detection model that is derived
from the notebook used to create the original model used in the FWC Coral Spawning
project.
"""
import fastai.vision.all as fai_vision
def prepare_data_loader(image_dir):
"""Given path to image directory, return DataLoader."""
images = fai_vision.get_image_files(image_dir)
# assume dir struct is: <path>/<coral>/{'positive', 'negative'}/<images>
labels = [str(img.parent).split('/')[-1] for img in images]
data_loader = fai_vision.ImageDataLoaders.from_lists(
'frames', # dunno why I need this here
images,
labels
)
return data_loader
def train_model(
data_loader,
model=fai_vision.resnet34,
metrics=fai_vision.error_rate
):
"""Given DataLoader, return trained model."""
model = fai_vision.cnn_learner(
data_loader,
model,
metrics=metrics
)
model.fine_tune(1)
return model
def create_model(
image_dir,
model_save_path,
):
"""Load training data, train model, and save to disk"""
data_loader = prepare_data_loader(image_dir)
model = train_model(data_loader)
model.export(fname=model_save_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'image_dir',
type=str,
help='Path to training image directory'
)
parser.add_argument(
'model_save_path',
type=str,
help='Path to save serialized trained model'
)
args = parser.parse_args()
create_model(args.image_dir, args.model_save_path)
| 25.104478 | 82 | 0.677765 |
16c45aba544e037254d08d858c5dd2c2d0df9fe7
| 9,590 |
py
|
Python
|
tensorflow/python/ops/sparse_grad.py
|
sachinpro/sachinpro.github.io
|
c3bbd8d89818f5d8bb7296c851ed5e52c19728e3
|
[
"Apache-2.0"
] | 1 |
2018-01-24T12:57:47.000Z
|
2018-01-24T12:57:47.000Z
|
tensorflow/python/ops/sparse_grad.py
|
sachinpro/sachinpro.github.io
|
c3bbd8d89818f5d8bb7296c851ed5e52c19728e3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/sparse_grad.py
|
sachinpro/sachinpro.github.io
|
c3bbd8d89818f5d8bb7296c851ed5e52c19728e3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
ops.NoGradient("SparseAddGrad")
ops.NoGradient("SparseConcat")
ops.NoGradient("SparseToDense")
@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
"""Gradients for the SparseReorder op.
Args:
op: the SparseReorder op
unused_output_indices_grad: the incoming gradients of the output indices
output_values_grad: the incoming gradients of the output values
Returns:
Gradient for each of the 3 input tensors:
(input_indices, input_values, input_shape)
The gradients for input_indices and input_shape is None.
"""
input_indices = op.inputs[0]
input_shape = op.inputs[2]
num_entries = array_ops.shape(input_indices)[0]
entry_indices = math_ops.range(num_entries)
sp_unordered = ops.SparseTensor(input_indices, entry_indices, input_shape)
sp_ordered = sparse_ops.sparse_reorder(sp_unordered)
inverted_permutation = array_ops.invert_permutation(sp_ordered.values)
return (None,
array_ops.gather(output_values_grad, inverted_permutation),
None)
@ops.RegisterGradient("SparseAdd")
def _SparseAddGrad(op, *grads):
"""The backward operator for the SparseAdd op.
The SparseAdd op calculates A + B, where A, B, and the sum are all represented
as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
values of A and B.
Args:
op: the SparseAdd op
*grads: the incoming gradients, one element per output of `op`
Returns:
Gradient for each of the 6 input tensors of SparseAdd:
(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
The gradients for the indices, shapes, and the threshold are None.
"""
val_grad = grads[1]
a_indices = op.inputs[0]
b_indices = op.inputs[3]
sum_indices = op.outputs[0]
# NOTE: we do not need to take `thresh` into account, since it simply affects
# the non-zero elements of the sum, and we will peek into `sum_indices` in the
# gradient op.
# pylint: disable=protected-access
a_val_grad, b_val_grad = gen_sparse_ops._sparse_add_grad(val_grad, a_indices,
b_indices,
sum_indices)
a_val_grad.set_shape(op.inputs[1].get_shape())
b_val_grad.set_shape(op.inputs[4].get_shape())
# (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
return (None, a_val_grad, None, None, b_val_grad, None, None)
@ops.RegisterGradient("SparseTensorDenseAdd")
def _SparseTensorDenseAddGrad(op, out_grad):
sp_indices = op.inputs[0]
# (sparse_indices, sparse_values, sparse_shape, dense)
return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad)
@ops.RegisterGradient("SparseReduceSum")
def _SparseReduceSumGrad(op, out_grad):
"""Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
sp_indices = op.inputs[0]
sp_shape = op.inputs[2]
output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
# (sparse_indices, sparse_values, sparse_shape, reduction_axes)
return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
None, None)
@ops.RegisterGradient("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulGrad(op, grad):
"""Gradients for the dense tensor in the SparseTensorDenseMatMul op.
If either input is complex, no gradient is provided.
Args:
op: the SparseTensorDenseMatMul op
grad: the incoming gradient
Returns:
Gradient for each of the 4 input tensors:
(sparse_indices, sparse_values, sparse_shape, dense_tensor)
The gradients for indices and shape are None.
Raises:
TypeError: When the two operands don't have the same type.
"""
sp_t = ops.SparseTensor(*op.inputs[:3])
adj_a = op.get_attr("adjoint_a")
adj_b = op.get_attr("adjoint_b")
a_type = sp_t.values.dtype.base_dtype
b_type = op.inputs[3].dtype.base_dtype
if a_type != b_type:
raise TypeError("SparseTensorDenseMatMul op received operands with "
"different types: ", a_type, " and ", b_type)
is_complex = a_type == ops.dtypes.complex64
if is_complex:
raise NotImplementedError("SparseTensorDenseMatMul op does not support "
"complex gradients.")
# gradient w.r.t. dense
b_grad = sparse_ops.sparse_tensor_dense_matmul(sp_t, grad,
adjoint_a=not adj_a)
if adj_b:
b_grad = array_ops.transpose(b_grad)
# gradient w.r.t. sparse values
a_indices = op.inputs[0]
b = op.inputs[3]
rows = a_indices[:, 0]
cols = a_indices[:, 1]
# TODO(zongheng, ebrevdo): add conjugates in the right places when complex
# values are allowed.
# TODO(zongheng): these gather calls could potentially duplicate rows/cols in
# memory. If there is a need, we should look into implementing this more
# intelligently to avoid duplicating data.
parts_a = array_ops.gather(grad, rows if not adj_a else cols)
parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),
cols if not adj_a else rows)
a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)
# gradients w.r.t. (a_indices, a_values, a_shape, b)
return (None, a_values_grad, None, b_grad)
@ops.RegisterGradient("SparseDenseCwiseAdd")
def _SparseDenseCwiseAddGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseDenseCwiseAdd is currently not"
" implemented yet.")
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
"""Common code for SparseDenseCwise{Mul,Div} gradients."""
x_indices = op.inputs[0]
x_shape = op.inputs[2]
y = op.inputs[3]
y_shape = math_ops.to_int64(array_ops.shape(y))
num_added_dims = array_ops.expand_dims(
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
augmented_y_shape = array_ops.concat(0, [array_ops.ones(num_added_dims,
ops.dtypes.int64),
y_shape])
scaling = x_shape // augmented_y_shape
scaled_indices = x_indices // scaling
scaled_indices = array_ops.slice(scaled_indices,
array_ops.concat(0, [[0], num_added_dims]),
[-1, -1])
dense_vals = array_ops.gather_nd(y, scaled_indices)
if is_mul:
dx = grad * dense_vals
dy_val = grad * op.inputs[1]
else:
dx = grad / dense_vals
dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
# indices can repeat after scaling, so we can't use sparse_to_dense().
dy = sparse_ops.sparse_add(
array_ops.zeros_like(y),
ops.SparseTensor(scaled_indices, dy_val, y_shape))
# (sp_indices, sp_vals, sp_shape, dense)
return (None, dx, None, dy)
@ops.RegisterGradient("SparseDenseCwiseMul")
def _SparseDenseCwiseMulGrad(op, grad):
"""Gradients for SparseDenseCwiseMul."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, True)
@ops.RegisterGradient("SparseDenseCwiseDiv")
def _SparseDenseCwiseDivGrad(op, grad):
"""Gradients for SparseDenseCwiseDiv."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, False)
@ops.RegisterGradient("SparseSoftmax")
def _SparseSoftmaxGrad(op, grad):
"""Gradients for SparseSoftmax.
The calculation is the same as SoftmaxGrad:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
where we now only operate on the non-zero values present in the SparseTensors.
Args:
op: the SparseSoftmax op.
grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.
Returns:
Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
"""
indices, shape = op.inputs[0], op.inputs[2]
out_vals = op.outputs[0]
sp_output = ops.SparseTensor(indices, out_vals, shape)
sp_grad = ops.SparseTensor(indices, grad, shape)
sp_product = ops.SparseTensor(
indices, sp_output.values * sp_grad.values, shape)
# [..., B, 1], dense.
sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)
# sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)
grad_x = sp_sum.values * sp_output.values
return [None, grad_x, None]
| 36.884615 | 80 | 0.704901 |
d7ad07e8956eb58f888824957c5b1a8f74578720
| 12,321 |
py
|
Python
|
ludopy/player.py
|
Larook/DRL_LUDO
|
93e81b45d69de369efd199095bb891aef2c390e7
|
[
"MIT"
] | 4 |
2020-03-29T12:12:53.000Z
|
2020-08-12T14:29:25.000Z
|
ludopy/player.py
|
Larook/DRL_LUDO
|
93e81b45d69de369efd199095bb891aef2c390e7
|
[
"MIT"
] | null | null | null |
ludopy/player.py
|
Larook/DRL_LUDO
|
93e81b45d69de369efd199095bb891aef2c390e7
|
[
"MIT"
] | 6 |
2020-09-14T15:14:41.000Z
|
2021-06-14T02:35:04.000Z
|
import numpy as np
TOTAL_NUMBER_OF_TAILES = 60
DICE_MOVE_OUT_OF_HOME = 6
NO_ENEMY = -1
# This rule says that if there are two pieces on the same tile, the last one to arrive has to return to start
PLAY_WITH_RULE_A = True
TAILE_FREE = 0
TAILE_HOME = 1
TAILE_START = 2
TAILE_GLOB = 3
TAILE_GOAL_AREAL = 4
TAILE_STAR = 5
TAILE_GOAL = 6
TAILE_ENEMY_1_GLOB = 7
TAILE_ENEMY_2_GLOB = 8
TAILE_ENEMY_3_GLOB = 9
LIST_TAILE_ENEMY_GLOBS = [TAILE_ENEMY_1_GLOB, TAILE_ENEMY_2_GLOB, TAILE_ENEMY_3_GLOB]
NULL_POS = -1
HOME_INDEX = 0
START_INDEX = 1
STAR_INDEXS = [5, 12, 18, 25, 31, 38, 44, 51]
HOME_AREAL_INDEXS = [53, 54, 55, 56, 57, 58]
GOAL_INDEX = 59
GLOB_INDEXS = [9, 22, 35, 48]
ENEMY_1_GLOB_INDX = 14
ENEMY_2_GLOB_INDX = 27
ENEMY_3_GLOB_INDX = 40
STAR_AT_GOAL_AREAL_INDX = STAR_INDEXS[-1]
BORD_TILES = np.full(TOTAL_NUMBER_OF_TAILES, TAILE_FREE)
BORD_TILES[HOME_INDEX] = TAILE_HOME
BORD_TILES[START_INDEX] = TAILE_START
BORD_TILES[STAR_INDEXS] = TAILE_STAR
BORD_TILES[GLOB_INDEXS] = TAILE_GLOB
BORD_TILES[HOME_AREAL_INDEXS] = TAILE_GOAL_AREAL
BORD_TILES[GOAL_INDEX] = TAILE_GOAL
BORD_TILES[ENEMY_1_GLOB_INDX] = TAILE_ENEMY_1_GLOB
BORD_TILES[ENEMY_2_GLOB_INDX] = TAILE_ENEMY_2_GLOB
BORD_TILES[ENEMY_3_GLOB_INDX] = TAILE_ENEMY_3_GLOB
ENEMY_1_INDX_AT_HOME = 40 # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 1
ENEMY_2_INDX_AT_HOME = 27 # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 2
ENEMY_3_INDX_AT_HOME = 14 # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 3
def enemy_pos_at_pos(pos):
"""
    Returns the indexes the other players have to be at to be in the same location as the one given in pos
    :param pos: The location to check for
    :type pos: int
    :return enemy_pos: The locations the enemies' pieces have to be at
:rtype enemy_pos: list of list
"""
enemy_pos = []
for enemy_start_pos, enemy_pos_at_start in [[ENEMY_1_GLOB_INDX, ENEMY_1_INDX_AT_HOME],
[ENEMY_2_GLOB_INDX, ENEMY_2_INDX_AT_HOME],
[ENEMY_3_GLOB_INDX, ENEMY_3_INDX_AT_HOME]]:
post_offset = enemy_start_pos - 1
pre_offset = enemy_pos_at_start - 1
if pos == enemy_start_pos:
pos_enemy = [START_INDEX, HOME_AREAL_INDEXS[0]]
elif pos < 0:
pos_enemy = [max(enemy_pos_at_start + pos, -1)]
elif START_INDEX <= pos < enemy_start_pos:
pos_enemy = [pos + pre_offset]
elif pos > HOME_AREAL_INDEXS[0] or pos == HOME_INDEX:
pos_enemy = [-1]
else:
pos_enemy = [pos - post_offset]
enemy_pos.append(pos_enemy)
return enemy_pos
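# Hedged illustration (editor addition), computed from the constants above: the
# player's start square expressed in each opponent's coordinate frame:
#
#   enemy_pos_at_pos(START_INDEX)   # -> [[40], [27], [14]]
#
# i.e. an opponent whose piece sits on its own index 40 / 27 / 14 shares the
# tile with one of our pieces standing on START_INDEX.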
def get_enemy_at_pos(pos, enemys):
"""
    Returns the enemy and the pieces that enemy has at the given location
    :param pos: The location to check for
    :type pos: int
    :param enemys: The locations of the enemies' pieces in a list of 4 lists
    :returns:
    - enemy_at_pos: The enemy that is at the location
    - enemy_pieces_at_pos: The pieces the enemy has at the location
:rtype enemy_at_pos: list
:rtype enemy_pieces_at_pos: list of list
"""
    # Get the positions the enemies have to be at to share the same pos
    other_enemy_pos_at_pos = enemy_pos_at_pos(pos)
    # Check if there is an enemy and how many pieces the enemy has there
enemy_at_pos = NO_ENEMY
enemy_pieces_at_pos = []
for enemy_i, other_enemy_pos in enumerate(other_enemy_pos_at_pos):
        # Check if an enemy has already been found at pos.
if enemy_at_pos != NO_ENEMY:
# If there is then stop checking for more (there can only be one)
break
for o_pos in other_enemy_pos:
if o_pos == NULL_POS:
continue
for enemy_pice, enemy_pos in enumerate(enemys[enemy_i]):
if enemy_pos == o_pos:
enemy_pieces_at_pos.append(enemy_pice)
enemy_at_pos = enemy_i
return enemy_at_pos, enemy_pieces_at_pos
class Player:
"""
A class used by the Game class. This class is not needed for normal use
"""
def __init__(self):
"""
Makes a player with 4 pieces all at the home locations
"""
self.pieces = []
self.number_of_pieces = 4
self.set_all_pieces_to_home()
def get_pieces_that_can_move(self, dice):
"""
Return the pieces that can move with the given dice
:param dice: The dice the move will be done with
:type dice: int
:return: movable_pieces: A list with the pieces that can be moved
:rtype movable_pieces: list
"""
movable_pieces = []
        # Go through all the pieces
        for piece_i, piece_place in enumerate(self.pieces):
            # If the piece is at the goal then the piece can't move
            if BORD_TILES[piece_place] == TAILE_GOAL:
                continue
            # If the piece is at home and the dice is DICE_MOVE_OUT_OF_HOME then the piece can move out of home
elif BORD_TILES[piece_place] == TAILE_HOME and dice == DICE_MOVE_OUT_OF_HOME:
movable_pieces.append(piece_i)
# If the piece is not at home or at the goal it can move
elif BORD_TILES[piece_place] != TAILE_HOME:
movable_pieces.append(piece_i)
return movable_pieces
def player_winner(self):
"""
        Returns whether the player is a winner or not
        :return: winner: A bool that indicates whether the player is a winner or not
:rtype winner: bool
"""
        # Go through all the pieces
        for piece_place in self.pieces:
            # If a piece is not at the goal, the player is not the winner
            if BORD_TILES[piece_place] != TAILE_GOAL:
                return False
        # If every piece is at the goal, the player is the winner
return True
def set_pieces(self, pieces):
"""
Sets the players pieces
:param pieces: The pieces to set the players pieces to
"""
self.pieces = np.copy(pieces)
def get_pieces(self):
"""
Returns the players pieces
:return pieces: The players pieces
:rtype pieces: list
"""
return np.copy(self.pieces)
def move_piece(self, piece, dice, enemys):
"""
        Move the player's piece by the given dice following the game rules. Returns the new locations of the enemies' pieces
        :param piece: The piece to move
        :type piece: int
        :param dice: The dice to make the move with
        :type dice: int
        :param enemys: The enemies' pieces
        :type enemys: list with 4 lists each with 4 int's
        :return enemys: The new locations of the enemies' pieces
:rtype enemys: list with 4 lists each with 4 int's
"""
enemys_new = enemys.copy()
old_piece_pos = self.pieces[piece]
new_piece_pos = old_piece_pos + dice
move_enemy_home_from_poss = []
do_not_check_rule_a = False
enemy_at_pos, enemy_pieces_at_pos = get_enemy_at_pos(new_piece_pos, enemys)
# If the dice is 0 then no movement can be done
if dice == 0:
pass
# At goal
elif BORD_TILES[old_piece_pos] == TAILE_GOAL:
# The piece can not move
pass
# Goal areal
elif BORD_TILES[old_piece_pos] == TAILE_GOAL_AREAL:
if new_piece_pos <= GOAL_INDEX:
self.pieces[piece] = new_piece_pos
else:
overshoot = new_piece_pos - GOAL_INDEX
new_piece_pos_corrected = old_piece_pos - overshoot
self.pieces[piece] = new_piece_pos_corrected
# The Home areal
elif BORD_TILES[old_piece_pos] == TAILE_HOME:
if dice == DICE_MOVE_OUT_OF_HOME:
self.pieces[piece] = START_INDEX
# Mark any enemy that might be at START_INDEX to be moved home
do_not_check_rule_a = True
move_enemy_home_from_poss.append(START_INDEX)
# Star just before the goal areal
elif new_piece_pos == STAR_AT_GOAL_AREAL_INDX:
self.pieces[piece] = GOAL_INDEX
# Mark any enemy that might be at STAR_AT_GOAL_AREAL_INDX to be moved home
move_enemy_home_from_poss.append(new_piece_pos)
# The other stars
elif BORD_TILES[new_piece_pos] == TAILE_STAR:
present_star_staridx = STAR_INDEXS.index(new_piece_pos)
next_star_staridx = present_star_staridx + 1
if next_star_staridx >= len(STAR_INDEXS):
next_star_staridx = 0
next_star_pos = STAR_INDEXS[next_star_staridx]
self.pieces[piece] = next_star_pos
# Mark any enemy at the first star, or at the star that will be jumped to, to be moved home
if enemy_at_pos != NO_ENEMY:
move_enemy_home_from_poss.append(new_piece_pos)
next_star_enemy_at_pos, next_star_enemy_pieces_at_pos = get_enemy_at_pos(next_star_pos, enemys)
if next_star_enemy_at_pos != NO_ENEMY:
move_enemy_home_from_poss.append(next_star_pos)
# Globs that are not owned by an enemy
elif BORD_TILES[new_piece_pos] == TAILE_GLOB:
if enemy_at_pos != NO_ENEMY:
self.pieces[piece] = HOME_INDEX
else:
self.pieces[piece] = new_piece_pos
# Globs that are owned by an enemy
elif BORD_TILES[new_piece_pos] in LIST_TAILE_ENEMY_GLOBS:
# Get the enemy that owns the glob
globs_enemy = LIST_TAILE_ENEMY_GLOBS.index(BORD_TILES[new_piece_pos])
# Check if there is an enemy at the glob
if enemy_at_pos != NO_ENEMY:
# If it is a different enemy (not the glob's owner) then send them home and move there
if enemy_at_pos != globs_enemy:
move_enemy_home_from_poss.append(new_piece_pos)
self.pieces[piece] = new_piece_pos
# If it is the enemy that owns the glob then our own piece is sent home
else:
self.pieces[piece] = HOME_INDEX
# If there isn't any enemy at the glob then move there
else:
self.pieces[piece] = new_piece_pos
# If it is a TAILE_FREE tile, or we move from a GLOB/STAR to a case not handled above
elif BORD_TILES[old_piece_pos] == TAILE_FREE or \
BORD_TILES[new_piece_pos] == TAILE_FREE or \
BORD_TILES[old_piece_pos] == TAILE_GLOB or \
BORD_TILES[old_piece_pos] == TAILE_STAR:
if enemy_at_pos != NO_ENEMY:
move_enemy_home_from_poss.append(new_piece_pos)
self.pieces[piece] = new_piece_pos
# If the case was not caught then there is an error
else:
print("\nold_piece_pos:", old_piece_pos, "\nnew_piece_pos", new_piece_pos,
"\nBORD_TILES[old_piece_pos]:", BORD_TILES[old_piece_pos],
"\nBORD_TILES[new_piece_pos]:", BORD_TILES[new_piece_pos], "\ndice:", dice)
raise RuntimeError("The new_piece_pos case was not handled")
# Check if there is any enemy that has to be moved
if len(move_enemy_home_from_poss):
# Go through the positions that enemies have to be moved from
for pos in move_enemy_home_from_poss:
# Get the enemy at the pos
enemy_at_pos, enemy_pieces_at_pos = get_enemy_at_pos(pos, enemys)
# Check if there was an enemy at the pos
if enemy_at_pos != NO_ENEMY:
# If rule A does not apply, or there is only one enemy piece, then move the enemy home.
if not do_not_check_rule_a and not PLAY_WITH_RULE_A or len(enemy_pieces_at_pos) == 1:
for enemy_piece in enemy_pieces_at_pos:
enemys_new[enemy_at_pos][enemy_piece] = HOME_INDEX
# If there is more than one then move our own piece home
else:
self.pieces[piece] = HOME_INDEX
return enemys_new
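# Summary of the rule order applied above: pieces at the goal never move; pieces
# in the goal areal bounce back on overshoot; a DICE_MOVE_OUT_OF_HOME roll moves
# a home piece to START_INDEX; the star before the goal areal jumps straight to
# GOAL_INDEX, other stars jump on to the next star; landing on a plain glob held
# by an enemy, or on an enemy glob held by its owner, sends our own piece home;
# everything else is a plain move. Enemies found on the tiles collected in
# move_enemy_home_from_poss are then sent home, except when rule A applies and
# more than one enemy piece shares the tile, in which case our own piece is sent
# home instead.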
def set_all_pieces_to_home(self):
"""
Sets all the player's pieces to the home index
"""
self.pieces = []
for i in range(self.number_of_pieces):
self.pieces.append(HOME_INDEX)
| 37.111446 | 120 | 0.627952 |
069977843cf445131a548187d37ab6c4ed815858
| 2,090 |
py
|
Python
|
deep_continuation/utils.py
|
simonverret/deep_continuation
|
986bfba7f6806dc4869a023ff1fc1d0d18324b25
|
[
"MIT"
] | null | null | null |
deep_continuation/utils.py
|
simonverret/deep_continuation
|
986bfba7f6806dc4869a023ff1fc1d0d18324b25
|
[
"MIT"
] | null | null | null |
deep_continuation/utils.py
|
simonverret/deep_continuation
|
986bfba7f6806dc4869a023ff1fc1d0d18324b25
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import json
import argparse
def parse_file_and_command(default_dict,
help_dict=None,
description=None,
usage=None,
argv=sys.argv,
out_dict=False,
):
parser = argparse.ArgumentParser(description=description, usage=usage)
params_dict = {}
if len(argv)>1:
args_file = argv[1]
if args_file[-5:]=='.json':
if os.path.exists(args_file):
with open(args_file) as f:
params_dict = json.load(f)
print(f'using parameters from file {args_file}')
else:
raise ValueError(f'file {args_file} not found')
else:
print('using default parameters with args')
else:
print('using default parameters')
for name, default in default_dict.items():
## replace the defaults by the json file content
try: default = params_dict[name]
except KeyError: pass
try: help_str = help_dict[name]
except KeyError: help_str = 'no help available'
if type(default) is list:
if type(default[0]) is list:
parser.add_argument('--'+name, type=json.loads, default=default, help=help_str)
else:
parser.add_argument('--'+name, nargs='+', type=type(default[0]), default=default, help=help_str)
elif type(default) is bool:
parser.add_argument('--'+name, action='store_true', default=default, help=help_str)
parser.add_argument('--no_'+name, dest=name, action='store_false', default=default, help='disables '+name)
else:
parser.add_argument('--'+name, type=type(default), default=default, help=help_str)
# using parser.parse_known_args()[0] instead of parser.parse_args() preserves
# compatibility with jupyter in vscode
if out_dict:
return vars(parser.parse_known_args(argv)[0])
else:
return parser.parse_known_args(argv)[0]
class ObjectView():
def __init__(self,dict):
self.__dict__.update(dict)
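# Minimal usage sketch (hypothetical defaults, not part of the original module):
#
# if __name__ == "__main__":
#     defaults = {"lr": 0.001, "layers": [128, 128], "cuda": False}
#     args = parse_file_and_command(defaults, help_dict={}, argv=sys.argv)
#     print(args.lr, args.layers, args.cuda)
#
# Passing a JSON file as the first command-line argument overrides the defaults
# before any --flag overrides are parsed; ObjectView(d) exposes a dict d through
# attribute access (ObjectView({"lr": 0.1}).lr == 0.1).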
| 33.174603 | 120 | 0.617703 |
7882af3fe33d7dea4a705657c4cea2bcdeded95d
| 2,001 |
py
|
Python
|
migrations/versions/13241b3d4bfb_a_init.py
|
futheads/flask-restful
|
56e9a12b7a9133504ee16a7f75d4c995c137b981
|
[
"Apache-2.0"
] | 10 |
2019-05-23T15:18:15.000Z
|
2020-02-26T03:52:29.000Z
|
migrations/versions/13241b3d4bfb_a_init.py
|
rymmx-gls/flask-restful
|
56e9a12b7a9133504ee16a7f75d4c995c137b981
|
[
"Apache-2.0"
] | null | null | null |
migrations/versions/13241b3d4bfb_a_init.py
|
rymmx-gls/flask-restful
|
56e9a12b7a9133504ee16a7f75d4c995c137b981
|
[
"Apache-2.0"
] | 2 |
2019-07-26T02:17:08.000Z
|
2019-09-29T12:10:13.000Z
|
"""【A】init
Revision ID: 13241b3d4bfb
Revises:
Create Date: 2019-05-21 17:52:43.088951
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '13241b3d4bfb'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('phone_number', sa.String(length=11), nullable=True),
sa.Column('password', sa.String(length=30), nullable=True),
sa.Column('nickname', sa.String(length=30), nullable=True),
sa.Column('register_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_user_nickname'), ['nickname'], unique=False)
batch_op.create_index(batch_op.f('ix_user_phone_number'), ['phone_number'], unique=False)
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('pub_date', sa.DateTime(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('post')
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_phone_number'))
batch_op.drop_index(batch_op.f('ix_user_nickname'))
op.drop_table('user')
op.drop_table('category')
# ### end Alembic commands ###
| 33.35 | 97 | 0.676662 |
33a7340d74aa09827b2cc23d352cb61a00699f44
| 8,972 |
py
|
Python
|
weasyprint/layout/float.py
|
mousetail/WeasyPrint
|
9e82904ceb8b5c60331d0ce2b1d23aa48b54b514
|
[
"BSD-3-Clause"
] | null | null | null |
weasyprint/layout/float.py
|
mousetail/WeasyPrint
|
9e82904ceb8b5c60331d0ce2b1d23aa48b54b514
|
[
"BSD-3-Clause"
] | null | null | null |
weasyprint/layout/float.py
|
mousetail/WeasyPrint
|
9e82904ceb8b5c60331d0ce2b1d23aa48b54b514
|
[
"BSD-3-Clause"
] | null | null | null |
"""
weasyprint.float
----------------
Layout for floating boxes.
"""
from ..formatting_structure import boxes
from .min_max import handle_min_max_width
from .percentages import resolve_percentages, resolve_position_percentages
from .preferred import shrink_to_fit
from .tables import table_wrapper_width
@handle_min_max_width
def float_width(box, context, containing_block):
# Check that box.width is auto even if the caller does it too, because
# the handle_min_max_width decorator can change the value
if box.width == 'auto':
box.width = shrink_to_fit(context, box, containing_block.width)
def float_layout(context, box, containing_block, containing_page, absolute_boxes, fixed_boxes):
"""Set the width and position of floating ``box``."""
# Avoid circular imports
from .blocks import block_container_layout
from .flex import flex_layout
from .inlines import inline_replaced_box_width_height
cb_width, cb_height = (containing_block.width, containing_block.height)
resolve_percentages(box, (cb_width, cb_height), containing_page)
# TODO: This is only handled later in blocks.block_container_layout
# http://www.w3.org/TR/CSS21/visudet.html#normal-block
if cb_height == 'auto':
cb_height = (
containing_block.position_y - containing_block.content_box_y())
resolve_position_percentages(box, (cb_width, cb_height), containing_page)
if box.margin_left == 'auto':
box.margin_left = 0
if box.margin_right == 'auto':
box.margin_right = 0
if box.margin_top == 'auto':
box.margin_top = 0
if box.margin_bottom == 'auto':
box.margin_bottom = 0
clearance = get_clearance(context, box)
if clearance is not None:
box.position_y += clearance
if isinstance(box, boxes.BlockReplacedBox):
inline_replaced_box_width_height(box, containing_block)
elif box.width == 'auto':
float_width(box, context, containing_block)
if box.is_table_wrapper:
table_wrapper_width(context, box, (cb_width, cb_height), containing_page)
if isinstance(box, boxes.BlockContainerBox):
context.create_block_formatting_context()
box, _, _, _, _ = block_container_layout(context, box, containing_page, max_position_y=float('inf'),
skip_stack=None,
page_is_empty=False, absolute_boxes=absolute_boxes,
fixed_boxes=fixed_boxes, adjoining_margins=None)
context.finish_block_formatting_context(box)
elif isinstance(box, boxes.FlexContainerBox):
box, _, _, _, _ = flex_layout(context, box, max_position_y=float('inf'), skip_stack=None,
containing_block=containing_block, containing_page=containing_page, page_is_empty=False,
absolute_boxes=absolute_boxes, fixed_boxes=fixed_boxes)
else:
assert isinstance(box, boxes.BlockReplacedBox)
box = find_float_position(context, box, containing_block)
context.excluded_shapes.append(box)
return box
def find_float_position(context, box, containing_block):
"""Get the right position of the float ``box``."""
# See http://www.w3.org/TR/CSS2/visuren.html#float-position
# Point 4 is already handled as box.position_y is set according to the
# containing box top position, with collapsing margins handled
# Points 5 and 6, box.position_y is set to the highest position_y possible
if context.excluded_shapes:
highest_y = context.excluded_shapes[-1].position_y
if box.position_y < highest_y:
box.translate(0, highest_y - box.position_y)
# Points 1 and 2
position_x, position_y, available_width = avoid_collisions(
context, box, containing_block)
# Point 9
# position_y is set now, let's define position_x
# for float: left elements, it's already done!
if box.style['float'] == 'right':
position_x += available_width - box.margin_width()
box.translate(position_x - box.position_x, position_y - box.position_y)
return box
def get_clearance(context, box, collapsed_margin=0):
"""Return None if there is no clearance, otherwise the clearance value."""
clearance = None
hypothetical_position = box.position_y + collapsed_margin
# Hypothetical position is the position of the top border edge
for excluded_shape in context.excluded_shapes:
if box.style['clear'] in (excluded_shape.style['float'], 'both'):
y, h = excluded_shape.position_y, excluded_shape.margin_height()
if hypothetical_position < y + h:
clearance = max(
(clearance or 0), y + h - hypothetical_position)
return clearance
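# In other words (sketch): clearance is the extra downward offset needed so the
# box's hypothetical top border edge ends up below every excluded float that its
# 'clear' property requires it to clear; None means no offset is needed.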
def avoid_collisions(context, box, containing_block, outer=True):
excluded_shapes = context.excluded_shapes
position_y = box.position_y if outer else box.border_box_y()
box_width = box.margin_width() if outer else box.border_width()
box_height = box.margin_height() if outer else box.border_height()
if box.border_height() == 0 and box.is_floated():
return 0, 0, containing_block.width
while True:
colliding_shapes = []
for shape in excluded_shapes:
# Assign locals to avoid slow attribute lookups.
shape_position_y = shape.position_y
shape_margin_height = shape.margin_height()
if ((shape_position_y < position_y <
shape_position_y + shape_margin_height) or
(shape_position_y < position_y + box_height <
shape_position_y + shape_margin_height) or
(shape_position_y >= position_y and
shape_position_y + shape_margin_height <=
position_y + box_height)):
colliding_shapes.append(shape)
left_bounds = [
shape.position_x + shape.margin_width()
for shape in colliding_shapes
if shape.style['float'] == 'left']
right_bounds = [
shape.position_x
for shape in colliding_shapes
if shape.style['float'] == 'right']
# Set the default maximum bounds
max_left_bound = containing_block.content_box_x()
max_right_bound = \
containing_block.content_box_x() + containing_block.width
if not outer:
max_left_bound += box.margin_left
max_right_bound -= box.margin_right
# Set the real maximum bounds according to sibling float elements
if left_bounds or right_bounds:
if left_bounds:
max_left_bound = max(max(left_bounds), max_left_bound)
if right_bounds:
max_right_bound = min(min(right_bounds), max_right_bound)
# Points 3, 7 and 8
if box_width > max_right_bound - max_left_bound:
# The box does not fit here
new_position_y = min(
shape.position_y + shape.margin_height()
for shape in colliding_shapes)
if new_position_y > position_y:
# We can find a solution with a higher position_y
position_y = new_position_y
continue
# No solution, we must put the box here
break
# See https://www.w3.org/TR/CSS21/visuren.html#floats
# Boxes that can’t collide with floats are:
# - floats
# - line boxes
# - table wrappers
# - block-level replaced box
# - element establishing new formatting contexts (not handled)
assert (
(box.style['float'] in ('right', 'left')) or
isinstance(box, boxes.LineBox) or
box.is_table_wrapper or
isinstance(box, boxes.BlockReplacedBox))
# The x-position of the box depends on its type.
position_x = max_left_bound
if box.style['float'] == 'none':
if containing_block.style['direction'] == 'rtl':
if isinstance(box, boxes.LineBox):
# The position of the line is the position of the cursor, at
# the right bound.
position_x = max_right_bound
elif box.is_table_wrapper:
# The position of the right border of the table is at the right
# bound.
position_x = max_right_bound - box_width
else:
# The position of the right border of the replaced box is at
# the right bound.
assert isinstance(box, boxes.BlockReplacedBox)
position_x = max_right_bound - box_width
available_width = max_right_bound - max_left_bound
if not outer:
position_x -= box.margin_left
position_y -= box.margin_top
return position_x, position_y, available_width
| 39.699115 | 126 | 0.644338 |
7d563cae7c4ced07ca8c8ce57f219596c27493bc
| 811 |
py
|
Python
|
homeassistant/components/local_ip/config_flow.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6 |
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/local_ip/config_flow.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 60 |
2020-07-06T15:10:30.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/local_ip/config_flow.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 14 |
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Config flow for local_ip."""
from homeassistant import config_entries
from .const import DOMAIN
class SimpleConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for local_ip."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(step_id="user")
return self.async_create_entry(title=DOMAIN, data=user_input)
async def async_step_import(self, import_info):
"""Handle import from config file."""
return await self.async_step_user(import_info)
| 28.964286 | 69 | 0.710234 |
9fe972172373209f12cc00a1f15baa8ea58c9aa5
| 7,704 |
py
|
Python
|
idataapi_transform/DataProcess/DataWriter/MySQLWriter.py
|
markqiu/idataapi-transform
|
f02008745331c58f85bb1b9073aa49da25c41467
|
[
"MIT"
] | 41 |
2018-01-08T07:11:04.000Z
|
2021-03-23T03:56:53.000Z
|
idataapi_transform/DataProcess/DataWriter/MySQLWriter.py
|
baifengbai/idataapi-transform
|
537a5998e6c089bc4d71be492ea3b963cc2662d8
|
[
"MIT"
] | 4 |
2019-04-04T12:23:00.000Z
|
2021-07-23T06:13:58.000Z
|
idataapi_transform/DataProcess/DataWriter/MySQLWriter.py
|
baifengbai/idataapi-transform
|
537a5998e6c089bc4d71be492ea3b963cc2662d8
|
[
"MIT"
] | 15 |
2019-03-06T02:48:14.000Z
|
2021-12-16T11:29:40.000Z
|
import json
import asyncio
import random
import logging
import traceback
from .BaseWriter import BaseWriter
class MySQLWriter(BaseWriter):
def __init__(self, config):
super().__init__()
self.config = config
self.total_miss_count = 0
self.success_count = 0
self.table_checked = False
self.key_fields = list()
self.auto_increment_keys = set()
async def write(self, responses):
await self.config.get_mysql_pool_cli() # init mysql pool
miss_count = 0
original_length = len(responses)
if self.config.filter:
target_responses = list()
for i in responses:
i = self.config.filter(i)
if i:
target_responses.append(i)
else:
miss_count += 1
responses = target_responses
if not responses:
self.finish_once(miss_count, original_length)
return
# After filtering, there are still responses to write
if not self.table_checked:
await self.table_check(responses)
if await self.perform_write(responses):
self.finish_once(miss_count, original_length)
def __exit__(self, exc_type, exc_val, exc_tb):
self.config.free_resource()
logging.info("%s write done, total filtered %d item, total write %d item" %
(self.config.name, self.total_miss_count, self.success_count))
def __enter__(self):
return self
def finish_once(self, miss_count, original_length):
self.total_miss_count += miss_count
self.success_count += original_length
logging.info("%s write %d item, filtered %d item" % (self.config.name, original_length - miss_count, miss_count))
async def table_check(self, responses):
await self.config.cursor.execute("SHOW TABLES LIKE '%s'" % (self.config.table, ))
result = await self.config.cursor.fetchone()
if result is None:
await self.create_table(responses)
# check field
await self.config.cursor.execute("DESC %s" % (self.config.table, ))
results = await self.config.cursor.fetchall()
for field in results:
if "auto_increment" in field:
self.auto_increment_keys.add(field[0])
fields = set(i[0] for i in results)
self.key_fields = list(i[0] for i in results)
real_keys = set(responses[0].keys())
difference_set = real_keys.difference(fields)
if difference_set:
# real keys not subset of fields
raise ValueError("Field %s not in MySQL Table: %s" % (str(difference_set), self.config.table))
self.table_checked = True
async def create_table(self, responses):
test_response = dict()
for response in responses[:50]:
for k, v in response.items():
if k not in test_response:
test_response[k] = v
elif test_response[k] is None:
test_response[k] = v
elif isinstance(v, dict) or isinstance(v, list):
if len(json.dumps(test_response[k])) < len(json.dumps(v)):
test_response[k] = v
elif v is not None and test_response[k] < v:
test_response[k] = v
sql = """
CREATE TABLE `%s` (
""" % (self.config.table, )
first_field = True
for key, value in responses[0].items():
if "Count" in key:
field_type = "BIGINT"
elif value is None:
field_type = "TEXT"
elif key in ("content", ) or isinstance(value, dict) or isinstance(value, list):
field_type = "TEXT"
elif isinstance(value, bool):
field_type = "BOOLEAN"
elif isinstance(value, int):
field_type = "BIGINT"
elif isinstance(value, float):
field_type = "DOUBLE"
# varchar can store at most 65536 bytes, and utf8 occupies 1-8 bytes per character,
# so the length should be less than 65536 / 8 = 8192
# assume 4 * this field's sampled (shortest) length <= the longest allowed length (8192)
elif len(value) > 2048:
field_type = "TEXT"
else:
length = len(value) * 4
if length < 256:
length = 256
field_type = "VARCHAR(%d)" % (length, )
sql += ("\t" if first_field else "\t\t") + "`%s` %s" % (key, field_type)
if key == "id":
sql += " NOT NULL,\n"
else:
sql += ",\n"
if first_field:
first_field = False
tail_sql = """
\tPRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=%s
""" % (self.config.charset, )
sql += tail_sql
logging.info("Creating table: %s\n%s", self.config.table, sql)
await self.config.cursor.execute(sql)
await self.config.connection.commit()
logging.info("table created")
async def perform_write(self, responses):
sql = "REPLACE INTO %s VALUES " % (self.config.table, )
normal_sql = False
sql_without_auto_increment_keys = list()
for each in responses:
need_specific_sql = False
keys = list()
curr_sql = '('
for field in self.key_fields:
if field in self.auto_increment_keys and field not in each:
need_specific_sql = True
continue
val = each[field]
keys.append(field)
if isinstance(val, dict) or isinstance(val, list):
val = json.dumps(val)
if val is None:
curr_sql += 'NULL,'
else:
curr_sql += repr(val) + ","
curr_sql = curr_sql[:-1] + '),\n'
if need_specific_sql:
sql_keys = "("
for each_sql_key in keys:
sql_keys += each_sql_key + ","
sql_keys = sql_keys[:-1] + ")"
sql_without_auto_increment_keys.append("REPLACE INTO %s%s VALUES " % (self.config.table, sql_keys) + curr_sql[:-2])
else:
normal_sql = True
sql += curr_sql
sql = sql[:-2]
try_time = 0
while try_time < self.config.max_retry:
try:
ret_sql = ""
if normal_sql:
ret_sql += sql + ";\n"
if sql_without_auto_increment_keys:
ret_sql += ";\n".join(sql_without_auto_increment_keys)
ret_sql += ";"
await self.config.cursor.execute(ret_sql)
await self.config.cursor.connection.commit()
return True
except Exception as e:
try_time += 1
if try_time < self.config.max_retry:
logging.error("retry: %d, %s" % (try_time, str(e)))
await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep))
else:
logging.error("Give up MySQL writer: %s, After retry: %d times, still fail to write, "
"total write %d items, total filtered: %d items, reason: %s" %
(self.config.name, self.config.max_retry, self.success_count, self.total_miss_count,
str(traceback.format_exc())))
return False
| 39.507692 | 131 | 0.535177 |
ca5fd3530f13674255e6be70e2ff86f7fe4d2184
| 1,468 |
py
|
Python
|
src/dissues.py
|
MorganShorter/drop
|
a6aef148fd6a99b6380535882fd8bd20a844468f
|
[
"BSD-2-Clause"
] | 5 |
2015-11-07T03:03:48.000Z
|
2020-06-27T00:43:38.000Z
|
src/dissues.py
|
MorganShorter/drop
|
a6aef148fd6a99b6380535882fd8bd20a844468f
|
[
"BSD-2-Clause"
] | 7 |
2015-04-03T00:04:20.000Z
|
2021-07-08T19:40:00.000Z
|
src/dissues.py
|
MorganShorter/drop
|
a6aef148fd6a99b6380535882fd8bd20a844468f
|
[
"BSD-2-Clause"
] | 9 |
2015-04-02T22:52:26.000Z
|
2020-06-27T00:43:43.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2019, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if __name__ == '__main__':
import os, sys
from tero.dissues import main
main(sys.argv)
| 45.875 | 78 | 0.77248 |
8ef0e6ad76d2d04974ed4f25d94cbe0d5a18224c
| 6,030 |
py
|
Python
|
apprest/plugins/icat/services/ICAT.py
|
dario-palmisano/calipsoplus-backend
|
9872b6fb4fc089fc6c0879491703bd611c245369
|
[
"MIT"
] | null | null | null |
apprest/plugins/icat/services/ICAT.py
|
dario-palmisano/calipsoplus-backend
|
9872b6fb4fc089fc6c0879491703bd611c245369
|
[
"MIT"
] | null | null | null |
apprest/plugins/icat/services/ICAT.py
|
dario-palmisano/calipsoplus-backend
|
9872b6fb4fc089fc6c0879491703bd611c245369
|
[
"MIT"
] | null | null | null |
import requests
import json
import dateutil.parser
from apprest.plugins.icat.models.calipso_experiment import CalipsoExperiment
from apprest.plugins.icat.models.calipso_session import CalipsoSession
from apprest.plugins.icat.models.calipso_investigation_user import CalipsoInvestigationUser
from calipsoplus.settings_calipso import ICAT_DATA_RETRIEVAL_ENDPOINT, ICAT_PASSWORD, ICAT_PLUGIN, ICAT_USERNAME
icat_url = ICAT_DATA_RETRIEVAL_ENDPOINT
class ICATService:
def get_session_id(self):
session_id = json.loads(requests.post(icat_url + '/session', data={
"plugin": ICAT_PLUGIN,
"username": ICAT_USERNAME,
"password": ICAT_PASSWORD
}).text)['sessionId']
return session_id
def parse_data(self,data_array):
calipso_experiments = []
proposals = set()
beamlines = set()
# Multiple investigations have the same proposal_id. Get the list of proposals using a Set
for i in range(len(data_array)):
proposals.add(data_array[i]["Investigation"]["name"])
for proposal in proposals:
beamlines.clear()
calipso_experiment = CalipsoExperiment(proposal)
calipso_experiment.sessions = []
experiment_abstract = ''
for investigation in range(len(data_array)):
if proposal == str(data_array[investigation]["Investigation"]["name"]):
calipso_session = CalipsoSession(data_array[investigation]["Investigation"]["id"])
calipso_session.start_date = \
dateutil.parser.parse(data_array[investigation]["Investigation"]["createTime"])
calipso_session.end_date = dateutil.parser.parse(
data_array[investigation]["Investigation"]["endDate"])
calipso_experiment.sessions.append(calipso_session)
try:
experiment_abstract = str(data_array[investigation]["Investigation"]["summary"])
except Exception: # Some investigations don't have a summary
experiment_abstract = ''
# Add the beamline from the session to the set of beamlines for the proposal
beamlines.add(str(data_array[investigation]["Investigation"]["visitId"]))
# Add all beamlines in the experiment sessions to the experiment
# Converts a set to a string (sort of)
calipso_experiment.beam_line = ', '.join([str(i) for i in beamlines])
# All sessions of an experiment have the same summary. Use this summary for the proposal abstract (body)
calipso_experiment.body = experiment_abstract
# Add the experiment to the list of experiments
calipso_experiments.append(calipso_experiment)
return calipso_experiments
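# Sketch of the grouping above: investigations that share a proposal name are
# folded into one CalipsoExperiment; each investigation becomes a CalipsoSession
# (id, createTime, endDate), the visitId values are joined into a comma-separated
# beam_line string, and the summary of the last matching investigation (or ''
# when absent) becomes the experiment body.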
def get_public_data(self):
"""
Gets all investigations whose content is public
:return: List of CalipsoExperiment
"""
# Get the session id (authentication)
session_id = self.get_session_id()
# Get all of public investigation data and create python objects
public_investigations = json.loads(requests.get(icat_url + '/catalogue/' + session_id +
'/investigation/status/released/investigation').text)
calipso_experiments = self.parse_data(public_investigations)
return calipso_experiments
def get_my_investigations(self):
"""
Gets all my investigations, i.e. investigations that I am a participant in
:return: List of CalipsoExperiment
"""
# Get the session id (authentication)
session_id = self.get_session_id()
# Get all of public investigation data and create python objects
my_investigations = json.loads(requests.get(icat_url + '/catalogue/' + session_id + '/investigation').text)
calipso_experiments = self.parse_data(my_investigations)
return calipso_experiments
def get_embargo_data(self):
"""
Gets all investigations that are under embargo, releaseDate > NOW
:return: List of CalipsoExperiment
"""
# Get the session id (authentication)
session_id = self.get_session_id()
# Get all of embargoed investigation data and create python objects
embargoed_investigations = json.loads(requests.get(icat_url + '/catalogue/' + session_id +
'/investigation/status/embargoed/investigation').text)
calipso_experiments = self.parse_data(embargoed_investigations)
return calipso_experiments
def get_users_involved_in_investigation(self, investigation_id):
"""
Gets users involved in an investigation
:param investigation_id:
:return: List of CalipsoInvestigationUser
"""
# Get the session id (authentication)
session_id = self.get_session_id()
investigation_users = json.loads(requests.get(icat_url + '/catalogue/' + session_id +
'/investigation/id/' + str(investigation_id)
+ '/investigationusers').text)
calipso_investigation_users = []
for user in range(len(investigation_users)):
investigation_user = CalipsoInvestigationUser()
investigation_user.name = investigation_users[user]["name"]
investigation_user.full_name = investigation_users[user]["fullName"]
investigation_user.role = investigation_users[user]["role"]
investigation_user.investigation_name = investigation_users[user]["investigationName"]
investigation_user.investigation_id = investigation_users[user]["investigationId"]
calipso_investigation_users.append(investigation_user)
return calipso_investigation_users
| 45 | 116 | 0.648093 |
6d284b0ccdec1af20f39ccbddb7c0daec090a3e0
| 48 |
py
|
Python
|
main.py
|
Vasili999/data-science-from-scratch
|
8ce5685a68acd50aecbee924069679c5faa4da18
|
[
"MIT"
] | null | null | null |
main.py
|
Vasili999/data-science-from-scratch
|
8ce5685a68acd50aecbee924069679c5faa4da18
|
[
"MIT"
] | null | null | null |
main.py
|
Vasili999/data-science-from-scratch
|
8ce5685a68acd50aecbee924069679c5faa4da18
|
[
"MIT"
] | null | null | null |
import scratch.linear_algebra as ln
print(ln.B)
| 16 | 35 | 0.8125 |
ad4f0bbf85ce20b7158f88350fac475cd31df8ac
| 6,611 |
py
|
Python
|
freeipa/komand_freeipa/actions/find_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
freeipa/komand_freeipa/actions/find_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
freeipa/komand_freeipa/actions/find_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
SEARCH_PARAMETERS = "search_parameters"
class Output:
FULL_MESSAGE = "full_message"
USERS = "users"
class FindUserInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"search_parameters": {
"type": "string",
"title": "Search Parameters",
"description": "A string to look for in relevant user fields. If blank will return all users with a return limit of 40000",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class FindUserOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"full_message": {
"type": "array",
"title": "Full Message",
"description": "All stored information match the search criteria",
"items": {
"$ref": "#/definitions/find_user_output"
},
"order": 2
},
"users": {
"type": "array",
"title": "Users",
"description": "A list of users that match the search criteria",
"items": {
"type": "string"
},
"order": 1
}
},
"definitions": {
"find_user_output": {
"type": "object",
"title": "find_user_output",
"properties": {
"cn": {
"type": "array",
"title": "CN",
"description": "CN",
"items": {
"type": "string"
},
"order": 4
},
"displayname": {
"type": "array",
"title": "Display Name",
"description": "Display name",
"items": {
"type": "string"
},
"order": 14
},
"dn": {
"type": "string",
"title": "DN",
"description": "DN",
"order": 13
},
"gecos": {
"type": "array",
"title": "Gecos",
"description": "Gecos",
"items": {
"type": "string"
},
"order": 22
},
"gidnumber": {
"type": "array",
"title": "GID Number",
"description": "GID number",
"items": {
"type": "string"
},
"order": 21
},
"givenname": {
"type": "array",
"title": "Given Name",
"description": "Given name",
"items": {
"type": "string"
},
"order": 18
},
"homedirectory": {
"type": "array",
"title": "Home Directory",
"description": "Home directory",
"items": {
"type": "string"
},
"order": 6
},
"initials": {
"type": "array",
"title": "Initials",
"description": "Initials",
"items": {
"type": "string"
},
"order": 24
},
"ipantsecurityidentifier": {
"type": "array",
"title": "IPA NT Security Identifier",
"description": "IPA NT security identifier",
"items": {
"type": "string"
},
"order": 20
},
"ipasshpubkey": {
"type": "array",
"title": "IPA SSH Pub Key",
"description": "IPA SSH pub key",
"items": {
"type": "string"
},
"order": 3
},
"ipauniqueid": {
"type": "array",
"title": "IPA Unique ID",
"description": "IPA unique ID",
"items": {
"type": "string"
},
"order": 16
},
"krbcanonicalname": {
"type": "array",
"title": "Krb Canonical Name",
"description": "Krb canonical name",
"items": {
"type": "string"
},
"order": 5
},
"krbprincipalname": {
"type": "array",
"title": "Krb Principal Name",
"description": "Krb principal name",
"items": {
"type": "string"
},
"order": 17
},
"loginshell": {
"type": "array",
"title": "Login Shell",
"description": "Login shell",
"items": {
"type": "string"
},
"order": 9
},
"mail": {
"type": "array",
"title": "Mail",
"description": "Mail",
"items": {
"type": "string"
},
"order": 12
},
"memberof_group": {
"type": "array",
"title": "Member of Group",
"description": "Member of group",
"items": {
"type": "string"
},
"order": 2
},
"mepmanagedentry": {
"type": "array",
"title": "Mepmanagedentry",
"description": "Mepmanagedentry",
"items": {
"type": "string"
},
"order": 15
},
"nsaccountlock": {
"type": "boolean",
"title": "NS Account Lock",
"description": "NS account lock",
"order": 7
},
"objectclass": {
"type": "array",
"title": "Object Class",
"description": "Object class",
"items": {
"type": "string"
},
"order": 19
},
"preserved": {
"type": "boolean",
"title": "Preserved",
"description": "Preserved",
"order": 11
},
"sn": {
"type": "array",
"title": "SN",
"description": "SN",
"items": {
"type": "string"
},
"order": 23
},
"sshpubkeyfp": {
"type": "array",
"title": "SSH Pub Key FP",
"description": "SSH pub key FP",
"items": {
"type": "string"
},
"order": 1
},
"uid": {
"type": "array",
"title": "UID",
"description": "UID",
"items": {
"type": "string"
},
"order": 8
},
"uidnumber": {
"type": "array",
"title": "UID Number",
"description": "UID number",
"items": {
"type": "string"
},
"order": 10
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 23.610714 | 129 | 0.399486 |
94d235ab6305d1cda15a93984502b88d935c2a2d
| 740 |
py
|
Python
|
workflow/scripts/split_haplotagged_bam.py
|
dancooke/syntumorsizer
|
c3c4e278796efced2afc1bf3e6bf27948d6d42e0
|
[
"MIT"
] | null | null | null |
workflow/scripts/split_haplotagged_bam.py
|
dancooke/syntumorsizer
|
c3c4e278796efced2afc1bf3e6bf27948d6d42e0
|
[
"MIT"
] | null | null | null |
workflow/scripts/split_haplotagged_bam.py
|
dancooke/syntumorsizer
|
c3c4e278796efced2afc1bf3e6bf27948d6d42e0
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
import pysam as ps
def get_haplotype_ids(read):
try:
return tuple([int(id) for id in read.get_tag('HP').split(',')])
except KeyError:
return None
def split_bam(in_bam_path, out_bam_paths):
in_bam = ps.AlignmentFile(in_bam_path)
out_bams = [ps.AlignmentFile(bam, 'wb', template=in_bam) for bam in out_bam_paths]
for read in in_bam:
haplotype_ids = get_haplotype_ids(read)
if haplotype_ids is None:
haplotype = np.random.choice(len(out_bams))
else:
haplotype = np.random.choice(haplotype_ids)
out_bams[haplotype].write(read)
split_bam(Path(snakemake.input[0]), [Path(bam) for bam in snakemake.output])
| 30.833333 | 86 | 0.678378 |
258a51f083823aebc2e2f6f66a3cc35f7e16d6a3
| 542 |
py
|
Python
|
Registration system.py
|
mdhasan8/Problem_Solving
|
ac18f30ecc7d1baa4cea382c53aec16a544530be
|
[
"MIT"
] | null | null | null |
Registration system.py
|
mdhasan8/Problem_Solving
|
ac18f30ecc7d1baa4cea382c53aec16a544530be
|
[
"MIT"
] | null | null | null |
Registration system.py
|
mdhasan8/Problem_Solving
|
ac18f30ecc7d1baa4cea382c53aec16a544530be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 12:00:12 2021
@author: Easin
"""
in1 = input()
in1 = int(in1)
list1 = []
for elem in range(in1):
in2 = input()
if in2 not in list1:
print("OK")
else:
count = list1.count(in2)
var = in2+ (str(count))
'''
while var in list1:
count += 1
var = in2+ (str(count))
'''
#in2 = var
print(var)
list1.append(in2)
#print(list1)
| 16.424242 | 36 | 0.416974 |
3644e6482d732c07f3ab7e319365f64d51a11a9a
| 1,024 |
py
|
Python
|
webpage/management/commands/delete_migrations.py
|
csae8092/djtranskribus
|
efd73f5ad5752201da4df9020043db020e942e85
|
[
"MIT"
] | 2 |
2021-06-02T11:27:54.000Z
|
2021-08-25T10:29:04.000Z
|
webpage/management/commands/delete_migrations.py
|
csae8092/djtranskribus
|
efd73f5ad5752201da4df9020043db020e942e85
|
[
"MIT"
] | 86 |
2021-01-29T12:31:34.000Z
|
2022-03-28T11:41:04.000Z
|
webpage/management/commands/delete_migrations.py
|
acdh-oeaw/nabucco
|
37286484cc512c4f738db34a47c8ae5fb1c555d2
|
[
"MIT"
] | 2 |
2020-09-03T14:49:42.000Z
|
2021-02-25T10:02:55.000Z
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
cwd = settings.BASE_DIR
class Command(BaseCommand):
# Show this when the user types help
help = "Deletes all migration files of the current project"
# A command must define handle()
def handle(self, *args, **options):
deleted_files = []
counter = 0
for root, dirs, files in os.walk(cwd):
for file in files:
if 'migrations' in os.path.join(root, file) and '00' in os.path.join(root, file):
if 'myenv' in os.path.join(root, file):
pass
else:
deleted_files.append((os.path.join(root, file)))
os.remove(os.path.join(root, file))
counter += 1
self.stdout.write("Following {} files have been deleted".format(counter))
for x in deleted_files:
self.stdout.write("Deleted: {}".format(x))
| 36.571429 | 97 | 0.574219 |
f0fa542f14f23b66c0a747573f308672c37a3467
| 19,497 |
py
|
Python
|
ObitSystem/ObitSD/python/CleanOTF.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | 5 |
2019-08-26T06:53:08.000Z
|
2020-10-20T01:08:59.000Z
|
ObitSystem/ObitSD/python/CleanOTF.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | null | null | null |
ObitSystem/ObitSD/python/CleanOTF.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | 8 |
2017-08-29T15:12:32.000Z
|
2022-03-31T12:16:08.000Z
|
""" This class is for performing CLEAN on images.
This implements an OTF image plane CLEAN
It is mostly appropriate for single dish images where the support of
the dirty beam is extremely limited.
There are no restrictions on the relative sizes of the dirty image and beam.
Arguments to the constructor:
name - Name of the CLEAN object (a label)
dirty - Python Obit Image dirty image object
beam - Python Obit Image dirty beam object
clean - Extant Python Obit Image to receive, should be cloned from dirty
err - Python Obit Error/message stack
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2005,2008
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: [email protected].
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python shadow class to ObitDConCleanOTF class
import Obit, OErr, Image, FArray, Table, InfoList, OWindow, ODisplay
class CleanOTFPtr :
def __init__(self,this):
self.this = this
self.thisown = 0
#def __del__(self):
# if self.thisown == 1 :
# # If Obit has been unloaded don't bother
# if Obit.__class__ == Obit:
# Obit.delete_CleanOTF(self.this)
def __setattr__(self,name,value):
if name == "me" :
Obit.CleanOTF_me_set(self.this,value)
return
if name=="Dirty":
PSetDirty(self, value)
return
if name=="Beam":
PSetBeam(self, value)
return
if name=="Clean":
PSetClean(self, value)
return
self.__dict__[name] = value
def __getattr__(self,name):
if name == "me" :
return Obit.CleanOTF_me_get(self.this)
# Functions to return members
if name=="List":
return PGetList(self)
if name=="Dirty":
return PGetDirty(self)
if name=="Beam":
return PGetBeam(self)
if name=="Clean":
return PGetClean(self)
if name=="Size":
return PGetCleanSize(self)
raise AttributeError,name
def __repr__(self):
return "<C CleanOTF instance>"
class CleanOTF(CleanOTFPtr):
""" This class is for performing CLEAN on images.
This implements an OTF image plane CLEAN
It is mostly appropriate for single dish images where the support of
the dirty beam is extremely limited.
There are no restrictions on the relative sizes of the dirty image and beam.
Arguments to the constructor:
name - Name of the CLEAN object (a label)
dirty - Python Obit Image dirty image object
beam - Python Obit Image dirty beam object
clean - Extant Python Obit Image to receive, should be cloned from dirty
err - Python Obit Error/message stack
"""
def __init__(self, name, dirty, beam, clean, err) :
self.this = Obit.new_CleanOTF(name, dirty, beam, clean, err)
#self.me = Obit.new_CleanOTF(name, dirty, beam, clean, err)
self.thisown = 1
def __del__(self):
if Obit!=None:
Obit.delete_CleanOTF(self.this)
def DefWindow(self, err):
""" Set default window (all image)
self = Python OTF object
err = Python Obit Error/message stack
"""
PDefWindow(self, err)
# end DefWindow
def AddWindow(self, window, err):
""" Add a window
self = Python OTF object
window = set of 4 integers:
if window[0]<0 box is round and
window[1]=radius, [2,3] = center
else rectangular and
blc=(window[0],window[1]), trc= blc=(window[2],window[3])
err = Python Obit Error/message stack
"""
PAddWindow(self, window, err)
# end AddWindow
def PCreate (name, dirty, beam, clean, err):
""" Create CleanOTF Object
returns CleanOTF object
name = Name for clean
dirty = Python Obit dirty image
beam = Python Obit dirty beam
Must have same cell spacing as dirty but need not be same size
if None, use Gaussian the size of beamMaj in dirty
clean = Python Obit CLEAN image
Should be defined but need not be instantiated.
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not Image.PIsA(dirty):
raise TypeError,"dirty MUST be a Python Obit Image"
if not Image.PIsA(clean):
raise TypeError,"clean MUST be a Python Obit Image"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be a Python ObitErr"
if err.isErr: # existing error?
return None
#
if beam==None:
lbeam = Image.Image("NoBeam")
else:
if not Image.PIsA(beam):
raise TypeError,"beam MUST be a Python Obit Image"
lbeam = beam
out = CleanOTF(name, dirty.me, lbeam.me, clean.me, err.me)
if beam:
dirty.Beam = beam
return out
# end PCreate
def PGetWindow (inCleanOTF):
""" Return the member OWindow
returns OWindow
inCleanOTF = Python CleanOTF object
"""
################################################################
# Checks
if not PIsA(inCleanOTF):
raise TypeError,"inCleanOTF MUST be a Python Obit CleanOTF"
#
out = OWindow.OWindow()
out.me = Obit.CleanOTFGetWindow(inCleanOTF.me)
return out
# end PGetWindow
def PSetWindow (inCleanOTF, window):
""" Replace OWindow in the CleanOTF
inCleanOTF = Python CleanOTF object
window = Python OWindow to attach
"""
################################################################
# Checks
if not PIsA(inCleanOTF):
raise TypeError,"inCleanOTF MUST be a Python ObitCleanOTF"
if not OWindow.PIsA(window):
raise TypeError,"array MUST be a Python Obit OWindow"
#
Obit.CleanOTFSetWindow(inCleanOTF.me, window.me)
# end PSetWindow
def PDefWindow (clean, err):
""" Set default windows on image mosaic member.
If mosaic member Radius>0 then make round boxes on Fly's eye field
with this radius, else use rectangular box including all but outer 5 pixels
On outlier fields, use rectangular box of width OutlierSize.
Assumes all images in mosaic have descriptors defined.
clean = Clean object containing mosaic
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(clean):
raise TypeError,"mosaic MUST be a Python Obit CleanOTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
if err.isErr: # existing error?
return
#
Obit.CleanOTFDefWindow(clean.me, err.me)
# end PDefWindow
def PAddWindow (inCleanOTF, window, err):
""" Add a window to be CLEANed
inCleanOTF = Python CleanOTF object
window = set of 4 integers:
if window[0]<0 box is round and
window[1]=radius, [2,3] = center
else rectangular and
blc=(window[0],window[1]), trc=(window[2],window[3])
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inCleanOTF):
raise TypeError,"inCleanOTF MUST be a Python ObitCleanOTF"
if err.isErr: # existing error?
return
#
Obit.CleanOTFAddWindow(inCleanOTF.me, window, err.me)
# end PAddWindow
# Perform Clean
CleanInput={'structure':['Clean',[('CleanOTF','CleanOTF Object'),
('disp','Image display to edit window'),
('Niter','Maximum number of CLEAN iterations'),
('Patch','Beam patch in pixels [def 100]'),
('BeamSize','Restoring beam FWHM (deg)'),
('Gain','CLEAN loop gain'),
('minFlux','Minimun flux density (Jy)'),
('noResid','If True do not include residuals in restored image'),
('doRestore','If True restore components'),
('doScale','If True scale residuals in restored image by beam areas'),
('Factor','CLEAN depth factor'),
('Plane','Plane being processed, 1-rel indices of axes 3-?'),
('autoWindow','Automatically set Windows?'),
('CCVer','CC table version number [0 => highest]'),
('scale','if True, scale CCs to units of restored CLEAN image')]],
# defaults
'CleanOTF':None,
'disp':None,
'Niter':100,
'Patch':100,
'BeamSize':0.0,
'Gain':0.1,
'minFlux':0.0,
'noResid':False,
'doRestore':True,
'doScale':True,
'Factor':0.0,
'Plane':[1,1,1,1,1],
'autoWindow':False,
'CCVer':0,
'scale':True}
def PClean (err, input=CleanInput):
""" Performs image based CLEAN
The peak in the image is iteratively found and then the beam
times a fraction of the peak is subtracted and the process is iterated.
err = Python Obit Error/message stack
input = input parameter dictionary
Input dictionary entries:
CleanOTF = Input CleanOTF,
disp = Image display to edit window
Niter = Maximum number of CLEAN iterations
Patch = Beam patch in pixels [def 100]
maxPixel = Maximum number of residuals [def 20000]
BeamSize = Restoring beam (deg)
Gain = CLEAN loop gain
minFlux = Minimun flux density (Jy)
noResid = If True do not include residuals in restored image
doRestore = If True restore components
doScale = If True scale residuals in restored image by beam areas
Factor = CLEAN depth factor
Plane = Plane being processed, 1-rel indices of axes 3-?
autoWindow = True if autoWindow feature wanted.
CCVer = CC table version number
scale = If True, scale CCs to units of restored CLEAN image
"""
################################################################
# Get input parameters
inCleanOTF = input["CleanOTF"]
# Checks
if not PIsA(inCleanOTF):
print "Really is",inCleanOTF.__class__
raise TypeError,"inCleanOTF MUST be a Python Obit CleanOTF"
if err.isErr: # existing error?
return
#
dim = [1,1,1,1,1]
#
# Set control values on CleanOTF
dim[0] = 1;
inInfo = PGetList(inCleanOTF) #
InfoList.PAlwaysPutInt (inInfo, "Niter", dim, [input["Niter"]])
InfoList.PAlwaysPutInt (inInfo, "Patch", dim, [input["Patch"]])
InfoList.PAlwaysPutInt (inInfo, "CCVer", dim, [input["CCVer"]])
InfoList.PAlwaysPutFloat (inInfo, "BeamSize", dim, [input["BeamSize"]])
InfoList.PAlwaysPutFloat (inInfo, "Gain", dim, [input["Gain"]])
InfoList.PAlwaysPutFloat (inInfo, "minFlux", dim, [input["minFlux"]])
InfoList.PAlwaysPutFloat (inInfo, "Factor", dim, [input["Factor"]])
InfoList.PAlwaysPutBoolean (inInfo, "noResid", dim, [input["noResid"]])
InfoList.PAlwaysPutBoolean (inInfo, "doRestore", dim, [input["doRestore"]])
InfoList.PAlwaysPutBoolean (inInfo, "doScale", dim, [input["doScale"]])
InfoList.PAlwaysPutBoolean (inInfo, "doScaleCC", dim, [input["scale"]])
InfoList.PAlwaysPutBoolean (inInfo, "autoWindow", dim, [input["autoWindow"]])
dim[0] = len(input["Plane"])
InfoList.PAlwaysPutInt (inInfo, "Plane", dim, input["Plane"])
#
# show any errors
OErr.printErrMsg(err, "Clean: Error setting parameters")
# Edit CLEAN window?
disp = input["disp"]
if disp!=None:
window = PGetWindow(inCleanOTF)
print "Display Dirty image for editing"
ODisplay.PImage(disp, inCleanOTF.Dirty, err, window=window)
OErr.printErrMsg(err, "Error editing CLEAN boxes")
#
# if Beam Given set on dirty image
if inCleanOTF.Beam:
dirty = inCleanOTF.Dirty
dirty.Beam = inCleanOTF.Beam
# Do operation
Obit.CleanOTFClean(inCleanOTF.me, err.me)
# end PClean
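# Typical calling sequence, sketched from this module only (dirtyImage, beamImage,
# cleanImage are assumed to be Python Obit Image objects and err an OErr):
# clean = PCreate("Clean", dirtyImage, beamImage, cleanImage, err)
# PDefWindow(clean, err)        # or clean.DefWindow(err)
# inp = CleanInput.copy()
# inp["CleanOTF"] = clean; inp["Niter"] = 200; inp["minFlux"] = 0.001
# PClean(err, input=inp)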
def PRestore (inCln, err):
""" Restores components
This is done automatically unless the restoring beam size is negative
inCln = Python Obit input OTFClean
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be a Python ObitErr"
if err.isErr: # existing error?
return
#
Obit.CleanOTFRestore (inCln.me, err.me)
# end PRestore
def PGetDirty (inCln):
""" Get Dirty image
returns Dirty image as Python Obit Image
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
out = Image.Image("None")
out.me = Obit.CleanOTFGetDirty (inCln.me)
return out
# end PGetDirty
def PSetDirty (inCln, image):
""" Set Dirty image
inCln = Python Obit input OTFClean
image = Python Obit Image for dirty image
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
if not Image.PIsA(image):
raise TypeError,"Image MUST be a Python Obit Image"
#
Obit.CleanOTFSetDirty (inCln.me, image.me)
# end PSetDirty
def PGetBeam (inCln):
""" Get Beam image
returns Beam image as Python Obit Image
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
out = Image.Image("None")
out.me = Obit.CleanOTFGetBeam (inCln.me)
return out
# end PGetBeam
def PSetBeam (inCln, image):
""" Set Beam image
inCln = Python Obit input OTFClean
image = Python Obit Image for dirty image
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
if not Image.PIsA(image):
raise TypeError,"Image MUST be a Python Obit Image"
#
Obit.CleanOTFSetBeam (inCln.me, image.me)
# end PSetDirty
def PGetClean (inCln):
""" Get Clean image
returns Clean image as Python Obit Image
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
out = Image.Image("None")
out.me = Obit.CleanOTFGetClean (inCln.me)
return out
# end PGetClean
def PSetClean (inCln, image):
""" Set Clean image
inCln = Python Obit input OTFClean
image = Python Obit Image for clean image
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
if not Image.PIsA(image):
raise TypeError,"Image MUST be a Python Obit Image"
#
Obit.CleanOTFSetClean (inCln.me, image.me)
# end PSetClean
def PGetList (inCln):
""" Get InfoList
return InfoList
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
out = InfoList.InfoList()
out.me = Obit.InfoListUnref(out.me)
out.me = Obit.CleanOTFGetList (inCln.me)
return out
# end PGetList
def PGetNiter (inCln):
""" Get maximum number of CLEAN iterations
This is only set after the CLEAN has run.
returns maximum number of CLEAN iterations (int)
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
return Obit.CleanOTFGetNiter (inCln.me)
# end PGetNiter
def PGetGain (inCln):
""" Get CLEAN loop gain
This is only set after the CLEAN has run.
returns CLEAN loop gain (float)
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
return Obit.CleanOTFGetGain (inCln.me)
# end PGetGain
def PGetFlux (inCln):
""" Get min abs flux density for CLEAN
This is only set after the CLEAN has run.
returns min abs flux density for CLEAN (float)
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
return Obit.CleanOTFGetFlux (inCln.me)
# end PGetFlux
def PGetCleanSize (inCln):
""" Get CLEAN restoring beam size
This is only set after the CLEAN has run.
returns CLEAN restoring beam size in pixels (float)
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if not PIsA(inCln):
raise TypeError,"inCln MUST be a Python Obit OTFClean"
#
return Obit.CleanOTFGetCleanSize (inCln.me)
# end PGetCleanSize
def PIsA (inCln):
""" Tells if object thinks it's a Python ObitOTFClean
return true, false (1,0)
inCln = Python Obit input OTFClean
"""
################################################################
# Checks
if inCln.__class__ != CleanOTF:
return 0
return Obit.CleanOTFIsA(inCln.me)
# end PIsA
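# --- Editorial usage sketch (not part of the original Obit module) ----------
# The accessors above are thin wrappers around the underlying C OTFClean
# object. A typical read-back after a CLEAN run might look like the snippet
# below; "clean" stands for an already constructed OTFClean object and is an
# assumption for illustration only:
#
#     if PIsA(clean):
#         niter    = PGetNiter(clean)      # maximum CLEAN iterations
#         gain     = PGetGain(clean)       # CLEAN loop gain
#         minFlux  = PGetFlux(clean)       # min abs flux density
#         beamSize = PGetCleanSize(clean)  # restoring beam size (pixels)
#         cleanImg = PGetClean(clean)      # resulting CLEAN image (Obit Image)
# -----------------------------------------------------------------------------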
| 34.630551 | 104 | 0.569113 |
949308565bd51f9ab949ff3b4ad199287a216473
| 121 |
py
|
Python
|
Code/Fig3d/RunDesignC.py
|
DMREF-Hydrogel-actuated-soft-robotics/sponges
|
a27c45dbed59efb5f79318beee1b72bba7adbd91
|
[
"MIT"
] | 4 |
2020-11-09T07:20:36.000Z
|
2022-01-11T13:19:03.000Z
|
Code/Fig3d/RunDesignC.py
|
guojiyu/sponge-lattice
|
acd046dbe69b1eb3882016d533006926219a2393
|
[
"MIT"
] | null | null | null |
Code/Fig3d/RunDesignC.py
|
guojiyu/sponge-lattice
|
acd046dbe69b1eb3882016d533006926219a2393
|
[
"MIT"
] | 2 |
2021-01-14T12:41:40.000Z
|
2021-01-14T13:21:17.000Z
|
import numpy as np
DesignB = False
DesignA = False
DesignC = True
allAngles = np.linspace(0, 45, 46)  # 46 evenly spaced angle values from 0 to 45
execfile('Analysis.py')  # Python 2 builtin: runs Analysis.py in this namespace so it sees the flags above
| 9.307692 | 30 | 0.760331 |
9c80c3f62d56850e1374c9eaf22a7cb30576892c
| 11,260 |
py
|
Python
|
betdaq/resources/marketdataresources.py
|
ScoreX/betdaq
|
408ccb6f2e3ebf9acc4dc585d6b9092d6ae615f0
|
[
"MIT"
] | 13 |
2017-07-17T22:57:09.000Z
|
2022-03-25T05:19:04.000Z
|
betdaq/resources/marketdataresources.py
|
liampauling/betdaq
|
4b09c4b979665e1841de42ee3c4fe154ba3e3966
|
[
"MIT"
] | 10 |
2017-07-25T12:01:30.000Z
|
2021-08-03T15:17:03.000Z
|
betdaq/resources/marketdataresources.py
|
liampauling/betdaq
|
4b09c4b979665e1841de42ee3c4fe154ba3e3966
|
[
"MIT"
] | 11 |
2017-09-04T18:30:48.000Z
|
2021-07-26T09:04:50.000Z
|
import bisect
from betdaq.utils import make_tz_naive, price_side_map, floatify
from betdaq.enums import MarketType, MarketStatus, SelectionStatus, Polarity
def parse_deep_markets(sports):
markets = []
for sport in sports:
events = sport.get('EventClassifiers')
sub_events = []
while events:
for event in events:
markets += parse_market(
event.get('Markets', []), {'event_name': event.get('Name'),
'tournament_id': event.get('tournament_id'),
'tournament_name': event.get('tournament_name'),
'competition_id': event.get('competition_id'),
'competition_name': event.get('competition_name'),
'sport_id': sport.get('Id'),
'sport_name': sport.get('Name'),
}
)
if event.get('EventClassifiers', []):
sub_events += [{**ev, **{'competition_name': event.get('Name')
if event.get('tournament_name') and event.get('competition_name') is None
else event.get('competition_name') if event.get('competition_name')
else None,
'competition_id': event.get('Id')
if event.get('tournament_name') and event.get('competition_name') is None
else event.get('competition_id') if event.get('competition_name')
else None,
'tournament_name': event.get('tournament_name', event.get('Name')),
'tournament_id': event.get('tournament_id', event.get('Id')),
}
} for ev in event.get('EventClassifiers', [])]
events = sub_events.copy()
sub_events = []
return markets
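# Editorial note (sketch, not part of the original module): parse_deep_markets
# walks the nested EventClassifiers tree breadth-first, pushing the sport /
# tournament / competition names down to every market it finds. For a
# hypothetical tree sport -> tournament -> competition -> market, the flattened
# result is one dict per market, e.g.:
#
#     [{'market_id': 1, 'market_name': 'Match Odds', 'sport_name': 'Soccer',
#       'tournament_name': 'Premier League', 'competition_name': 'Week 1',
#       'event_name': 'Week 1', ...}]
#
# All concrete names and ids above are assumptions for illustration only.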
def parse_runners(data):
return {'runner_id': data.get('Id'),
'runner_name': data.get('Name'),
'runner_status': SelectionStatus(int(data.get('Status'))).name if data.get('Status') else None,
'reset_count': data.get('ResetCount'),
'deduction_factor': floatify(data.get('DeductionFactor')),
'runner_display_order': data.get('DisplayOrder')}
def parse_market(mkt_data, other_info):
return [{**{'runners': [parse_runners(runner) for runner in mkt.get('Selections', [])],
'market_id': mkt.get('Id'),
'market_name': mkt.get('Name'),
'market_type': MarketType(int(mkt.get('Type'))).name if mkt.get('Type') else None,
'is_play_market': mkt.get('IsPlayMarket'),
'market_status': MarketStatus(int(mkt.get('Status'))).name if mkt.get('Status') else None,
'number_of_winners': mkt.get('NumberOfWinningSelections'),
'market_start_time': make_tz_naive(mkt.get('StartTime')),
'withdrawal_sequence_number': mkt.get('WithdrawalSequenceNumber'),
'market_display_order': mkt.get('DisplayOrder'),
'enabled_for_multiples': mkt.get('IsEnabledForMultiples'),
'in_play_available': mkt.get('IsInRunningAllowed'),
'race_grade': mkt.get('RaceGrade'),
'managed_in_running': mkt.get('IsManagedWhenInRunning'),
'in_play': mkt.get('IsCurrentlyInRunning'),
'in_play_delay': mkt.get('InRunningDelaySeconds'),
'event_id': mkt.get('EventClassifierId'),
'place_payout': floatify(mkt.get('PlacePayout'))},
**other_info} for mkt in mkt_data]
def parse_sub_event(data, prefix='comp', parent='sport'):
return {
'%s_id' % prefix: data.get('Id'),
'%s_name' % prefix: data.get('Name'),
'%s_display_order' % prefix: data.get('DisplayOrder'),
'%s_id' % parent: data.get('ParentId'),
'%s_multi_allowed' % prefix: data.get('IsEnabledForMultiples'),
}
def parse_market_prices(mkt):
return {'market_id': mkt.get('Id'),
'market_name': mkt.get('Name'),
'market_type': MarketType(int(mkt.get('Type'))).name if mkt.get('Type') else None,
'market_start_time': make_tz_naive(mkt.get('StartTime')),
'runners': [parse_runner_prices(runner) for runner in mkt.get('Selections', [])],
'is_play_market': mkt.get('IsPlayMarket'),
'status': MarketStatus(int(mkt.get('Status'))) if mkt.get('Status') else None,
'number_of_winners': floatify(mkt.get('NumberOfWinningSelections')),
'withdrawal_sequence_number': mkt.get('WithdrawalSequenceNumber'),
'market_display_order': mkt.get('DisplayOrder'),
'enabled_for_multiples': mkt.get('IsEnabledForMultiples'),
'in_play_available': mkt.get('IsInRunningAllowed'),
'race_grade': mkt.get('RaceGrade'),
'managed_in_running': mkt.get('IsManagedWhenInRunning'),
'in_play': mkt.get('IsCurrentlyInRunning'),
'in_running_delay': mkt.get('InRunningDelaySeconds'),
'event_id': mkt.get('EventClassifierId'),
'market_total_matched': floatify(mkt.get('TotalMatchedAmount')),
'place_payout': floatify(mkt.get('PlacePayout')),
'market_back_matched': floatify(mkt.get('MatchedMarketForStake')),
'market_lay_matched': floatify(mkt.get('MatchedMarketAgainstStake')),
'home_team_score': mkt.get('HomeTeamScore'),
'away_team_score': mkt.get('AwayTeamScore'),
'score_type': mkt.get('ScoreType'),
}
def parse_runner_prices(runner):
return {
'runner_book': parse_runner_book(runner.get('_value_1', [])),
'runner_id': runner.get('Id'),
'runner_name': runner.get('Name'),
'runner_status': SelectionStatus(int(runner.get('Status'))).name if runner.get('Status') else None,
'runner_reset_count': floatify(runner.get('ResetCount')),
'deduction_factor': floatify(runner.get('DeductionFactor')),
'runner_back_matched_size': floatify(runner.get('MatchedSelectionForStake')),
'runner_lay_matched_size': floatify(runner.get('MatchedSelectionAgainstStake')),
'runner_last_matched_time': make_tz_naive(runner.get('LastMatchedOccurredAt')),
'runner_last_matched_price': floatify(runner.get('LastMatchedPrice')),
'runner_last_matched_back_size': floatify(runner.get('LastMatchedForSideAmount')),
'runner_last_matched_lay_size': floatify(runner.get('LastMatchedAgainstSideAmount')),
'runner_open_interest': floatify(runner.get('SelectionOpenInterest')),
'runner_market_winnings': floatify(runner.get('MarketWinnings')),
'runner_positive_winnings': floatify(runner.get('MarketPositiveWinnings')),
'runner_back_matched_same_price': floatify(runner.get('MatchedForSideAmountAtSamePrice')),
'runner_lay_matched_same_price': floatify(runner.get('MatchedAgainstSideAmountAtSamePrice')),
'runner_last_traded_same_price': make_tz_naive(runner.get('FirstMatchAtSamePriceOccurredAt')),
'runner_total_matched_orders': floatify(runner.get('NumberOrders')),
'runner_total_matched_punters': floatify(runner.get('NumberPunters')),
}
def parse_runner_book(book):
back_levels = []
lay_levels = []
order_book = {'batb': [], 'batl': []}
for level in book:
for side, order in level.items():
if order:
side = price_side_map.get(side)
if side == 'back':
bisect.insort(back_levels, floatify(order.get('Price')))
order_book['batb'].append([floatify(order.get('Price')), floatify(order.get('Stake'))])
elif side == 'lay':
bisect.insort_right(lay_levels, floatify(order.get('Price')))
order_book['batl'].append([floatify(order.get('Price')), floatify(order.get('Stake'))])
back_levels.reverse()
order_book['batb'] = [[back_levels.index(x[0]), x[0], x[1]] for x in order_book['batb']]
order_book['batl'] = [[lay_levels.index(x[0]), x[0], x[1]] for x in order_book['batl']]
return order_book
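# Editorial note (sketch, not part of the original module): parse_runner_book
# returns the available-to-back ('batb') and available-to-lay ('batl') ladders
# as lists of [level_index, price, stake]. Back levels are indexed so that 0 is
# the highest (best) back price, lay levels so that 0 is the lowest (best) lay
# price. A hypothetical two-level back side could therefore come out as:
#
#     {'batb': [[0, 2.6, 10.0], [1, 2.5, 25.0]], 'batl': [[0, 2.7, 5.0]]}
#
# The prices and stakes shown are illustrative assumptions, not real API data.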
def parse_selection_changes(chg):
return {
'runner_id': chg.get('Id'),
'runner_name': chg.get('Name'),
'runner_display_order': chg.get('DisplayOrder'),
'runner_hidden': chg.get('IsHidden'),
'runner_status': SelectionStatus(int(chg.get('Status'))) if chg.get('Status') else None,
'reset_count': chg.get('ResetCount'),
'market_id': chg.get('MarketId'),
'withdrawal_factor': chg.get('WithdrawalFactor'),
'sequence_number': chg.get('SelectionSequenceNumber'),
'cancel_orders_time': make_tz_naive(chg.get('CancelOrdersTime')),
'settlement_info': [{'settled_time': make_tz_naive(stl.get('SettlementInformation', {}).get('SettledTime')),
'void_percentage': stl.get('SettlementInformation', {}).get('VoidPercentage'),
'result': stl.get('SettlementInformation', {}).get('SettlementResultString'),
'left_side_percentage': stl.get('SettlementInformation', {}).get('LeftSideFactor'),
'right_side_percentage': stl.get('SettlementInformation', {}).get('RightSideFactor')
} for stl in chg.get('_value_1', [])]
}
def parse_market_withdrawal(data):
return {
'runner_id': data.get('SelectionId'),
'withdrawal_time': make_tz_naive(data.get('WithdrawalTime')),
'sequence_number': data.get('SequenceNumber'),
'reduction_factor': floatify(data.get('ReductionFactor')),
'compound_reduction_factor': floatify(data.get('CompoundReductionFactor')),
}
def parse_ladder(data):
return [
{'price': floatify(ol.get('price')),
'value': ol.get('representation')} for ol in data.get('Ladder', {})]
def parse_sports(sport):
return {'display_order': sport.get('DisplayOrder'),
'sport_id': sport.get('Id'),
'sport_name': sport.get('Name')}
def parse_trade_item(trade):
trd = trade.get('TradeItems', {})
return {
'traded_time': make_tz_naive(trd.get('occurredAt')),
'price': floatify(trd.get('price')),
'size': floatify(trd.get('backersStake')),
'side': Polarity(int(trd.get('tradeType'))).name if trd.get('tradeType') else None,
}
def parse_selection_trades(trades):
return {
'runner_id': trades.get('selectionId'),
'max_trade_id': trades.get('maxTradeId'),
'max_trade_id_returned': trades.get('maxTradeIdReturned'),
'trades': [parse_trade_item(t) for t in trades.get('_value_1', [])],
}
| 51.889401 | 118 | 0.585524 |
bfe18c5e041fa7a5c0199b471c04d7d57323655a
| 64 |
py
|
Python
|
osm/__init__.py
|
dcopm999/django-osm
|
7a5d87a4e64292b7726d4401f529020ead732c77
|
[
"MIT"
] | 7 |
2021-08-19T07:18:21.000Z
|
2021-09-13T19:39:18.000Z
|
osm/__init__.py
|
dcopm999/django-osm
|
7a5d87a4e64292b7726d4401f529020ead732c77
|
[
"MIT"
] | 5 |
2021-04-22T06:42:42.000Z
|
2021-09-11T15:21:22.000Z
|
osm/__init__.py
|
dcopm999/django-osm
|
7a5d87a4e64292b7726d4401f529020ead732c77
|
[
"MIT"
] | null | null | null |
__version__ = "0.1.0"
default_app_config = "osm.apps.OsmConfig"
| 21.333333 | 41 | 0.75 |
016e92b2ae7af156c6a1f0e7172f4512ff9944ef
| 3,333 |
py
|
Python
|
metashare/repository/editor/forms.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 11 |
2015-07-13T13:36:44.000Z
|
2021-11-15T08:07:25.000Z
|
metashare/repository/editor/forms.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 13 |
2015-03-21T14:08:31.000Z
|
2021-05-18T18:47:58.000Z
|
metashare/repository/editor/forms.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 12 |
2015-01-07T02:16:50.000Z
|
2021-05-18T08:25:31.000Z
|
import logging
from xml.etree.ElementTree import fromstring
from django import forms
from django.core.exceptions import ValidationError
from metashare.storage.models import ALLOWED_ARCHIVE_EXTENSIONS
from metashare.settings import LOG_HANDLER, MAXIMUM_UPLOAD_SIZE
from zipfile import is_zipfile
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
def _validate_resource_data(value):
"""
Validates that the uploaded resource data is valid.
"""
if value.size > MAXIMUM_UPLOAD_SIZE:
raise ValidationError('The maximum upload file size is {:.3} ' \
'MB!'.format(float(MAXIMUM_UPLOAD_SIZE)/(1024*1024)))
_valid_extension = False
for _allowed_extension in ALLOWED_ARCHIVE_EXTENSIONS:
if value.name.lower().endswith(_allowed_extension):
_valid_extension = True
break
if not _valid_extension:
raise ValidationError('Invalid upload file type. Valid file types ' \
'are: {}'.format(ALLOWED_ARCHIVE_EXTENSIONS))
return value
def _validate_resource_description(value):
"""
Validates that the uploaded resource description is valid.
"""
filename = value.name.lower()
if filename.endswith('.xml'):
_xml_raw = ''
for _chunk in value.chunks():
_xml_raw += _chunk
try:
_xml_tree = fromstring(_xml_raw)
        except Exception as _msg:
raise ValidationError(_msg)
elif filename.endswith('.zip'):
valid = False
try:
valid = is_zipfile(value)
value.seek(0)
except:
valid = False
if not valid:
raise ValidationError('File is not a zip file.')
else:
raise ValidationError('Invalid upload file type. XML or ZIP file required.')
# For the moment, we simply pass through the received value. Later, we
# could run an XML validation script here or perform other checks...
return value
class StorageObjectUploadForm(forms.Form):
"""
Form to upload resource data into a StorageObject instance.
"""
resource = forms.FileField(label="Resource",
help_text="You can upload resource data (<{:.3} MB) using this " \
"widget. Note that this will overwrite the current data!".format(
        float(MAXIMUM_UPLOAD_SIZE)/(1024*1024)),
validators=[_validate_resource_data])
uploadTerms = forms.BooleanField(label="Upload Terms",
help_text="By clicking this checkbox, you confirm that you have " \
"cleared permissions for the file you intend to upload.")
class ResourceDescriptionUploadForm(forms.Form):
"""
Form to upload a resource description into the Django database.
"""
description = forms.FileField(label="Resource Description(s)",
help_text="You can upload a new resource description in XML format, " \
"or many resource descriptions in a ZIP file containing XML files. " \
"Please make sure the XML files are Schema-valid before proceeding.",
validators=[_validate_resource_description])
uploadTerms = forms.BooleanField(label="Upload Terms",
help_text="By clicking this checkbox, you confirm that you have " \
"cleared permissions for the description(s) you intend to upload.")
| 33.666667 | 84 | 0.680768 |
09dcce2dab4bedc4c1b8ba5e41b6059e598c792a
| 549 |
py
|
Python
|
sample/Test_Sample3.py
|
BlueYangDroid/test-python-appium-android
|
1a5fac08ddc8c8738092ae7ee341c29bdfd63da2
|
[
"MIT"
] | 1 |
2018-08-30T06:59:00.000Z
|
2018-08-30T06:59:00.000Z
|
sample/Test_Sample3.py
|
BlueYangDroid/test-python-appium-android
|
1a5fac08ddc8c8738092ae7ee341c29bdfd63da2
|
[
"MIT"
] | null | null | null |
sample/Test_Sample3.py
|
BlueYangDroid/test-python-appium-android
|
1a5fac08ddc8c8738092ae7ee341c29bdfd63da2
|
[
"MIT"
] | 1 |
2020-01-08T14:10:43.000Z
|
2020-01-08T14:10:43.000Z
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import pytest
from sample.Test_Sample0 import TestSample0
class TestSample3(TestSample0):
# def test_answer1(self, fixtrue_env):
# print('test_answer2.1: get fixtrue_env %s' % fixtrue_env)
# assert fixtrue_env == 10
#
# def test_answer_2(self, fixtrue_env):
# print('test_answer2.2: get fixtrue_env %s' % fixtrue_env)
# assert fixtrue_env == 10
def test_answer_3(self, fixtrue_env):
print('test_answer3: --')
assert fixtrue_env == 10
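# Editorial note (sketch, not part of the original test): the 'fixtrue_env'
# fixture is defined elsewhere (e.g. in a conftest.py or in Test_Sample0) and
# is expected to provide the value 10 asserted above. A minimal stand-in,
# making no claims about the real environment setup, could look like:
#
#     import pytest
#
#     @pytest.fixture
#     def fixtrue_env():
#         # hypothetical setup/teardown around a prepared test environment
#         yield 10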
| 24.954545 | 67 | 0.657559 |
4503488d95a1128d215e90ff2279a862e5423434
| 17,706 |
py
|
Python
|
cinder/volume/drivers/netapp/api.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/netapp/api.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/netapp/api.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp api for ONTAP and OnCommand DFM.
Contains classes required to issue api calls to ONTAP and OnCommand DFM.
"""
from lxml import etree
import urllib2
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NaServer(object):
"""Encapsulates server connection logic."""
TRANSPORT_TYPE_HTTP = 'http'
TRANSPORT_TYPE_HTTPS = 'https'
SERVER_TYPE_FILER = 'filer'
SERVER_TYPE_DFM = 'dfm'
URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer'
URL_DFM = 'apis/XMLrequest'
NETAPP_NS = 'http://www.netapp.com/filer/admin'
STYLE_LOGIN_PASSWORD = 'basic_auth'
STYLE_CERTIFICATE = 'certificate_auth'
def __init__(self, host, server_type=SERVER_TYPE_FILER,
transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, username=None,
password=None):
self._host = host
self.set_server_type(server_type)
self.set_transport_type(transport_type)
self.set_style(style)
self._username = username
self._password = password
self._refresh_conn = True
def get_transport_type(self):
"""Get the transport type protocol."""
return self._protocol
def set_transport_type(self, transport_type):
"""Set the transport type protocol for api.
Supports http and https transport types.
"""
if transport_type.lower() not in (
NaServer.TRANSPORT_TYPE_HTTP,
NaServer.TRANSPORT_TYPE_HTTPS):
raise ValueError('Unsupported transport type')
self._protocol = transport_type.lower()
if self._protocol == NaServer.TRANSPORT_TYPE_HTTP:
if self._server_type == NaServer.SERVER_TYPE_FILER:
self.set_port(80)
else:
self.set_port(8088)
else:
if self._server_type == NaServer.SERVER_TYPE_FILER:
self.set_port(443)
else:
self.set_port(8488)
self._refresh_conn = True
def get_style(self):
"""Get the authorization style for communicating with the server."""
return self._auth_style
def set_style(self, style):
"""Set the authorization style for communicating with the server.
Supports basic_auth for now. Certificate_auth mode to be done.
"""
if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD,
NaServer.STYLE_CERTIFICATE):
raise ValueError('Unsupported authentication style')
self._auth_style = style.lower()
def get_server_type(self):
"""Get the target server type."""
return self._server_type
def set_server_type(self, server_type):
"""Set the target server type.
Supports filer and dfm server types.
"""
if server_type.lower() not in (NaServer.SERVER_TYPE_FILER,
NaServer.SERVER_TYPE_DFM):
raise ValueError('Unsupported server type')
self._server_type = server_type.lower()
if self._server_type == NaServer.SERVER_TYPE_FILER:
self._url = NaServer.URL_FILER
else:
self._url = NaServer.URL_DFM
self._ns = NaServer.NETAPP_NS
self._refresh_conn = True
def set_api_version(self, major, minor):
"""Set the api version."""
try:
self._api_major_version = int(major)
self._api_minor_version = int(minor)
self._api_version = str(major) + "." + str(minor)
except ValueError:
raise ValueError('Major and minor versions must be integers')
self._refresh_conn = True
def get_api_version(self):
"""Gets the api version tuple."""
if hasattr(self, '_api_version'):
return (self._api_major_version, self._api_minor_version)
return None
def set_port(self, port):
"""Set the server communication port."""
try:
int(port)
except ValueError:
raise ValueError('Port must be integer')
self._port = str(port)
self._refresh_conn = True
def get_port(self):
"""Get the server communication port."""
return self._port
def set_timeout(self, seconds):
"""Sets the timeout in seconds."""
try:
self._timeout = int(seconds)
except ValueError:
raise ValueError('timeout in seconds must be integer')
def get_timeout(self):
"""Gets the timeout in seconds if set."""
if hasattr(self, '_timeout'):
return self._timeout
return None
def get_vfiler(self):
"""Get the vfiler to use in tunneling."""
return self._vfiler
def set_vfiler(self, vfiler):
"""Set the vfiler to use if tunneling gets enabled."""
self._vfiler = vfiler
def get_vserver(self):
"""Get the vserver to use in tunneling."""
return self._vserver
def set_vserver(self, vserver):
"""Set the vserver to use if tunneling gets enabled."""
self._vserver = vserver
def set_username(self, username):
"""Set the user name for authentication."""
self._username = username
self._refresh_conn = True
def set_password(self, password):
"""Set the password for authentication."""
self._password = password
self._refresh_conn = True
def invoke_elem(self, na_element, enable_tunneling=False):
"""Invoke the api on the server."""
if na_element and not isinstance(na_element, NaElement):
            raise ValueError('NaElement must be supplied to invoke api')
request = self._create_request(na_element, enable_tunneling)
if not hasattr(self, '_opener') or not self._opener \
or self._refresh_conn:
self._build_opener()
try:
if hasattr(self, '_timeout'):
response = self._opener.open(request, timeout=self._timeout)
else:
response = self._opener.open(request)
except urllib2.HTTPError as e:
raise NaApiError(e.code, e.msg)
except Exception as e:
raise NaApiError('Unexpected error', e)
xml = response.read()
return self._get_result(xml)
def invoke_successfully(self, na_element, enable_tunneling=False):
"""Invokes api and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
result = self.invoke_elem(na_element, enable_tunneling)
if result.has_attr('status') and result.get_attr('status') == 'passed':
return result
code = result.get_attr('errno')\
or result.get_child_content('errorno')\
or 'ESTATUSFAILED'
msg = result.get_attr('reason')\
or result.get_child_content('reason')\
or 'Execution status is failed due to unknown reason'
raise NaApiError(code, msg)
def _create_request(self, na_element, enable_tunneling=False):
"""Creates request in the desired format."""
netapp_elem = NaElement('netapp')
netapp_elem.add_attr('xmlns', self._ns)
if hasattr(self, '_api_version'):
netapp_elem.add_attr('version', self._api_version)
if enable_tunneling:
self._enable_tunnel_request(netapp_elem)
netapp_elem.add_child_elem(na_element)
request_d = netapp_elem.to_string()
request = urllib2.Request(
self._get_url(), data=request_d,
headers={'Content-Type': 'text/xml', 'charset': 'utf-8'})
return request
def _enable_tunnel_request(self, netapp_elem):
"""Enables vserver or vfiler tunneling."""
if hasattr(self, '_vfiler') and self._vfiler:
if hasattr(self, '_api_major_version') and \
hasattr(self, '_api_minor_version') and \
self._api_major_version >= 1 and \
self._api_minor_version >= 7:
netapp_elem.add_attr('vfiler', self._vfiler)
else:
                raise ValueError('ontapi version has to be at least 1.7'
' to send request to vfiler')
if hasattr(self, '_vserver') and self._vserver:
if hasattr(self, '_api_major_version') and \
hasattr(self, '_api_minor_version') and \
self._api_major_version >= 1 and \
self._api_minor_version >= 15:
netapp_elem.add_attr('vfiler', self._vserver)
else:
                raise ValueError('ontapi version has to be at least 1.15'
' to send request to vserver')
def _parse_response(self, response):
"""Get the NaElement for the response."""
if not response:
raise NaApiError('No response received')
xml = etree.XML(response)
return NaElement(xml)
def _get_result(self, response):
"""Gets the call result."""
processed_response = self._parse_response(response)
return processed_response.get_child_by_name('results')
def _get_url(self):
return '%s://%s:%s/%s' % (self._protocol, self._host, self._port,
self._url)
def _build_opener(self):
if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD:
auth_handler = self._create_basic_auth_handler()
else:
auth_handler = self._create_certificate_auth_handler()
opener = urllib2.build_opener(auth_handler)
self._opener = opener
def _create_basic_auth_handler(self):
password_man = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_man.add_password(None, self._get_url(), self._username,
self._password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_man)
return auth_handler
def _create_certificate_auth_handler(self):
raise NotImplementedError()
def __str__(self):
return "server: %s" % (self._host)
class NaElement(object):
"""Class wraps basic building block for NetApp api request."""
def __init__(self, name):
"""Name of the element or etree.Element."""
if isinstance(name, etree._Element):
self._element = name
else:
self._element = etree.Element(name)
def get_name(self):
"""Returns the tag name of the element."""
return self._element.tag
def set_content(self, text):
"""Set the text string for the element."""
self._element.text = text
def get_content(self):
"""Get the text for the element."""
return self._element.text
def add_attr(self, name, value):
"""Add the attribute to the element."""
self._element.set(name, value)
def add_attrs(self, **attrs):
"""Add multiple attributes to the element."""
for attr in attrs.keys():
self._element.set(attr, attrs.get(attr))
def add_child_elem(self, na_element):
"""Add the child element to the element."""
if isinstance(na_element, NaElement):
self._element.append(na_element._element)
return
        raise ValueError('Can only add a NaElement as a child element')
def get_child_by_name(self, name):
"""Get the child element by the tag name."""
for child in self._element.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return NaElement(child)
return None
def get_child_content(self, name):
"""Get the content of the child."""
for child in self._element.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return child.text
return None
def get_children(self):
"""Get the children for the element."""
return [NaElement(el) for el in self._element.iterchildren()]
def has_attr(self, name):
"""Checks whether element has attribute."""
attributes = self._element.attrib or {}
return name in attributes.keys()
def get_attr(self, name):
"""Get the attribute with the given name."""
attributes = self._element.attrib or {}
return attributes.get(name)
def get_attr_names(self):
"""Returns the list of attribute names."""
attributes = self._element.attrib or {}
return attributes.keys()
def add_new_child(self, name, content, convert=False):
"""Add child with tag name and context.
Convert replaces entity refs to chars.
"""
child = NaElement(name)
if convert:
content = NaElement._convert_entity_refs(content)
child.set_content(content)
self.add_child_elem(child)
@staticmethod
def _convert_entity_refs(text):
"""Converts entity refs to chars to handle etree auto conversions."""
        text = text.replace("&lt;", "<")
        text = text.replace("&gt;", ">")
return text
@staticmethod
def create_node_with_children(node, **children):
"""Creates and returns named node with children."""
parent = NaElement(node)
for child in children.keys():
parent.add_new_child(child, children.get(child, None))
return parent
def add_node_with_children(self, node, **children):
"""Creates named node with children."""
parent = NaElement.create_node_with_children(node, **children)
self.add_child_elem(parent)
def to_string(self, pretty=False, method='xml', encoding='UTF-8'):
"""Prints the element to string."""
return etree.tostring(self._element, method=method, encoding=encoding,
pretty_print=pretty)
def __getitem__(self, key):
"""Dict getter method for NaElement.
Returns NaElement list if present,
text value in case no NaElement node
children or attribute value if present.
"""
child = self.get_child_by_name(key)
if child:
if child.get_children():
return child
else:
return child.get_content()
elif self.has_attr(key):
return self.get_attr(key)
raise KeyError(_('No element by given name %s.') % (key))
def __setitem__(self, key, value):
"""Dict setter method for NaElement."""
if key:
if value:
if isinstance(value, NaElement):
child = NaElement(key)
child.add_child_elem(value)
self.add_child_elem(child)
elif isinstance(value, str):
child = self.get_child_by_name(key)
if child:
child.set_content(value)
else:
self.add_new_child(key, value)
elif isinstance(value, dict):
child = NaElement(key)
child.translate_struct(value)
self.add_child_elem(child)
else:
raise TypeError(_('Not a valid value for NaElement.'))
else:
self.add_child_elem(NaElement(key))
else:
raise KeyError(_('NaElement name cannot be null.'))
def translate_struct(self, data_struct):
"""Convert list, tuple, dict to NaElement and appends.
Useful for NaElement queries which have unique
query parameters.
"""
if isinstance(data_struct, list) or isinstance(data_struct, tuple):
for el in data_struct:
self.add_child_elem(NaElement(el))
elif isinstance(data_struct, dict):
for k in data_struct.keys():
child = NaElement(k)
if (isinstance(data_struct[k], dict) or
isinstance(data_struct[k], list) or
isinstance(data_struct[k], tuple)):
child.translate_struct(data_struct[k])
else:
if data_struct[k]:
child.set_content(str(data_struct[k]))
self.add_child_elem(child)
else:
raise ValueError(_('Type cannot be converted into NaElement.'))
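# Editorial usage sketch (not part of the original module): translate_struct()
# and the dict-style item assignment turn plain Python structures into nested
# NaElement trees. Element and volume names below are illustrative assumptions:
#
#     query = NaElement('volume-get-iter')
#     query.translate_struct({'query': {'volume-attributes':
#                                       {'volume-id-attributes':
#                                        {'name': 'vol0'}}}})
#     xml_text = query.to_string(pretty=True)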
class NaApiError(Exception):
"""Base exception class for NetApp api errors."""
def __init__(self, code='unknown', message='unknown'):
self.code = code
self.message = message
def __str__(self, *args, **kwargs):
return 'NetApp api failed. Reason - %s:%s' % (self.code, self.message)
| 36.73444 | 79 | 0.606687 |
ebdfb8d8a38bd2977bbdcf3a231728fbb918a6ab
| 1,209 |
py
|
Python
|
config.py
|
Lechatelia/video_feature_extractor
|
6530c530cfc0f13b3d64e9fa7fbe9f6068545bfd
|
[
"MIT"
] | 56 |
2019-07-02T01:30:48.000Z
|
2022-03-31T03:13:20.000Z
|
torchOnVideo/cleanup/pytorch-video-feature-extractor/config.py
|
torchOnVideo/torchOnVideo
|
aa07d5661f772eca027ecc6b79e14bd68a515aa1
|
[
"MIT"
] | 8 |
2019-10-12T11:25:26.000Z
|
2021-10-15T14:28:13.000Z
|
config.py
|
chldydgh4687/pytorch-video-feature-extractor
|
1128768ae7c113286b664340c45c6e85a8560027
|
[
"MIT"
] | 17 |
2019-10-12T11:12:21.000Z
|
2022-03-31T14:22:45.000Z
|
from abc import ABC
from collections import defaultdict
class AbstractConfig(ABC):
@property
def checkpoint_path(self):
        raise NotImplementedError("You should define 'checkpoint_path'.")
@property
def mean(self):
        raise NotImplementedError("You should define 'mean'.")
@property
def std(self):
        raise NotImplementedError("You should define 'std'.")
@property
def resize_to(self):
        raise NotImplementedError("You should define 'resize_to'.")
crop_to = None
class Default(AbstractConfig):
mean = ( 0.485, 0.456, 0.406 )
std = ( 0.229, 0.224, 0.225 )
resize_to = ( 224, 224 )
class Inception(AbstractConfig):
mean = ( 0.5, 0.5, 0.5 )
std = ( 0.5, 0.5, 0.5 )
resize_to = ( 299, 299 )
class C3D(AbstractConfig):
checkpoint_path = "pretrained_models/c3d.pickle"
import numpy as np
mean = np.load("data/c3d_mean.npy")
mean = mean.squeeze(0).transpose(1, 2, 3, 0)
mean /= 255.
std = ( 0.5, 0.5, 0.5 )
resize_to = ( 171, 128 )
crop_to = ( 112, 112 )
config = defaultdict(lambda: Default)
config['inception_v3'] = Inception
config['inception_v4'] = Inception
config['c3d'] = C3D
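# Editorial usage sketch (not part of the original module): `config` is a
# defaultdict, so any model name that is not registered above silently falls
# back to the Default preprocessing settings, e.g.
#
#     cfg = config['resnet152']     # -> Default (ImageNet-style mean/std, 224x224)
#     cfg = config['inception_v3']  # -> Inception (0.5 mean/std, 299x299)
#     cfg = config['c3d']           # -> C3D (uses the precomputed c3d_mean.npy)
#
# 'resnet152' is only an assumed example key; any unknown name maps to Default.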
| 22.388889 | 74 | 0.636063 |
b1bd65516fa34944b643afc693e4908210cc742c
| 3,858 |
py
|
Python
|
test/test_errorcode.py
|
maxhgerlach/mpi4py
|
4cebe0d45ee60bc7198dedc425adffee3e86bc7b
|
[
"BSD-2-Clause"
] | 533 |
2015-03-02T05:16:27.000Z
|
2022-03-28T09:44:37.000Z
|
test/test_errorcode.py
|
maxhgerlach/mpi4py
|
4cebe0d45ee60bc7198dedc425adffee3e86bc7b
|
[
"BSD-2-Clause"
] | 105 |
2017-09-17T07:50:33.000Z
|
2022-03-29T17:27:43.000Z
|
test/test_errorcode.py
|
maxhgerlach/mpi4py
|
4cebe0d45ee60bc7198dedc425adffee3e86bc7b
|
[
"BSD-2-Clause"
] | 98 |
2015-02-03T03:17:52.000Z
|
2022-03-23T02:03:11.000Z
|
from mpi4py import MPI
import mpiunittest as unittest
class TestErrorCode(unittest.TestCase):
errorclasses = [item[1] for item in vars(MPI).items()
if item[0].startswith('ERR_')]
errorclasses.insert(0, MPI.SUCCESS)
errorclasses.remove(MPI.ERR_LASTCODE)
def testGetErrorClass(self):
self.assertEqual(self.errorclasses[0], 0)
for ierr in self.errorclasses:
errcls = MPI.Get_error_class(ierr)
self.assertTrue(errcls >= MPI.SUCCESS)
self.assertTrue(errcls <= MPI.ERR_LASTCODE)
self.assertEqual(errcls, ierr)
def testGetErrorStrings(self):
for ierr in self.errorclasses:
errstr = MPI.Get_error_string(ierr)
def testException(self):
success = MPI.Exception(MPI.SUCCESS)
lasterr = MPI.Exception(MPI.ERR_LASTCODE)
for ierr in self.errorclasses:
errstr = MPI.Get_error_string(ierr)
errcls = MPI.Get_error_class(ierr)
errexc = MPI.Exception(ierr)
self.assertEqual(errexc.error_code, ierr)
self.assertEqual(errexc.error_class, ierr)
self.assertEqual(errexc.error_string, errstr)
self.assertEqual(repr(errexc), "MPI.Exception(%d)" % ierr)
self.assertEqual(str(errexc), errstr)
self.assertEqual(int(errexc), ierr)
self.assertEqual(hash(errexc), hash(errexc.error_code))
self.assertTrue(errexc == ierr)
self.assertTrue(errexc == errexc)
self.assertFalse(errexc != ierr)
self.assertFalse(errexc != errexc)
self.assertTrue(success <= ierr <= lasterr)
self.assertTrue(success <= errexc <= lasterr)
self.assertTrue(errexc >= ierr)
self.assertTrue(errexc >= success)
self.assertTrue(lasterr >= ierr)
self.assertTrue(lasterr >= errexc)
if errexc == success:
self.assertFalse(errexc)
else:
self.assertTrue(errexc)
self.assertTrue(errexc > success)
self.assertTrue(success < errexc)
exc = MPI.Exception(MPI.SUCCESS-1)
self.assertTrue(exc, MPI.ERR_UNKNOWN)
exc = MPI.Exception(MPI.ERR_LASTCODE+1)
self.assertTrue(exc, MPI.ERR_UNKNOWN)
@unittest.skipMPI('openmpi(<1.10.0)')
def testAddErrorClass(self):
try:
errclass = MPI.Add_error_class()
except NotImplementedError:
self.skipTest('mpi-add_error_class')
self.assertTrue(errclass >= MPI.ERR_LASTCODE)
@unittest.skipMPI('openmpi(<1.10.0)')
def testAddErrorClassCodeString(self):
try:
errclass = MPI.Add_error_class()
except NotImplementedError:
self.skipTest('mpi-add_error_class')
lastused = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE)
self.assertTrue(errclass == lastused)
errstr = MPI.Get_error_string(errclass)
self.assertEqual(errstr, "")
MPI.Add_error_string(errclass, "error class")
self.assertEqual(MPI.Get_error_string(errclass), "error class")
errcode1 = MPI.Add_error_code(errclass)
errstr = MPI.Get_error_string(errcode1)
self.assertEqual(errstr, "")
MPI.Add_error_string(errcode1, "error code 1")
self.assertEqual(MPI.Get_error_class(errcode1), errclass)
self.assertEqual(MPI.Get_error_string(errcode1), "error code 1")
errcode2 = MPI.Add_error_code(errclass)
errstr = MPI.Get_error_string(errcode2)
self.assertEqual(errstr, "")
MPI.Add_error_string(errcode2, "error code 2")
self.assertEqual(MPI.Get_error_class(errcode2), errclass)
self.assertEqual(MPI.Get_error_string(errcode2), "error code 2")
if __name__ == '__main__':
unittest.main()
| 40.610526 | 72 | 0.632193 |
210d98f96b464f9dca7c11e38c77e2858f99cab2
| 4,452 |
py
|
Python
|
tests/unit/unit_test_retry.py
|
kellrott/synapsePythonClient
|
8af4b89a95140fafca2e98af2768423c3ea949fc
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/unit_test_retry.py
|
kellrott/synapsePythonClient
|
8af4b89a95140fafca2e98af2768423c3ea949fc
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/unit_test_retry.py
|
kellrott/synapsePythonClient
|
8af4b89a95140fafca2e98af2768423c3ea949fc
|
[
"Apache-2.0"
] | null | null | null |
import filecmp
import tempfile
import os
from nose.tools import assert_raises
from synapseclient.retry import RetryRequest
from synapseclient.dict_object import DictObject
def setup():
print '\n'
print '~' * 60
print os.path.basename(__file__)
print '~' * 60
class MyException(Exception):
"""Mock HTTP Exceptions"""
def __init__(self, message, **kwargs):
self.message = message
self.__dict__.update(kwargs)
def __str__(self):
return 'MyException: ' + str(self.__dict__)
class MockResponse(DictObject):
def __init__(self, *args, **kwargs):
super(MockResponse, self).__init__(*args, **kwargs)
self.headers={'content-type':'application/json'}
def json(self):
if self.status_code >= 500:
return {'reason':self.get('reason', 'Darnit!')}
else:
return {'ok':'ok'}
class Failer(object):
"""A class to generate failure for testing the RetryRequest"""
def __init__(self):
self.counter = 0
def fail_n_times_exception(self, n, status_code, message):
if self.counter < n:
self.counter += 1
response = MockResponse(reason=message, status_code=status_code)
raise MyException('Fail n times exception: ' + str(self.counter), response=response)
self.reset()
return MockResponse(status_code=200, text='it worked!')
@RetryRequest(retries=3, wait=0, verbose=True, tag='fail_n_times_decorated')
def fail_n_times_decorated(self, n, status_code, message):
if self.counter < n:
self.counter += 1
response = MockResponse(status_code=status_code, reason=message)
raise MyException('Fail n times exception: ' + str(self.counter), response=response)
self.reset()
return MockResponse(status_code=200, text=message)
def fail_n_times(self, n, result):
if self.counter < n:
self.counter += 1
return MockResponse(status_code=503, text=result)
self.reset()
return MockResponse(status_code=200, text=result)
def dont_fail(self, result):
return MockResponse(status_code=200, text=result)
def always_fail(self, result):
return MockResponse(status_code=503, text=result)
def reset(self):
self.counter = 0
def test_retry_request():
failer = Failer()
with_retry = RetryRequest(retries=3, wait=0, verbose=True)
print '\n\ndon\'t fail', '-' * 60
## test something that doesn't fail
response = with_retry(failer.dont_fail)('didn\'t fail!')
assert response.status_code == 200
print 'always fail', '-' * 60
## test something that totally borks
response = with_retry(failer.always_fail)('failed!')
assert response.status_code == 503
print 'fail 2 times', '-' * 60
## fail n times then succeed
response = with_retry(failer.fail_n_times)(2, 'fail 2 times')
assert response.status_code == 200
print 'fail 2 times', '-' * 60
response = failer.fail_n_times_decorated(2, 503, 'fail 2 times')
print 'fail 2 times', '-' * 60
response = with_retry(failer.fail_n_times_exception)(2, 502, 'fail 2 times')
print 'fail 10 times', '-' * 60
assert_raises(Exception, with_retry(failer.fail_n_times_exception), 10, 502, 'fail 10 times')
print 'errmsg', '-' * 60
failer.reset()
with_retry = RetryRequest(retries=3, wait=0, retry_errors=['The specified key does not exist.'], verbose=True)
response = with_retry(failer.fail_n_times_exception)(2, 500, 'The specified key does not exist.')
assert response.status_code==200
def test_as_decorator():
@RetryRequest(retries=3, verbose=True)
def foo(x,y):
"""docstring of foo"""
if x + y < 0:
raise Exception('Foobar exception!')
return DictObject(status_code=200, text=(x + y))
assert foo.__name__ == 'foo'
assert foo.__doc__ == "docstring of foo"
assert foo(8,3).text == 11
assert_raises(Exception, foo, -8, 3)
def test_double_wrapped():
failer = Failer()
with_retry = RetryRequest(retries=3, wait=0,
retry_status_codes=[],
retry_errors=['The specified key does not exist.'],
verbose=True, tag='key does not exist')
response = with_retry(failer.fail_n_times_decorated)(2, 500, 'The specified key does not exist.')
| 31.132867 | 114 | 0.641509 |
b78e8c254b776841ad02ffdfe9f9d510645c0ecb
| 26,715 |
py
|
Python
|
wsgidav/server/server_cli.py
|
zouquan741/wsgidav
|
6fcdf7d8cb2951c1f651ee695feb866ef4cf0683
|
[
"MIT"
] | null | null | null |
wsgidav/server/server_cli.py
|
zouquan741/wsgidav
|
6fcdf7d8cb2951c1f651ee695feb866ef4cf0683
|
[
"MIT"
] | null | null | null |
wsgidav/server/server_cli.py
|
zouquan741/wsgidav
|
6fcdf7d8cb2951c1f651ee695feb866ef4cf0683
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
server_cli
==========
:Author: Martin Wendt
:Copyright: Licensed under the MIT license, see LICENSE file in this package.
Standalone server that runs WsgiDAV.
These tasks are performed:
- Set up the configuration from defaults, configuration file, and command line
options.
- Instantiate the WsgiDAVApp object (which is a WSGI application)
- Start a WSGI server for this WsgiDAVApp object
Configuration is defined like this:
1. Get the name of a configuration file from command line option
``--config-file=FILENAME`` (or short ``-cFILENAME``).
   If this option is omitted, we use the first of ``wsgidav.yaml``,
   ``wsgidav.json``, or ``wsgidav.conf`` found in the current directory.
2. Set reasonable default settings.
3. If configuration file exists: read and use it to overwrite defaults.
4. If command line options are passed, use them to override settings:
``--host`` option overrides ``hostname`` setting.
``--port`` option overrides ``port`` setting.
``--root=FOLDER`` option creates a FilesystemProvider that publishes
FOLDER on the '/' share.
"""
from __future__ import print_function
from inspect import isfunction
from jsmin import jsmin
from pprint import pformat
from wsgidav import __version__, util
from wsgidav.default_conf import DEFAULT_CONFIG, DEFAULT_VERBOSE
from wsgidav.fs_dav_provider import FilesystemProvider
from wsgidav.wsgidav_app import WsgiDAVApp
from wsgidav.xml_tools import use_lxml
import argparse
import copy
import io
import json
import logging
import os
import platform
import sys
import traceback
import yaml
__docformat__ = "reStructuredText"
#: Try this config files if no --config=... option is specified
DEFAULT_CONFIG_FILES = ("wsgidav.yaml", "wsgidav.json", "wsgidav.conf")
_logger = logging.getLogger("wsgidav")
def _get_checked_path(path, config, must_exist=True, allow_none=True):
"""Convert path to absolute if not None."""
if path in (None, ""):
if allow_none:
return None
raise ValueError("Invalid path {!r}".format(path))
# Evaluate path relative to the folder of the config file (if any)
config_file = config.get("_config_file")
if config_file and not os.path.isabs(path):
path = os.path.normpath(os.path.join(os.path.dirname(config_file), path))
else:
path = os.path.abspath(path)
if must_exist and not os.path.exists(path):
raise ValueError("Invalid path {!r}".format(path))
return path
class FullExpandedPath(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
new_val = os.path.abspath(os.path.expanduser(values))
setattr(namespace, self.dest, new_val)
def _init_command_line_options():
"""Parse command line options into a dictionary."""
description = """\
Run a WEBDAV server to share file system folders.
Examples:
Share filesystem folder '/temp' for anonymous access (no config file used):
wsgidav --port=80 --host=0.0.0.0 --root=/temp --auth=anonymous
Run using a specific configuration file:
wsgidav --port=80 --host=0.0.0.0 --config=~/my_wsgidav.yaml
If no config file is specified, the application will look for a file named
'wsgidav.yaml' in the current directory.
See
http://wsgidav.readthedocs.io/en/latest/run-configure.html
for some explanation of the configuration file format.
"""
epilog = """\
Licensed under the MIT license.
See https://github.com/mar10/wsgidav for additional information.
"""
parser = argparse.ArgumentParser(
prog="wsgidav",
description=description,
epilog=epilog,
# allow_abbrev=False, # Py3.5+
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
# default=8080,
help="port to serve on (default: 8080)",
)
parser.add_argument(
"-H", # '-h' conflicts with --help
"--host",
dest="host",
help=(
"host to serve from (default: localhost). 'localhost' is only "
"accessible from the local computer. Use 0.0.0.0 to make your "
"application public"
),
)
parser.add_argument(
"-r",
"--root",
dest="root_path",
action=FullExpandedPath,
help="path to a file system folder to publish as share '/'.",
)
parser.add_argument(
"--auth",
choices=("anonymous", "nt", "pam-login"),
help="quick configuration of a domain controller when no config file "
"is used",
)
parser.add_argument(
"--server",
choices=SUPPORTED_SERVERS.keys(),
# default="cheroot",
help="type of pre-installed WSGI server to use (default: cheroot).",
)
parser.add_argument(
"--ssl-adapter",
choices=("builtin", "pyopenssl"),
# default="builtin",
help="used by 'cheroot' server if SSL certificates are configured "
"(default: builtin).",
)
qv_group = parser.add_mutually_exclusive_group()
qv_group.add_argument(
"-v",
"--verbose",
action="count",
default=3,
help="increment verbosity by one (default: %(default)s, range: 0..5)",
)
qv_group.add_argument(
"-q", "--quiet", default=0, action="count", help="decrement verbosity by one"
)
qv_group = parser.add_mutually_exclusive_group()
qv_group.add_argument(
"-c",
"--config",
dest="config_file",
action=FullExpandedPath,
help=(
"configuration file (default: {} in current directory)".format(
DEFAULT_CONFIG_FILES
)
),
)
qv_group.add_argument(
"--no-config",
action="store_true",
dest="no_config",
help="do not try to load default {}".format(DEFAULT_CONFIG_FILES),
)
parser.add_argument(
"-V",
"--version",
action="store_true",
help="print version info and exit (may be combined with --verbose)",
)
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
if args.root_path and not os.path.isdir(args.root_path):
msg = "{} is not a directory".format(args.root_path)
raise parser.error(msg)
if args.version:
if args.verbose >= 4:
msg = "WsgiDAV/{} Python/{} {}".format(
__version__, util.PYTHON_VERSION, platform.platform(aliased=True)
)
else:
msg = "{}".format(__version__)
print(msg)
sys.exit()
if args.no_config:
pass
# ... else ignore default config files
elif args.config_file is None:
# If --config was omitted, use default (if it exists)
for filename in DEFAULT_CONFIG_FILES:
defPath = os.path.abspath(filename)
if os.path.exists(defPath):
if args.verbose >= 3:
print("Using default configuration file: {}".format(defPath))
args.config_file = defPath
break
else:
# If --config was specified convert to absolute path and assert it exists
args.config_file = os.path.abspath(args.config_file)
if not os.path.isfile(args.config_file):
parser.error(
"Could not find specified configuration file: {}".format(
args.config_file
)
)
# Convert args object to dictionary
cmdLineOpts = args.__dict__.copy()
if args.verbose >= 5:
print("Command line args:")
for k, v in cmdLineOpts.items():
print(" {:>12}: {}".format(k, v))
return cmdLineOpts, parser
def _read_config_file(config_file, verbose):
"""Read configuration file options into a dictionary."""
config_file = os.path.abspath(config_file)
if not os.path.exists(config_file):
raise RuntimeError("Couldn't open configuration file '{}'.".format(config_file))
if config_file.endswith(".json"):
with io.open(config_file, mode="r", encoding="utf-8") as json_file:
# Minify the JSON file to strip embedded comments
minified = jsmin(json_file.read())
conf = json.loads(minified)
elif config_file.endswith(".yaml"):
with io.open(config_file, mode="r", encoding="utf-8") as yaml_file:
conf = yaml.safe_load(yaml_file)
else:
try:
import imp
conf = {}
configmodule = imp.load_source("configuration_module", config_file)
for k, v in vars(configmodule).items():
if k.startswith("__"):
continue
elif isfunction(v):
continue
conf[k] = v
except Exception:
exc_type, exc_value = sys.exc_info()[:2]
exc_info_list = traceback.format_exception_only(exc_type, exc_value)
exc_text = "\n".join(exc_info_list)
print(
"Failed to read configuration file: "
+ config_file
+ "\nDue to "
+ exc_text,
file=sys.stderr,
)
raise
conf["_config_file"] = config_file
return conf
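# Editorial sketch (not part of the original module): _read_config_file()
# accepts YAML, JSON (comments allowed, stripped via jsmin), or a plain Python
# module. A minimal YAML configuration using keys referenced elsewhere in this
# file (the concrete values are illustrative assumptions) might look like:
#
#     host: 0.0.0.0
#     port: 8080
#     verbose: 3
#     ssl_certificate: certs/server.crt
#     ssl_private_key: certs/server.key
#     provider_mapping:
#         "/": "/srv/webdav"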
def _init_config():
"""Setup configuration dictionary from default, command line and configuration file."""
cli_opts, parser = _init_command_line_options()
cli_verbose = cli_opts["verbose"]
# Set config defaults
config = copy.deepcopy(DEFAULT_CONFIG)
# Configuration file overrides defaults
config_file = cli_opts.get("config_file")
if config_file:
file_opts = _read_config_file(config_file, cli_verbose)
util.deep_update(config, file_opts)
if cli_verbose != DEFAULT_VERBOSE and "verbose" in file_opts:
if cli_verbose >= 2:
print(
"Config file defines 'verbose: {}' but is overridden by command line: {}.".format(
file_opts["verbose"], cli_verbose
)
)
config["verbose"] = cli_verbose
else:
if cli_verbose >= 2:
print("Running without configuration file.")
# Command line overrides file
if cli_opts.get("port"):
config["port"] = cli_opts.get("port")
if cli_opts.get("host"):
config["host"] = cli_opts.get("host")
if cli_opts.get("profile") is not None:
config["profile"] = True
if cli_opts.get("server") is not None:
config["server"] = cli_opts.get("server")
if cli_opts.get("ssl_adapter") is not None:
config["ssl_adapter"] = cli_opts.get("ssl_adapter")
# Command line overrides file only if -v or -q where passed:
if cli_opts.get("verbose") != DEFAULT_VERBOSE:
config["verbose"] = cli_opts.get("verbose")
if cli_opts.get("root_path"):
root_path = os.path.abspath(cli_opts.get("root_path"))
config["provider_mapping"]["/"] = FilesystemProvider(root_path)
if config["verbose"] >= 5:
# TODO: remove passwords from user_mapping
# config_cleaned = copy.deepcopy(config)
print("Configuration({}):\n{}".format(cli_opts["config_file"], pformat(config)))
if not config["provider_mapping"]:
parser.error("No DAV provider defined.")
# Quick-configuration of DomainController
auth = cli_opts.get("auth")
auth_conf = config.get("http_authenticator", {})
if auth and auth_conf.get("domain_controller"):
parser.error(
"--auth option can only be used when no domain_controller is configured"
)
if auth == "anonymous":
if config["simple_dc"]["user_mapping"]:
parser.error(
"--auth=anonymous can only be used when no user_mapping is configured"
)
auth_conf.update(
{
"domain_controller": "wsgidav.dc.simple_dc.SimpleDomainController",
"accept_basic": True,
"accept_digest": True,
"default_to_digest": True,
}
)
config["simple_dc"]["user_mapping"] = {"*": True}
elif auth == "nt":
if config.get("nt_dc"):
parser.error(
"--auth=nt can only be used when no nt_dc settings are configured"
)
auth_conf.update(
{
"domain_controller": "wsgidav.dc.nt_dc.NTDomainController",
"accept_basic": True,
"accept_digest": False,
"default_to_digest": False,
}
)
config["nt_dc"] = {}
elif auth == "pam-login":
if config.get("pam_dc"):
parser.error(
"--auth=pam-login can only be used when no pam_dc settings are configured"
)
auth_conf.update(
{
"domain_controller": "wsgidav.dc.pam_dc.PAMDomainController",
"accept_basic": True,
"accept_digest": False,
"default_to_digest": False,
}
)
config["pam_dc"] = {"service": "login"}
# print(config)
if cli_opts.get("reload"):
print("Installing paste.reloader.", file=sys.stderr)
from paste import reloader # @UnresolvedImport
reloader.install()
if config_file:
# Add config file changes
reloader.watch_file(config_file)
# import pydevd
# pydevd.settrace()
return config
def _run_paste(app, config, mode):
"""Run WsgiDAV using paste.httpserver, if Paste is installed.
See http://pythonpaste.org/modules/httpserver.html for more options
"""
from paste import httpserver
version = "WsgiDAV/{} {} Python {}".format(
__version__, httpserver.WSGIHandler.server_version, util.PYTHON_VERSION
)
_logger.info("Running {}...".format(version))
# See http://pythonpaste.org/modules/httpserver.html for more options
server = httpserver.serve(
app,
host=config["host"],
port=config["port"],
server_version=version,
# This option enables handling of keep-alive
# and expect-100:
protocol_version="HTTP/1.1",
start_loop=False,
)
if config["verbose"] >= 5:
__handle_one_request = server.RequestHandlerClass.handle_one_request
def handle_one_request(self):
__handle_one_request(self)
if self.close_connection == 1:
_logger.debug("HTTP Connection : close")
else:
_logger.debug("HTTP Connection : continue")
server.RequestHandlerClass.handle_one_request = handle_one_request
# __handle = server.RequestHandlerClass.handle
# def handle(self):
# _logger.debug("open HTTP connection")
# __handle(self)
host, port = server.server_address
if host == "0.0.0.0":
_logger.info(
"Serving on 0.0.0.0:{} view at {}://127.0.0.1:{}".format(port, "http", port)
)
else:
_logger.info("Serving on {}://{}:{}".format("http", host, port))
try:
server.serve_forever()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
def _run_gevent(app, config, mode):
"""Run WsgiDAV using gevent if gevent is installed.
See
https://github.com/gevent/gevent/blob/master/src/gevent/pywsgi.py#L1356
https://github.com/gevent/gevent/blob/master/src/gevent/server.py#L38
for more options
"""
import gevent
import gevent.monkey
gevent.monkey.patch_all()
from gevent.pywsgi import WSGIServer
server_args = {"bind_addr": (config["host"], config["port"]), "wsgi_app": app}
server_name = "WsgiDAV/{} gevent/{} Python/{}".format(
__version__, gevent.__version__, util.PYTHON_VERSION
)
# Support SSL
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
# Override or add custom args
server_args.update(config.get("server_args", {}))
protocol = "http"
if ssl_certificate:
assert ssl_private_key
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
dav_server = WSGIServer(
server_args["bind_addr"],
app,
keyfile=ssl_private_key,
certfile=ssl_certificate,
ca_certs=ssl_certificate_chain,
)
else:
dav_server = WSGIServer(server_args["bind_addr"], app)
# If the caller passed a startup event, monkey patch the server to set it
# when the request handler loop is entered
startup_event = config.get("startup_event")
if startup_event:
def _patched_start():
dav_server.start_accepting = org_start # undo the monkey patch
org_start()
_logger.info("gevent is ready")
startup_event.set()
org_start = dav_server.start_accepting
dav_server.start_accepting = _patched_start
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
try:
gevent.spawn(dav_server.serve_forever())
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
def _run__cherrypy(app, config, mode):
"""Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed."""
assert mode == "cherrypy-wsgiserver"
try:
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
_logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
_logger.warning(
" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
)
_logger.warning(" was moved to the cheroot project.")
_logger.warning(" Consider using --server=cheroot.")
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import cherrypy.wsgiserver.")
_logger.error(
"Try `pip install cherrypy` or specify another server using the --server option."
)
_logger.error("Note that starting with CherryPy 9.0, the server was moved to")
_logger.error(
"the cheroot project, so it is recommended to use `-server=cheroot`"
)
_logger.error("and run `pip install cheroot` instead.")
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
)
wsgiserver.CherryPyWSGIServer.version = server_name
# Support SSL
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
protocol = "http"
if ssl_certificate:
assert ssl_private_key
wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
}
# Override or add custom args
server_args.update(config.get("server_args", {}))
server = wsgiserver.CherryPyWSGIServer(**server_args)
# If the caller passed a startup event, monkey patch the server to set it
# when the request handler loop is entered
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick # undo the monkey patch
org_tick()
_logger.info("CherryPyWSGIServer is ready")
startup_event.set()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return
def _run_cheroot(app, config, mode):
"""Run WsgiDAV using cheroot.server if Cheroot is installed."""
assert mode == "cheroot"
try:
from cheroot import server, wsgi
# from cheroot.ssl.builtin import BuiltinSSLAdapter
# import cheroot.ssl.pyopenssl
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import Cheroot.")
_logger.error(
"Try `pip install cheroot` or specify another server using the --server option."
)
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgi.Server.version, util.PYTHON_VERSION
)
wsgi.Server.version = server_name
# Support SSL
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
ssl_adapter = config.get("ssl_adapter", "builtin")
protocol = "http"
if ssl_certificate and ssl_private_key:
ssl_adapter = server.get_ssl_adapter_class(ssl_adapter)
wsgi.Server.ssl_adapter = ssl_adapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled. Adapter: {}".format(ssl_adapter))
elif ssl_certificate or ssl_private_key:
raise RuntimeError(
"Option 'ssl_certificate' and 'ssl_private_key' must be used together."
)
# elif ssl_adapter:
# print("WARNING: Ignored option 'ssl_adapter' (requires 'ssl_certificate').")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
# File Explorer needs lot of threads (see issue #149):
"numthreads": 50,
}
# Override or add custom args
server_args.update(config.get("server_args", {}))
server = wsgi.Server(**server_args)
# If the caller passed a startup event, monkey patch the server to set it
# when the request handler loop is entered
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick # undo the monkey patch
_logger.info("wsgi.Server is ready")
startup_event.set()
org_tick()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return
def _run_flup(app, config, mode):
"""Run WsgiDAV using flup.server.fcgi if Flup is installed."""
# http://trac.saddi.com/flup/wiki/FlupServers
if mode == "flup-fcgi":
from flup.server.fcgi import WSGIServer, __version__ as flupver
elif mode == "flup-fcgi-fork":
from flup.server.fcgi_fork import WSGIServer, __version__ as flupver
else:
raise ValueError
_logger.info(
"Running WsgiDAV/{} {}/{}...".format(
__version__, WSGIServer.__module__, flupver
)
)
server = WSGIServer(
app,
bindAddress=(config["host"], config["port"]),
# debug=True,
)
try:
server.run()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
def _run_wsgiref(app, config, mode):
"""Run WsgiDAV using wsgiref.simple_server, on Python 2.5+."""
# http://www.python.org/doc/2.5.2/lib/module-wsgiref.html
from wsgiref.simple_server import make_server, software_version
version = "WsgiDAV/{} {}".format(__version__, software_version)
_logger.info("Running {}...".format(version))
_logger.warning(
"WARNING: This single threaded server (wsgiref) is not meant for production."
)
httpd = make_server(config["host"], config["port"], app)
try:
httpd.serve_forever()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
def _run_ext_wsgiutils(app, config, mode):
"""Run WsgiDAV using ext_wsgiutils_server from the wsgidav package."""
from wsgidav.server import ext_wsgiutils_server
_logger.info(
"Running WsgiDAV {} on wsgidav.ext_wsgiutils_server...".format(__version__)
)
_logger.warning(
"WARNING: This single threaded server (ext-wsgiutils) is not meant for production."
)
try:
ext_wsgiutils_server.serve(config, app)
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
SUPPORTED_SERVERS = {
"paste": _run_paste,
"gevent": _run_gevent,
"cheroot": _run_cheroot,
"cherrypy": _run__cherrypy,
"ext-wsgiutils": _run_ext_wsgiutils,
"flup-fcgi": _run_flup,
"flup-fcgi_fork": _run_flup,
"wsgiref": _run_wsgiref,
}
def run():
config = _init_config()
util.init_logging(config)
app = WsgiDAVApp(config)
server = config["server"]
handler = SUPPORTED_SERVERS.get(server)
if not handler:
        raise RuntimeError(
            "Unsupported server type {!r} (expected one of: {})".format(
                server, ", ".join(SUPPORTED_SERVERS)
            )
        )
if not use_lxml and config["verbose"] >= 3:
_logger.warning(
"Could not import lxml: using xml instead (up to 10% slower). "
"Consider `pip install lxml`(see https://pypi.python.org/pypi/lxml)."
)
handler(app, config, server)
if __name__ == "__main__":
# Just in case...
from multiprocessing import freeze_support
freeze_support()
run()
| 32.186747 | 102 | 0.619727 |
8d73ceb6277f3a5ca9727d2ac29fb3c17eca8152
| 12,741 |
py
|
Python
|
tools/pull_api.py
|
munoztd0/IQMGraphs
|
492d5c828b9b5e6ffe9639f1c099eb1297b303b3
|
[
"MIT"
] | 2 |
2020-11-05T11:12:29.000Z
|
2021-09-28T18:02:58.000Z
|
BIDS/MRIQC/tools/pull_api.py
|
munoztd0/OBIWAN
|
f0301e8ae972d75bfb96a7b7f6b9672201b621ee
|
[
"CC0-1.0"
] | null | null | null |
BIDS/MRIQC/tools/pull_api.py
|
munoztd0/OBIWAN
|
f0301e8ae972d75bfb96a7b7f6b9672201b621ee
|
[
"CC0-1.0"
] | null | null | null |
import json
import requests
import re
import pandas as pd
import dateparser
from urllib.request import urlopen
from pandas.io.json import json_normalize
def backend_query_api(stype, filters):
""" Query the MRIQC API using 3 element conditional statement.
Args:
stype (string): Scan type. Supported: 'bold','T1w',or 'T2w'.
filters (list): List of conditional phrases consisting of:
keyword to query + conditional argument + value. All
conditions checked against API as and phrases.
Returns: A pandas dataframe of all MRIQC entries that satisfy the
contitional statement (keyword condition value).
"""
url_root = 'https://mriqc.nimh.nih.gov/api/v1/' + stype
print('Search currently slow. Running through approximately '
'12k possible pages...')
print('Checking %d search phrases' % len(filters))
# complex search line working?
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=1000&where=bids_meta.MultibandAccelerationFactor%3C8&RepetitionTime=0.72&page=3
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=1000&where=bids_meta.MultibandAccelerationFactor%3C8&bids_meta.RepetitionTime=0.72&page=3
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=1000&where{"bids_meta.MultibandAccelerationFactor": {"$gte":"3"}}
# looks like API limits at a max results of 1k
if isinstance(filters, str):
filters_str = filters
elif isinstance(filters, list):
filters_str = '&'.join(filters)
else:
raise ValueError("The filters can either be a list of strings or a "
"string")
dfs = []
# for phrase in args:
# try:
# del last_page
# except:
# pass
print('\nPhrase: ' + filters_str)
page = 0
while True:
# Give quick page update
if page == 0:
pass
else:
if page % 10 == 0:
print('On page %d' % page + '...')
else:
pass
### CHANGE THIS TO OPENING A LOCAL API DUMP IN THE FUTURE ##
page_url = url_root + '?max_results=1000&page=%d' % page
print(page_url)
# page_url = url_root + '?max_results=1000&where=bids_meta.' + \
# filters_str + '&page=%d' % page
# print(page_url)
with urlopen(page_url) as url:
data = json.loads(url.read().decode())
try:
last_page
except NameError:
last_page = data['_links']['last']['href'].split('=')[-1]
print('Searching through %s pages...' % last_page)
dfs.append(json_normalize(data['_items']))
if page > int(last_page):
break
## TEMPORARY BREAK FOR HACKADEMY TESTING ##
# elif page == 15:
# break
else:
page += 1
print('Done searching!')
print(len(dfs))
# Concatenate all into pandas df
df = pd.concat(dfs, ignore_index=True, sort=True)
## if it's OR, remove duplicates, if it's AND, *only* take duplicates??
## Figure out a good way to do the sorting here ##
# remove duplicates from df
df_unique = df.groupby('provenance.md5sum').mean()
print(df_unique.head())
return df_unique
def mriqc_url(modality, filters='', page_number=0, max_page_results=1000):
url_root = 'https://mriqc.nimh.nih.gov/api/v1/' + modality
page = '&page={}'.format(page_number)
max_results = '?max_results=%s' % max_page_results
filters_prefix = "&where="
if isinstance(filters, str):
if not filters.startswith(filters_prefix):
filters_str = filters_prefix + filters
else:
filters_str = filters
elif isinstance(filters, list):
filters_str = filters_prefix + '&'.join(filters)
else:
raise TypeError("filters must be either a string of a list of strings")
page_url = url_root + max_results + filters_str + page
return page_url
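# Editor's note: illustrative example of the URL this helper builds:
#   mriqc_url("bold", '{"bids_meta.RepetitionTime":2}', page_number=1)
#   -> 'https://mriqc.nimh.nih.gov/api/v1/bold?max_results=1000'
#      '&where={"bids_meta.RepetitionTime":2}&page=1'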
def request_page(url):
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
r = requests.get(url, headers=headers)
return r
def database_info(url):
data = request_page(url).json()
page = data['_meta']['page']
if len(data['_items']) == 0:
raise ValueError("Page {} is empty".format(page))
try:
print(re.findall("page=(\d*)&", data['_links']['last']['href'])[0])
except KeyError:
print("no last page attribute")
print(data['_links']['self']['href'])
# last_page = re.findall("page=(\d*)&", data['_links']['last']['href'])[0]
# self_page = re.findall("page=(\d*)&", data['_links']['self']['href'])[0]
# total_results = data['_meta']['total']
# page = data['_meta']['page']
# return last_page, self_page, data['_meta']# page, data
def store_page(data, out_csv=None, append=True):
df = json_normalize(data['_items'])
if out_csv is not None:
if append:
with open(out_csv, 'a') as f:
df.to_csv(f)
else:
df.to_csv(out_csv)
return df
def pull_pages(modality, filters='', page_number=-1, max_page_results=1000,
out_csv=None, append=True):
page_url = mriqc_url(modality, filters, page_number, max_page_results)
request_res = request_page(page_url)
data = request_res.json()
page = data['_meta']['page']
if len(data['_items']) == 0:
raise ValueError("Page {} is empty".format(page))
if page == 0:
# continue
print('todo')
try:
last_page = re.findall(
r"page=(\d*)&", data['_links']['last']['href']
)[0]
except KeyError:
print("Page {} is the last page".format(page))
'''
In[29]: data.keys()
Out[29]: dict_keys(['_items', '_links', '_meta'])
data['_items'] is a list of dictionaries
In[30]: data['_links'].keys()
Out[30]: dict_keys(['parent', 'self', 'next', 'last', 'prev'])
In [31]: data['_meta'].keys()
Out[31]: dict_keys(['page', 'max_results', 'total'])
'''
df = store_page(data, out_csv, append)
# print(type(data))
# print(str(data))
return df
# curl -X GET "https://mriqc.nimh.nih.gov/api/v1/bold?max_results=10&where=bids_meta.MultibandAccelerationFactor%3C8&bids_meta.RepetitionTime=0.72&page=3" -H "accept: application/json"
#
# curl -X GET "https://mriqc.nimh.nih.gov/api/v1/bold?max_results=10&where=bids_meta.MultibandAccelerationFactor>3&bids_meta.RepetitionTime=0.72&bids_meta.EchoTime=0.03&page=3" -H "accept: application/json" > ../toto.json
#
# url1 = "https://mriqc.nimh.nih.gov/api/v1/bold?max_results=130&where=bids_meta.MultibandAccelerationFactor>3&bids_meta.RepetitionTime=0.72&bids_meta.EchoTime=0.03&page=0"
# url2 = "https://mriqc.nimh.nih.gov/api/v1/bold?max_results=130&where=bids_meta.MultibandAccelerationFactor>3&bids_meta.RepetitionTime=0.72&bids_meta.EchoTime=0.03&page=86060"
# payload = open("payload.json")
# headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
# r = requests.get(url1, headers=headers)
# r2 = requests.get(url2)
url_root = 'https://mriqc.nimh.nih.gov/api/v1/bold'
response = requests.get(
url_root,
    # A plain dict cannot hold two 'where' keys (the second silently replaced
    # the first), so both intended conditions are merged into one query document.
    params={'page': '3',
            'where': '{"bids_meta.MultibandAccelerationFactor":3,'
                     '"bids_meta.RepetitionTime":1}'},
)
print(response.url)
print(len(response.json()['_items']))
print(response.json()['_meta'])
data = response.json()
def tata(data):
for v in data['_items']:
try:
r1 = v['bids_meta']['RepetitionTime']
except KeyError:
r1 = None
try:
r2 = v['bids_meta']['MultibandAccelerationFactor']
except KeyError:
r2 = None
try:
r3 = v['bids_meta']['EchoTime']
except KeyError:
r3 = None
if r1 is not None:
print(str(r1))
else:
print('None')
if r2 is not None:
print(str(r2))
else:
print('None')
if r3 is not None:
print(str(r3))
else:
print('None')
print()
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=25&where={%22bids_meta.RepetitionTime%22:2,%22bids_meta.task_id%22:%22balloonanalogrisktask%22}
# "Mon, 9 May 2016 12:00:00 GMT"
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=25&where={"_updated":"Sun, 04 Jun 2017 04:19:33 GMT"}
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=25&where={"_updated":{"$gt":"Sun, 04 Jun 2017 04:19:33 GMT"}}
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=25&where={"bids_meta.RepetitionTime":{"$gt":2}}
# https://mriqc.nimh.nih.gov/api/v1/bold?max_results=25&where={"_updated":{">":"Mon, 9 May 2016 12:00:00 GMT"}}
# date_obj = datetime.strptime(date_input, '%m/%d/%Y')
# from datetime import datetime
# import dateutil
date_input = '07/15/2017 10:55:50'
date_obj = dateparser.parse(date_input)
good_format = date_obj.strftime('%a, %d %b %Y %H:%M:%S GMT')
print(good_format)
filter = '{"_updated":{"$gt":"%s"}}' % good_format
url = mriqc_url('bold', filter, 3, 30)
r = request_page(url)
keys = ['_updated', '_created']
gt_str = '{"_updated":{"$gt":"Sun, 04 Jun 2017 04:19:33 GMT"}, "_updated":{"$lt":"Sun, 11 Jun 2017 04:19:33 GMT"}, "bids_meta.RepetitionTime":2}'
gt_list = ["_updated>06/04/2017 04:19:33"]
url = mriqc_url('bold', gt_str, max_page_results=30)
r = request_page(url)
r.status_code
len(r.json()['_items'])
print(str([item['_updated'] for item in r.json()['_items']]))
print(str([item['bids_meta']['RepetitionTime'] for item in r.json()['_items']]))
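# Editor's note: an equivalent, less error-prone way to build the `where`
# document above is json.dumps (illustrative sketch only):
#
#     import json
#     where_doc = {
#         "_updated": {"$gt": "Sun, 04 Jun 2017 04:19:33 GMT",
#                      "$lt": "Sun, 11 Jun 2017 04:19:33 GMT"},
#         "bids_meta.RepetitionTime": 2,
#     }
#     url = mriqc_url('bold', json.dumps(where_doc), max_page_results=30)
#     r = request_page(url)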
def aq(string):
"""
Add the quotes " characters around a string if they are not already there.
Parameters
----------
string : str
just a string to be formatted
Returns
-------
str
a string with " character at the beginning and the end
"""
tmp = string
if not string.startswith('"'):
tmp = '"' + tmp
if not string.endswith('"'):
tmp = tmp + '"'
return tmp
def find_date(arg):
if isinstance(arg, str):
re.findall(r'\"_updated\":((\d|-)+)*', gt_str)
re.search(r'\"_updated\":((\d|-)+)*', gt_str)
elif isinstance(arg, list):
print("todo")
else:
raise TypeError("arg can be either a string or a list")
def add_date(s):
"""
Parameters
----------
s : str
a date string it can be in any format handled by the datetime package
Returns
-------
str
date string in mongodb format
"""
date_obj = dateparser.parse(s)
mongodb_format = date_obj.strftime('%a, %d %b %Y %H:%M:%S GMT')
return aq(mongodb_format)
def format_operator(op):
"""
Translate operators into mongodb syntax operators
Parameters
----------
op : str
an operator in python/bash/mongodb format
Returns
-------
op_out : str
operator in mongodb syntax
"""
# op_list = ['>', '>=', '<', '<=', '=', '==', ':', '<>', '!=']
# op_list_letters_dollar = ['$' + s for s in op_list_letters]
op_list_letters = ['gt', 'ge', 'le', 'lt', 'eq', 'ne']
op_dict_invert = {
'$gt': ['>', 'gt'],
'$gte': ['>=', 'ge', '$ge'],
'$lt': ['<', 'lt'],
'$lte': ['<=', 'le', '$le'],
'$eq': ['=', '==', ':', 'eq'],
'$ne': ['<>', '!=', 'ne']
}
for key in op_dict_invert.keys():
op_dict_invert[key].append(key)
# associate all the operators to mongodb operators
op_dict = dict((v, k) for k in op_dict_invert for v in op_dict_invert[k])
for k in op_dict_invert.keys():
op_dict[k] = k
return aq(op_dict[op])
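# Editor's note: illustrative examples of the mapping performed above:
#   format_operator('>=') -> '"$gte"'
#   format_operator('!=') -> '"$ne"'
#   format_operator(':')  -> '"$eq"'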
def add_operator(operator_str, string):
"""
Parameters
----------
operator_str : str
an operator to be added to the string
string : str
a string shaped like "key:val"
Returns
-------
element : str
"key":{"operator":"val"}
"""
tmp = string.split(':')
key = tmp[0]
val = tmp[1]
element = '%s:{%s:%s}' % (aq(key), format_operator(operator_str), aq(val))
return element
def add_filter(req_filter, request=''):
"""
Parameters
----------
req_filter : str
string shaped like "key":"val" or "key":{"op":"val"}
request : str (optional)
request string shaped like: {"key1":{"op1":"val1"}[,"key2":{"op2":"val2"}]*}
Returns
-------
str
a string shaped like {["key_i":{"op_i":"val_i"},]*, "key", "val"}
"""
if request == "":
return "{%s}" % req_filter
else:
return request[:-1] + ',' + req_filter + '}'
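# Editor's note: putting the helpers together (illustrative sketch only):
#
#     part1 = add_operator('>=', 'bids_meta.RepetitionTime:2')
#     # -> '"bids_meta.RepetitionTime":{"$gte":"2"}'
#     query = add_filter(part1)
#     # -> '{"bids_meta.RepetitionTime":{"$gte":"2"}}'
#     query = add_filter('"bids_meta.EchoTime":"0.03"', query)
#     # -> '{"bids_meta.RepetitionTime":{"$gte":"2"},"bids_meta.EchoTime":"0.03"}'
#     url = mriqc_url('bold', query, max_page_results=30)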
| 31.69403 | 221 | 0.591241 |
2c31aa9c12e7d42b14f4f8ed5b37e9c011cc847f
| 2,744 |
py
|
Python
|
src/sentry/api/endpoints/project_rule_details.py
|
apragacz/sf-sentry
|
2fdd6c1195c29a1d401d1cd538c22ea68556699a
|
[
"BSD-3-Clause"
] | 1 |
2018-03-05T15:40:12.000Z
|
2018-03-05T15:40:12.000Z
|
src/sentry/api/endpoints/project_rule_details.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 5 |
2020-07-17T11:20:41.000Z
|
2021-05-09T12:16:53.000Z
|
src/sentry/api/endpoints/project_rule_details.py
|
zaasmi/codeerrorhelp
|
1ab8d3e314386b9b2d58dad9df45355bf6014ac9
|
[
"BSD-3-Clause"
] | 2 |
2021-01-26T09:53:39.000Z
|
2022-03-22T09:01:47.000Z
|
from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint, ProjectSettingPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.rest_framework import RuleSerializer
from sentry.models import AuditLogEntryEvent, Rule, RuleStatus
class ProjectRuleDetailsEndpoint(ProjectEndpoint):
permission_classes = [ProjectSettingPermission]
def get(self, request, project, rule_id):
"""
Retrieve a rule
Return details on an individual rule.
{method} {path}
"""
rule = Rule.objects.get(
project=project,
id=rule_id,
status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE],
)
return Response(serialize(rule, request.user))
def put(self, request, project, rule_id):
"""
Update a rule
Update various attributes for the given rule.
{method} {path}
{{
"name": "My rule name",
"conditions": [],
"actions": [],
"actionMatch": "all"
}}
"""
rule = Rule.objects.get(
project=project,
id=rule_id,
)
serializer = RuleSerializer(
{
'actionMatch': rule.data.get('action_match') or Rule.DEFAULT_ACTION_MATCH,
'frequency': rule.data.get('frequency') or Rule.DEFAULT_FREQUENCY,
},
context={'project': project},
data=request.DATA,
partial=True
)
if serializer.is_valid():
rule = serializer.save(rule=rule)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=rule.id,
event=AuditLogEntryEvent.RULE_EDIT,
data=rule.get_audit_log_data(),
)
return Response(serialize(rule, request.user))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, project, rule_id):
"""
Delete a rule
"""
rule = Rule.objects.get(
project=project,
id=rule_id,
status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE],
)
rule.update(status=RuleStatus.PENDING_DELETION)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=rule.id,
event=AuditLogEntryEvent.RULE_REMOVE,
data=rule.get_audit_log_data(),
)
return Response(status=202)
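# Editor's note (illustrative only): the PUT body documented in the docstring
# above would be a JSON object of the form
#
#     {"name": "My rule name", "actionMatch": "all",
#      "conditions": [], "actions": []}
#
# sent to the project rule details endpoint; the concrete URL, authentication
# and rule id depend on the deployment and are not specified here.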
| 29.505376 | 90 | 0.587464 |
7e7615c3932225cadf7e6d5c9d4ac6c4a7abd714
| 2,030 |
py
|
Python
|
2_DeepLearning-Keras/01_Keras_Boston_Dataset_Regression/network.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
2_DeepLearning-Keras/01_Keras_Boston_Dataset_Regression/network.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
2_DeepLearning-Keras/01_Keras_Boston_Dataset_Regression/network.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
# Dataset
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
y_train = y_train.reshape(-1, 1)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)
y_test = y_test.reshape(-1, 1)
# Dataset variables
num_features = x_train.shape[1]
num_targets = y_train.shape[1]
train_size = x_train.shape[0]
test_size = x_test.shape[0]
def r_squared(y_true, y_pred):
numerator = tf.math.reduce_sum(tf.math.square(tf.math.subtract(y_true, y_pred)))
y_true_mean = tf.math.reduce_mean(y_true)
denominator = tf.math.reduce_sum(tf.math.square(tf.math.subtract(y_true, y_true_mean)))
r2 = tf.math.subtract(1.0, tf.math.divide(numerator, denominator))
r2_clipped = tf.clip_by_value(r2, clip_value_min=0.0, clip_value_max=1.0)
return r2_clipped
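# Editor's note: r_squared above computes the coefficient of determination
#   R^2 = 1 - sum((y_true - y_pred)^2) / sum((y_true - mean(y_true))^2)
# and clips the result into [0, 1] so strongly mis-fitting batches do not
# report negative values.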
# Model parameters
init_w = RandomUniform(minval=-1.0, maxval=1.0)
init_b = Constant(value=0.0)
lr = 0.005
optimizer = Adam(lr=lr)
epochs = 1000
batch_size = 512
# Define the DNN
model = Sequential()
model.add(Dense(units=16, kernel_initializer=init_w, bias_initializer=init_b, input_shape=(num_features, )))
model.add(Activation("relu"))
model.add(Dense(units=num_targets, kernel_initializer=init_w, bias_initializer=init_b,))
model.summary()
# Compile, train and evaluate the model
# Prepare the network
model.compile(
loss="mse",
optimizer=optimizer,
metrics=[r_squared])
# Train the network
model.fit(
x=x_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
    validation_data=(x_test, y_test))
# Evaluate the network
score = model.evaluate(x_test, y_test, verbose=0)
print("Score: ", score)
| 29.42029 | 108 | 0.757635 |
0e0a060c49cbcaa9245b858ddbeeb88f20fba6e8
| 2,059 |
py
|
Python
|
homeassistant/util/distance.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6 |
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
homeassistant/util/distance.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 58 |
2020-08-03T07:33:02.000Z
|
2022-03-31T06:02:05.000Z
|
homeassistant/util/distance.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 14 |
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Distance util functions."""
from numbers import Number
from typing import Callable, Dict
from homeassistant.const import (
LENGTH,
LENGTH_CENTIMETERS,
LENGTH_FEET,
LENGTH_INCHES,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
LENGTH_MILLIMETERS,
LENGTH_YARD,
UNIT_NOT_RECOGNIZED_TEMPLATE,
)
VALID_UNITS = [
LENGTH_KILOMETERS,
LENGTH_MILES,
LENGTH_FEET,
LENGTH_METERS,
LENGTH_CENTIMETERS,
LENGTH_MILLIMETERS,
LENGTH_INCHES,
LENGTH_YARD,
]
TO_METERS: Dict[str, Callable[[float], float]] = {
LENGTH_METERS: lambda meters: meters,
LENGTH_MILES: lambda miles: miles * 1609.344,
LENGTH_YARD: lambda yards: yards * 0.9144,
LENGTH_FEET: lambda feet: feet * 0.3048,
LENGTH_INCHES: lambda inches: inches * 0.0254,
LENGTH_KILOMETERS: lambda kilometers: kilometers * 1000,
LENGTH_CENTIMETERS: lambda centimeters: centimeters * 0.01,
LENGTH_MILLIMETERS: lambda millimeters: millimeters * 0.001,
}
METERS_TO: Dict[str, Callable[[float], float]] = {
LENGTH_METERS: lambda meters: meters,
LENGTH_MILES: lambda meters: meters * 0.000621371,
LENGTH_YARD: lambda meters: meters * 1.09361,
LENGTH_FEET: lambda meters: meters * 3.28084,
LENGTH_INCHES: lambda meters: meters * 39.3701,
LENGTH_KILOMETERS: lambda meters: meters * 0.001,
LENGTH_CENTIMETERS: lambda meters: meters * 100,
LENGTH_MILLIMETERS: lambda meters: meters * 1000,
}
def convert(value: float, unit_1: str, unit_2: str) -> float:
"""Convert one unit of measurement to another."""
if unit_1 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_1, LENGTH))
if unit_2 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_2, LENGTH))
if not isinstance(value, Number):
raise TypeError(f"{value} is not of numeric type")
    if unit_1 == unit_2:
return value
meters: float = TO_METERS[unit_1](value)
return METERS_TO[unit_2](meters)
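# Editor's note: minimal usage sketch (not part of the original module).
if __name__ == "__main__":
    # 5 miles expressed in kilometers; the expected result is roughly 8.047.
    print(convert(5.0, LENGTH_MILES, LENGTH_KILOMETERS))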
| 30.279412 | 77 | 0.71151 |
fc9b11d0b00cef74b0cd59b2601562a1a9b2c117
| 608 |
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/choropleth/colorbar/_tickmode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76 |
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/choropleth/colorbar/_tickmode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11 |
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/choropleth/colorbar/_tickmode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11 |
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="tickmode", parent_name="choropleth.colorbar", **kwargs
):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "linear", "array"]),
**kwargs
)
| 35.764706 | 81 | 0.628289 |
a97e8c65d0664ba462bef59b6b9b1c171bdefb52
| 12,176 |
py
|
Python
|
training/training_code/deeplab/vis.py
|
yil532/MAX-Image-Segmenter
|
a9e51645061f15b49b71bf1d7c6682c59843517b
|
[
"Apache-2.0"
] | 34 |
2018-07-11T23:38:45.000Z
|
2022-03-29T03:06:26.000Z
|
training/training_code/deeplab/vis.py
|
yil532/MAX-Image-Segmenter
|
a9e51645061f15b49b71bf1d7c6682c59843517b
|
[
"Apache-2.0"
] | 48 |
2018-07-30T19:15:49.000Z
|
2022-03-11T23:59:24.000Z
|
training/training_code/deeplab/vis.py
|
yil532/MAX-Image-Segmenter
|
a9e51645061f15b49b71bf1d7c6682c59843517b
|
[
"Apache-2.0"
] | 23 |
2018-07-30T20:29:26.000Z
|
2021-12-27T08:21:12.000Z
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Segmentation results visualization on a given set of images.
See model.py for more details and usage.
"""
import os.path
import time
import numpy as np
import tensorflow as tf
from deeplab import common
from deeplab import model
from deeplab.datasets import data_generator
from deeplab.utils import save_annotation
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for visualizing the model.
flags.DEFINE_integer('vis_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_multi_integer('vis_crop_size', [513, 513],
'Crop size [height, width] for visualization.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pqr',
'Name of the segmentation dataset.')
flags.DEFINE_string('vis_split', 'val',
'Which split of the dataset used for visualizing results')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')
flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes'],
'Visualization colormap type.')
flags.DEFINE_boolean('also_save_raw_predictions', False,
'Also save raw predictions.')
flags.DEFINE_integer('max_number_of_iterations', 0,
'Maximum number of visualization iterations. Will loop '
'indefinitely upon nonpositive values.')
# The folder where semantic segmentation predictions are saved.
_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results'
# The folder where raw semantic segmentation predictions are saved.
_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results'
# The format to save image.
_IMAGE_FORMAT = '%06d_image'
# The format to save prediction
_PREDICTION_FORMAT = '%06d_prediction'
# To evaluate Cityscapes results on the evaluation server, the labels used
# during training should be mapped to the labels for evaluation.
_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
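# Editor's note (illustrative): with the Cityscapes mapping above, a pixel
# predicted as train id 0 is rewritten to eval id 7, train id 1 to eval id 8,
# and so on; values outside the mapping list are left untouched.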
def _process_batch(sess, original_images, semantic_predictions, image_names,
image_heights, image_widths, image_id_offset, save_dir,
raw_save_dir, train_id_to_eval_id=None):
"""Evaluates one single batch qualitatively.
Args:
sess: TensorFlow session.
original_images: One batch of original images.
semantic_predictions: One batch of semantic segmentation predictions.
image_names: Image names.
image_heights: Image heights.
image_widths: Image widths.
image_id_offset: Image id offset for indexing images.
save_dir: The directory where the predictions will be saved.
raw_save_dir: The directory where the raw predictions will be saved.
train_id_to_eval_id: A list mapping from train id to eval id.
"""
(original_images,
semantic_predictions,
image_names,
image_heights,
image_widths) = sess.run([original_images, semantic_predictions,
image_names, image_heights, image_widths])
num_image = semantic_predictions.shape[0]
for i in range(num_image):
image_height = np.squeeze(image_heights[i])
image_width = np.squeeze(image_widths[i])
original_image = np.squeeze(original_images[i])
semantic_prediction = np.squeeze(semantic_predictions[i])
crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
# Save image.
save_annotation.save_annotation(
original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
add_colormap=False)
# Save prediction.
save_annotation.save_annotation(
crop_semantic_prediction, save_dir,
_PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
colormap_type=FLAGS.colormap_type)
if FLAGS.also_save_raw_predictions:
image_filename = os.path.basename(image_names[i])
if train_id_to_eval_id is not None:
crop_semantic_prediction = _convert_train_id_to_eval_id(
crop_semantic_prediction,
train_id_to_eval_id)
save_annotation.save_annotation(
crop_semantic_prediction, raw_save_dir, image_filename,
add_colormap=False)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Get dataset-dependent information.
dataset = data_generator.Dataset(
dataset_name=FLAGS.dataset,
split_name=FLAGS.vis_split,
dataset_dir=FLAGS.dataset_dir,
batch_size=FLAGS.vis_batch_size,
crop_size=FLAGS.vis_crop_size,
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
model_variant=FLAGS.model_variant,
is_training=False,
should_shuffle=False,
should_repeat=False)
train_id_to_eval_id = None
if dataset.dataset_name == data_generator.get_cityscapes_dataset_name():
tf.logging.info('Cityscapes requires converting train_id to eval_id.')
train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID
# Prepare for visualization.
tf.gfile.MakeDirs(FLAGS.vis_logdir)
save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(save_dir)
raw_save_dir = os.path.join(
FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(raw_save_dir)
tf.logging.info('Visualizing on %s set', FLAGS.vis_split)
with tf.Graph().as_default():
samples = dataset.get_one_shot_iterator().get_next()
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
crop_size=FLAGS.vis_crop_size,
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
if FLAGS.min_resize_value and FLAGS.max_resize_value:
# Only support batch_size = 1, since we assume the dimensions of original
# image after tf.squeeze is [height, width, 3].
assert FLAGS.vis_batch_size == 1
# Reverse the resizing and padding operations performed in preprocessing.
# First, we slice the valid regions (i.e., remove padded region) and then
# we resize the predictions back.
original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
original_image_shape = tf.shape(original_image)
predictions = tf.slice(
predictions,
[0, 0, 0],
[1, original_image_shape[0], original_image_shape[1]])
resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]),
tf.squeeze(samples[common.WIDTH])])
predictions = tf.squeeze(
tf.image.resize_images(tf.expand_dims(predictions, 3),
resized_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True), 3)
num_iteration = 0
max_num_iteration = FLAGS.max_number_of_iterations
checkpoints_iterator = [FLAGS.checkpoint_dir]
for checkpoint_path in checkpoints_iterator:
print(checkpoint_path)
if max_num_iteration > 0 and num_iteration > max_num_iteration:
break
num_iteration += 1
tf.logging.info(
'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
tf.logging.info('Visualizing with model %s', checkpoint_path)
tf.train.get_or_create_global_step()
scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer())
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
batch = 0
image_id_offset = 0
while not sess.should_stop():
tf.logging.info('Visualizing batch %d', batch + 1)
_process_batch(sess=sess,
original_images=samples[common.ORIGINAL_IMAGE],
semantic_predictions=predictions,
image_names=samples[common.IMAGE_NAME],
image_heights=samples[common.HEIGHT],
image_widths=samples[common.WIDTH],
image_id_offset=image_id_offset,
save_dir=save_dir,
raw_save_dir=raw_save_dir,
train_id_to_eval_id=train_id_to_eval_id)
image_id_offset += FLAGS.vis_batch_size
batch += 1
tf.logging.info(
'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('vis_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
| 38.900958 | 80 | 0.687582 |
18ab570289aa5e765be96a05a2bb697f8e8139c8
| 3,720 |
py
|
Python
|
Bio/SCOP/Dom.py
|
barwil/biopython
|
477a5efc1325d5158ac1d7bbd11adcb8efe9ed5e
|
[
"PostgreSQL"
] | 1 |
2016-05-09T08:14:49.000Z
|
2016-05-09T08:14:49.000Z
|
Bio/SCOP/Dom.py
|
barwil/biopython
|
477a5efc1325d5158ac1d7bbd11adcb8efe9ed5e
|
[
"PostgreSQL"
] | null | null | null |
Bio/SCOP/Dom.py
|
barwil/biopython
|
477a5efc1325d5158ac1d7bbd11adcb8efe9ed5e
|
[
"PostgreSQL"
] | 6 |
2020-02-26T16:34:20.000Z
|
2020-03-04T15:34:00.000Z
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Gavin E. Crooks 2001-11-07 :
# Interface and comments modified to reflect changes to the SCOP
# module, and to SCOP itself.
""" Handle the SCOP DOMain file.
The DOM file has been officially deprecated. For more information see
the SCOP"release notes.":http://scop.berkeley.edu/release-notes-1.55.html
The DOM files for older releases can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
"""
from Residues import Residues
class Record:
"""Holds information for one SCOP domain.
sid -- The SCOP ID of the entry, e.g. d1anu1
residues -- The domain definition as a Residues object
hierarchy -- A string specifying where this domain is in the hierarchy.
"""
def __init__(self, line=None):
self.sid = ''
self.residues = []
self.hierarchy = ''
if line:
self._process(line)
def _process(self, line):
"""Parses DOM records.
Records consist of 4 tab deliminated fields;
sid, pdbid, residues, hierarchy
"""
#For example ::
#
#d1sctg_ 1sct g: 1.001.001.001.001.001
#d1scth_ 1sct h: 1.001.001.001.001.001
#d1flp__ 1flp - 1.001.001.001.001.002
#d1moh__ 1moh - 1.001.001.001.001.002
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 4:
raise ValueError("I don't understand the format of %s" % line)
self.sid, pdbid, res, self.hierarchy = columns
self.residues = Residues(res)
        self.residues.pdbid = pdbid
def __str__(self):
s = []
s.append(self.sid)
        s.append(str(self.residues).replace(" ", "\t"))
s.append(self.hierarchy)
return "\t".join(s) + "\n"
def parse(handle):
"""Iterates over a DOM file, returning a Dom record for each line
in the file.
Arguments:
handle -- file-like object.
"""
for line in handle:
yield Record(line)
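# Editor's note: minimal usage sketch (the file name below is hypothetical):
#
#     with open("dir.dom.scop.txt_1.55") as handle:
#         for record in parse(handle):
#             print(record.sid, record.hierarchy)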
class Iterator:
"""Iterates over a DOM file.
"""
def __init__(self, handle, parser=None):
"""Create an object that iterates over a DES file.
handle -- file-like object.
parser -- an optional Parser object to change the results into
another form. If set to None, then the raw contents
of the file will be returned.
"""
import warnings
warnings.warn("Bio.SCOP.Dom.Iterator is deprecated. Please use Bio.SCOP.Dom.parse() instead.", DeprecationWarning)
from types import FileType, InstanceType
if type(handle) is not FileType and type(handle) is not InstanceType:
raise ValueError("I expected a file handle or file-like object")
self._handle = handle
self._parser = parser
def next(self):
line = self._handle.readline()
if not line:
return None
if self._parser is not None:
return self._parser.parse(line)
return line
class Parser:
def parse(self, entry):
"""Returns a Dom.Record """
import warnings
warnings.warn("""Bio.SCOP.Dom.Parser is deprecated.
Instead of
parser = Dom.Parser()
record = parser.parse(entry)
please use
record = Dom.Record(entry)
""", DeprecationWarning)
return Record(entry)
| 30.243902 | 122 | 0.607527 |
a3da2b9d3e5d066dc5249b2b87a3ad92a434bfcf
| 12,422 |
py
|
Python
|
ui/app/base/routes.py
|
ccamacho/kubeinit
|
d9098b6545627074cf83f5c9a56ad752995dea30
|
[
"Apache-2.0"
] | 36 |
2020-08-01T07:31:15.000Z
|
2020-09-04T05:39:32.000Z
|
ui/app/base/routes.py
|
ccamacho/kubeinit
|
d9098b6545627074cf83f5c9a56ad752995dea30
|
[
"Apache-2.0"
] | 9 |
2020-08-03T17:07:29.000Z
|
2020-08-28T14:26:53.000Z
|
ui/app/base/routes.py
|
ccamacho/kubeinit
|
d9098b6545627074cf83f5c9a56ad752995dea30
|
[
"Apache-2.0"
] | 14 |
2020-08-02T01:07:37.000Z
|
2020-08-25T13:14:02.000Z
|
#!/usr/bin/env python
"""
Copyright kubeinit contributors.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from datetime import date
import app
from app import version as kubeinit_ui_version
from app.base import blueprint
from app.base.k8sclient import (cluster_name_configured,
state_namespaces,
state_nodes,
state_pods,
web_terminal)
from flask import jsonify, redirect, render_template, request, url_for
from google.cloud import firestore
# , session
# from flask_login import (current_user,
# login_required,
# login_user,
# logout_user)
from models import DataCenter, db
from pystol.lister import list_actions, show_actions
KUBEINIT_VERSION = kubeinit_ui_version.__version__
#
# Begin authentication
#
try:
from app.auth.routes import get_session_data
# from app.auth.util import remote_cluster
except ImportError:
print("Module not available")
try:
fdb = firestore.Client()
transaction = fdb.transaction()
except Exception as e:
print("Cant connect to firestore: %s" % (e))
#
# End authentication
#
@blueprint.route('/error-<error>')
def route_errors(error):
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return render_template('errors/{}.html'.format(error))
# API endpoints
@blueprint.route('/api/v1/ListActions', methods=['GET'])
def api_list_actions():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(list_actions())
@blueprint.route('/api/v1/ShowActions', methods=['GET'])
def api_show_actions():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(show_actions())
@blueprint.route('/api/v1/StateNamespaces', methods=['GET'])
def api_state_namespaces():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_namespaces())
@blueprint.route('/api/v1/StateNodes', methods=['GET'])
def api_state_nodes():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_nodes())
@blueprint.route('/api/v1/StatePods', methods=['GET'])
def api_state_pods():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_pods())
@blueprint.route('/api/v1/Terminal', methods=['GET'])
def api_web_terminal():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(web_terminal())
@blueprint.route('/api/v1/ClusterName', methods=['GET'])
def api_cluster_name_configured():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(cluster_name_configured())
@blueprint.route('/shutdown')
def shutdown():
"""
Define a route.
This is a main routing method
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
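# Editor's note: the 'werkzeug.server.shutdown' hook used above only exists
# when running under Werkzeug's built-in development server, and newer
# Werkzeug releases have removed it entirely; treat this route as a
# development-only convenience.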
@blueprint.errorhandler(404)
def not_found_error(error):
"""
Define a route.
This is a main routing method
"""
return render_template('page-404.html',
template_folder="../home/templates/"), 404
@blueprint.errorhandler(500)
def internal_error(error):
"""
Define a route.
This is a main routing method
"""
return render_template('page-500.html',
template_folder="../home/templates/"), 500
@blueprint.route('/api/v1/DataCenters', methods=['GET'])
def get_datacenters():
"""
Define a route.
This is a main routing method for getting the datacenters
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
print("------ Get DataCenters ------")
return jsonify(DataCenter.query.all())
# return jsonify({'hola':'mundo'})
@blueprint.route('/api/v1/AddDataCenter', methods=['GET'])
def add_datacenters():
"""
Define a route.
This is a main routing method for getting the datacenters
"""
#
# Basic authentication module requirement
# If the auth module is installed and the user is not authenticated, so go to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
cluster_1 = DataCenter(availability_zone='john',
airport_name='john',
name='john',
created=date.today(),
location='john')
db.session.add(cluster_1)
db.session.commit()
print("------ Get DataCenters ------")
dc = DataCenter.query.all()
print(dc)
# return jsonify(DataCenter.query.all())
return jsonify({'hola': 'mundo'})
# Errors
# @login_manager.unauthorized_handler
# def unauthorized_handler():
# """
# Define a route.
#
# This is a main routing method
# """
# return render_template('page-403.html',
# template_folder="../home/templates/"), 403
# @blueprint.errorhandler(403)
# def access_forbidden(error):
# """
# Define a route.
#
# This is a main routing method
# """
# return render_template('page-403.html',
# template_folder="../home/templates/"), 403
| 28.360731 | 109 | 0.636612 |
5701fc98039f80a6a91273b94ae5eea8fb43af1e
| 21,003 |
py
|
Python
|
research/object_detection/data_decoders/tf_example_decoder.py
|
taegoobot/models
|
2c54560546b17d3766a12f248e5a57f5e65995a8
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/data_decoders/tf_example_decoder.py
|
taegoobot/models
|
2c54560546b17d3766a12f248e5a57f5e65995a8
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/data_decoders/tf_example_decoder.py
|
taegoobot/models
|
2c54560546b17d3766a12f248e5a57f5e65995a8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import label_map_util
slim_example_decoder = tf.contrib.slim.tfexample_decoder
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
name_to_id_table = tf.contrib.lookup.HashTable(
initializer=tf.contrib.lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
display_name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=True)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
display_name_to_id_table = tf.contrib.lookup.HashTable(
initializer=tf.contrib.lookup.KeyValueTensorInitializer(
keys=tf.constant(list(display_name_to_id.keys())),
values=tf.constant(
list(display_name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
self._display_name_to_id_table = display_name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor),
self._display_name_to_id_table.lookup(unmapped_tensor))
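    # Editor's note: tf.maximum works here because both lookup tables return
    # -1 for unknown keys, so whichever table actually knows the class text
    # wins (label map ids are assumed to be non-negative).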
class _BackupHandler(slim_example_decoder.ItemHandler):
"""An ItemHandler that tries two ItemHandlers in order."""
def __init__(self, handler, backup):
"""Initializes the BackupHandler handler.
If the first Handler's tensors_to_item returns a Tensor with no elements,
the second Handler is used.
Args:
handler: The primary ItemHandler.
backup: The backup ItemHandler.
Raises:
ValueError: if either is not an ItemHandler.
"""
if not isinstance(handler, slim_example_decoder.ItemHandler):
raise ValueError('Primary handler is of type %s instead of ItemHandler' %
type(handler))
if not isinstance(backup, slim_example_decoder.ItemHandler):
raise ValueError(
'Backup handler is of type %s instead of ItemHandler' % type(backup))
self._handler = handler
self._backup = backup
super(_BackupHandler, self).__init__(handler.keys + backup.keys)
def tensors_to_item(self, keys_to_tensors):
item = self._handler.tensors_to_item(keys_to_tensors)
return tf.cond(
pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0),
true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
false_fn=lambda: item)
class TfExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
load_instance_masks=False,
instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
label_map_proto_file=None,
use_display_name=False,
dct_method='',
num_keypoints=0,
num_additional_channels=0):
"""Constructor sets keys_to_features and items_to_handlers.
Args:
load_instance_masks: whether or not to load and handle instance masks.
instance_mask_type: type of instance masks. Options are provided in
input_reader.proto. This is only used if `load_instance_masks` is True.
label_map_proto_file: a file path to a
object_detection.protos.StringIntLabelMap proto. If provided, then the
mapped IDs of 'image/object/class/text' will take precedence over the
existing 'image/object/class/label' ID. Also, if provided, it is
assumed that 'image/object/class/text' will be in the data.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
      dct_method: An optional string. Defaults to the empty string. It only
        takes effect when the image format is jpeg, and is used to specify a
        hint about the algorithm used for jpeg decompression. Currently valid
        values are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be
        ignored, for example if the jpeg library does not have that specific
        option.
num_keypoints: the number of keypoints per object.
num_additional_channels: how many additional channels to use.
Raises:
ValueError: If `instance_mask_type` option is not one of
input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or
input_reader_pb2.PNG_MASKS.
"""
# TODO(rathodv): delete unused `use_display_name` argument once we change
# other decoders to handle label maps similarly.
del use_display_name
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/gsd':
tf.FixedLenFeature((), tf.float32, default_value=1.0),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=1),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=1),
# Image-level labels.
'image/class/text':
tf.VarLenFeature(tf.string),
'image/class/label':
tf.VarLenFeature(tf.int64),
# Object boxes and classes.
'image/object/bbox/xmin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(tf.float32),
'image/object/class/label':
tf.VarLenFeature(tf.int64),
'image/object/class/text':
tf.VarLenFeature(tf.string),
'image/object/area':
tf.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.VarLenFeature(tf.int64),
'image/object/difficult':
tf.VarLenFeature(tf.int64),
'image/object/group_of':
tf.VarLenFeature(tf.int64),
'image/object/weight':
tf.VarLenFeature(tf.float32),
}
# We are checking `dct_method` instead of passing it directly in order to
# ensure TF version 1.6 compatibility.
if dct_method:
image = slim_example_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3,
dct_method=dct_method)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True,
dct_method=dct_method)
else:
image = slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True)
self.items_to_handlers = {
fields.InputDataFields.image:
image,
fields.InputDataFields.source_id: (
slim_example_decoder.Tensor('image/source_id')),
fields.InputDataFields.key: (
slim_example_decoder.Tensor('image/key/sha256')),
fields.InputDataFields.filename: (
slim_example_decoder.Tensor('image/filename')),
fields.InputDataFields.groundtruth_gsd: (
slim_example_decoder.Tensor('image/gsd')),
# Object boxes and classes.
fields.InputDataFields.groundtruth_boxes: (
slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/')),
fields.InputDataFields.groundtruth_area:
slim_example_decoder.Tensor('image/object/area'),
fields.InputDataFields.groundtruth_is_crowd: (
slim_example_decoder.Tensor('image/object/is_crowd')),
fields.InputDataFields.groundtruth_difficult: (
slim_example_decoder.Tensor('image/object/difficult')),
fields.InputDataFields.groundtruth_group_of: (
slim_example_decoder.Tensor('image/object/group_of')),
fields.InputDataFields.groundtruth_weights: (
slim_example_decoder.Tensor('image/object/weight')),
}
if num_additional_channels > 0:
self.keys_to_features[
'image/additional_channels/encoded'] = tf.FixedLenFeature(
(num_additional_channels,), tf.string)
self.items_to_handlers[
fields.InputDataFields.
image_additional_channels] = additional_channel_image
self._num_keypoints = num_keypoints
if num_keypoints > 0:
self.keys_to_features['image/object/keypoint/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/y'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/y', 'image/object/keypoint/x'],
self._reshape_keypoints))
if load_instance_masks:
if instance_mask_type in (input_reader_pb2.DEFAULT,
input_reader_pb2.NUMERICAL_MASKS):
self.keys_to_features['image/object/mask'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._reshape_instance_masks))
elif instance_mask_type == input_reader_pb2.PNG_MASKS:
self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string)
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._decode_png_instance_masks))
else:
raise ValueError('Did not recognize the `instance_mask_type` option.')
if label_map_proto_file:
# If the label_map_proto is provided, try to use it in conjunction with
# the class text, and fall back to a materialized ID.
label_handler = _BackupHandler(
_ClassTensorHandler(
'image/object/class/text', label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor('image/object/class/label'))
image_label_handler = _BackupHandler(
_ClassTensorHandler(
fields.TfExampleFields.image_class_text,
label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label))
else:
label_handler = slim_example_decoder.Tensor('image/object/class/label')
image_label_handler = slim_example_decoder.Tensor(
fields.TfExampleFields.image_class_label)
self.items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
self.items_to_handlers[
fields.InputDataFields.groundtruth_image_classes] = image_label_handler
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
containing image.
fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of
shape [2] containing shape of the image.
fields.InputDataFields.source_id - string tensor containing original
image id.
fields.InputDataFields.key - string tensor with unique sha256 hash key.
fields.InputDataFields.filename - string tensor with original dataset
filename.
fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
[None] containing classes for the boxes.
fields.InputDataFields.groundtruth_weights - 1D float32 tensor of
shape [None] indicating the weights of groundtruth boxes.
fields.InputDataFields.num_groundtruth_boxes - int32 scalar indicating
the number of groundtruth_boxes.
fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in square pixels.
fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
Optional:
fields.InputDataFields.image_additional_channels - 3D uint8 tensor of
shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim
is width; 3rd dim is the number of additional channels.
fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of
shape [None, None, 2] containing keypoints, where the coordinates of
the keypoints are ordered (y, x).
fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
      fields.InputDataFields.groundtruth_image_classes - 1D int64 tensor of
        shape [None] containing image-level classes.
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(
tensor_dict[fields.InputDataFields.image])[:2]
tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
channels = tf.squeeze(channels, axis=3)
channels = tf.transpose(channels, perm=[1, 2, 0])
tensor_dict[fields.InputDataFields.image_additional_channels] = channels
def default_groundtruth_weights():
return tf.ones(
[tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]],
dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_weights])[0],
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
return tensor_dict
def _reshape_keypoints(self, keys_to_tensors):
"""Reshape keypoints.
The instance segmentation masks are reshaped to [num_instances,
num_keypoints, 2].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values
in {0, 1}.
"""
y = keys_to_tensors['image/object/keypoint/y']
if isinstance(y, tf.SparseTensor):
y = tf.sparse_tensor_to_dense(y)
y = tf.expand_dims(y, 1)
x = keys_to_tensors['image/object/keypoint/x']
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
x = tf.expand_dims(x, 1)
keypoints = tf.concat([y, x], 1)
keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2])
return keypoints
def _reshape_instance_masks(self, keys_to_tensors):
"""Reshape instance segmentation masks.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
masks = keys_to_tensors['image/object/mask']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
masks = tf.reshape(tf.to_float(tf.greater(masks, 0.0)), to_shape)
return tf.cast(masks, tf.float32)
def _decode_png_instance_masks(self, keys_to_tensors):
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
image = tf.to_float(tf.greater(image, 0))
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
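# A minimal usage sketch, assuming a TFRecord of serialized tf.Example protos at a
# placeholder path; in TF1 graph mode the lookup tables created in the constructor
# still need `tf.tables_initializer()` to be run before the tensors are evaluated.
def _example_usage(tfrecord_path='/path/to/examples.record'):  # placeholder path
  decoder = TfExampleDecoder(load_instance_masks=False)
  dataset = tf.data.TFRecordDataset([tfrecord_path])
  # Each serialized example is mapped to the tensor dict documented in `decode`.
  return dataset.map(decoder.decode)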
| 44.216842 | 80 | 0.681236 |
67400ce0472de79088f7836ac4dd8ba84158a812
| 27,252 |
py
|
Python
|
pytorch_lightning/loops/epoch/training_epoch_loop.py
|
caillonantoine/pytorch-lightning
|
8676d8026480b55ab8aa343a80e237c5de0a7802
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loops/epoch/training_epoch_loop.py
|
caillonantoine/pytorch-lightning
|
8676d8026480b55ab8aa343a80e237c5de0a7802
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loops/epoch/training_epoch_loop.py
|
caillonantoine/pytorch-lightning
|
8676d8026480b55ab8aa343a80e237c5de0a7802
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import defaultdict
from typing import Any, Dict, Generator, List, Optional, overload, Tuple, Union
import numpy as np
import torch
from pytorch_lightning import loops # import as loops to avoid circular imports
from pytorch_lightning.loops.batch import TrainingBatchLoop
from pytorch_lightning.loops.batch.training_batch_loop import _OUTPUTS_TYPE as _BATCH_OUTPUTS_TYPE
from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.progress import BatchProgress, SchedulerProgress
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.auto_restart import _collect_states_on_rank_zero_over_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
_OUTPUTS_TYPE = List[_BATCH_OUTPUTS_TYPE]
class TrainingEpochLoop(loops.Loop[_OUTPUTS_TYPE]):
"""Runs over all batches in a dataloader (one epoch).
Args:
min_steps: The minimum number of steps (batches) to process
max_steps: The maximum number of steps (batches) to process
"""
def __init__(self, min_steps: Optional[int] = None, max_steps: int = -1) -> None:
super().__init__()
if max_steps is None:
rank_zero_deprecation(
"Setting `max_steps = None` is deprecated in v1.5 and will no longer be supported in v1.7."
" Use `max_steps = -1` instead."
)
max_steps = -1
elif max_steps < -1:
raise MisconfigurationException(
f"`max_steps` must be a non-negative integer or -1 (infinite steps). You passed in {max_steps}."
)
self.min_steps = min_steps
self.max_steps = max_steps
self.batch_progress = BatchProgress()
self.scheduler_progress = SchedulerProgress()
self.batch_loop = TrainingBatchLoop()
self.val_loop = loops.EvaluationLoop(verbose=False)
self._results = _ResultCollection(training=True)
self._outputs: _OUTPUTS_TYPE = []
self._warning_cache = WarningCache()
# caches the loaded dataloader state until dataloader objects are available
self._dataloader_state_dict: Dict[str, Any] = {}
self._batches_that_stepped: int = 0
@property
def total_batch_idx(self) -> int:
"""Returns the current batch index (across epochs)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.total.ready - 1
@property
def batch_idx(self) -> int:
"""Returns the current batch index (within this epoch)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.current.ready - 1
@property
def global_step(self) -> int:
lightning_module = self.trainer.lightning_module
if lightning_module is None or lightning_module.automatic_optimization:
return self.batch_loop.optimizer_loop.optim_progress.optimizer_steps
return self.batch_loop.manual_loop.optim_step_progress.total.completed
@property
def _is_training_done(self) -> bool:
max_steps_reached = _is_max_limit_reached(self.global_step, self.max_steps)
return max_steps_reached or self._num_ready_batches_reached()
@property
def _is_validation_done(self) -> bool:
# when we are restarting we want to check whether the val loop has finished
return not self.restarting or self.val_loop.done
@property
def done(self) -> bool:
"""Evaluates when to leave the loop."""
return (self._is_training_done and self._is_validation_done) or self.trainer.should_stop
def connect( # type: ignore[override]
self,
batch_loop: Optional[TrainingBatchLoop] = None,
val_loop: Optional["loops.EvaluationLoop"] = None,
) -> None:
"""Optionally connect a custom batch or validation loop to this training epoch loop."""
if batch_loop is not None:
self.batch_loop = batch_loop
if val_loop is not None:
self.val_loop = val_loop
def reset(self) -> None:
"""Resets the internal state of the loop for a new run."""
if self.restarting:
self.batch_progress.reset_on_restart()
self.scheduler_progress.reset_on_restart()
self.batch_loop.optimizer_loop.optim_progress.reset_on_restart()
trainer = self.trainer
if not trainer.state._fault_tolerant_mode.is_enabled and trainer.num_training_batches != float("inf"):
expected_steps = math.ceil(trainer.num_training_batches / trainer.accumulate_grad_batches)
if self.global_step % expected_steps != 0:
rank_zero_warn(
"You're resuming from a checkpoint that ended before the epoch ended. This can cause unreliable"
" results if further training is done. Consider using an end-of-epoch checkpoint or enabling"
" fault-tolerant training:"
" https://pytorch-lightning.readthedocs.io/en/stable/advanced/fault_tolerant_training.html"
)
else:
self.batch_progress.reset_on_run()
self.scheduler_progress.reset_on_run()
self.batch_loop.optimizer_loop.optim_progress.reset_on_run()
# when the epoch starts, the total val batch progress should be reset as it's supposed to count the batches
# seen per epoch, this is useful for tracking when validation is run multiple times per epoch
self.val_loop.epoch_loop.batch_progress.total.reset()
self._outputs = []
def on_run_start(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[override]
self._reload_dataloader_state_dict(data_fetcher)
_ = iter(data_fetcher) # creates the iterator inside the fetcher
# add the previous `fetched` value to properly track `is_last_batch` with no prefetching
data_fetcher.fetched += self.batch_progress.current.ready
def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[override]
"""Runs a single training batch.
Raises:
StopIteration: When the epoch is canceled by the user returning -1
"""
if self.restarting and self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch):
# skip training and run validation in `on_advance_end`
return
# we are going to train first so the val loop does not need to restart
self.val_loop.restarting = False
if not isinstance(data_fetcher, DataLoaderIterDataFetcher):
batch_idx = self.batch_idx + 1
batch = next(data_fetcher)
else:
batch_idx, batch = next(data_fetcher)
self.batch_progress.is_last_batch = data_fetcher.done
self.batch_progress.increment_ready()
self.trainer._logger_connector.on_batch_start(batch, batch_idx)
if batch is None:
self._warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
batch_output = []
else:
# hook
self.trainer._call_callback_hooks("on_batch_start")
# TODO: Update this in v1.7 (deprecation: #9816)
model_fx = self.trainer.lightning_module.on_train_batch_start
extra_kwargs = (
{"dataloader_idx": 0}
if callable(model_fx) and is_param_in_hook_signature(model_fx, "dataloader_idx", explicit=True)
else {}
)
# hook
self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx, **extra_kwargs)
response = self.trainer._call_lightning_module_hook(
"on_train_batch_start", batch, batch_idx, **extra_kwargs
)
self.trainer._call_strategy_hook("on_train_batch_start", batch, batch_idx, **extra_kwargs)
if response == -1:
self.batch_progress.increment_processed()
raise StopIteration
self.batch_progress.increment_started()
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.batch_loop.run(batch, batch_idx)
self.batch_progress.increment_processed()
# update non-plateau LR schedulers
# update epoch-interval ones only when we are at the end of training epoch
self.update_lr_schedulers("step", update_plateau_schedulers=False)
if self._num_ready_batches_reached():
self.update_lr_schedulers("epoch", update_plateau_schedulers=False)
batch_end_outputs = self._prepare_outputs_training_batch_end(
batch_output,
            automatic=self.trainer.lightning_module.automatic_optimization,
num_optimizers=len(self.trainer.optimizers),
)
# TODO: Update this in v1.7 (deprecation: #9816)
model_fx = self.trainer.lightning_module.on_train_batch_end
extra_kwargs = (
{"dataloader_idx": 0}
if callable(model_fx) and is_param_in_hook_signature(model_fx, "dataloader_idx", explicit=True)
else {}
)
self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx, **extra_kwargs)
self.trainer._call_lightning_module_hook(
"on_train_batch_end", batch_end_outputs, batch, batch_idx, **extra_kwargs
)
self.trainer._call_callback_hooks("on_batch_end")
self.trainer._logger_connector.on_batch_end()
self.batch_progress.increment_completed()
if is_overridden("training_epoch_end", self.trainer.lightning_module):
self._outputs.append(batch_output)
# -----------------------------------------
# SAVE METRICS TO LOGGERS AND PROGRESS_BAR
# -----------------------------------------
self.trainer._logger_connector.update_train_step_metrics()
def on_advance_end(self) -> None:
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch)
if should_check_val:
self.trainer.validating = True
self._run_validation()
self.trainer.training = True
# update plateau LR scheduler after metrics are logged
self.update_lr_schedulers("step", update_plateau_schedulers=True)
if not self._should_accumulate():
# this is increased once per batch disregarding multiple optimizers or tbptt on purpose for loggers
self._batches_that_stepped += 1
# this will save based on the `batches_that_stepped` value
self._save_loggers_on_train_batch_end()
# if training finished, defer exit to the parent. this assumes there will be enough time in between
# which might not be the case depending on what's in the `*_epoch_end` hooks
if not self._is_training_done:
# if fault tolerant is enabled and process has been notified, exit.
self.trainer._exit_gracefully_on_signal()
def on_run_end(self) -> _OUTPUTS_TYPE:
outputs, self._outputs = self._outputs, []
return outputs
def teardown(self) -> None:
self._results.cpu()
self.batch_loop.teardown()
self.val_loop.teardown()
def on_save_checkpoint(self) -> Dict:
state_dict = super().on_save_checkpoint()
if (
self.trainer is not None
and self.trainer.state._fault_tolerant_mode.is_enabled
and self.trainer.train_dataloader is not None
and not self._num_completed_batches_reached() # did not finish
# TODO: fault-tolerance requires a minimum number of batches so probably should be > 0
and self.batch_progress.current.ready # did start
):
loader: CombinedLoader = self.trainer.train_dataloader
state = loader.state_dict(has_completed=self._has_completed())
if state:
state_dict["dataloader_state_dict"] = _collect_states_on_rank_zero_over_collection(state)
return state_dict
def on_load_checkpoint(self, state_dict: Dict) -> None:
# cache the dataloader state dict until the dataloader objects are available
self._dataloader_state_dict = state_dict.get("dataloader_state_dict")
def _run_validation(self) -> None:
# reload dataloaders
self.val_loop._reload_evaluation_dataloaders()
with torch.no_grad():
self.val_loop.run()
def _accumulated_batches_reached(self) -> bool:
"""Determine if accumulation will be finished by the end of the current batch."""
return self.batch_progress.current.ready % self.trainer.accumulate_grad_batches == 0
def _num_ready_batches_reached(self) -> bool:
"""Checks if we are in the last batch or if there are more batches to follow."""
epoch_finished_on_ready = self.batch_progress.current.ready == self.trainer.num_training_batches
return epoch_finished_on_ready or self.batch_progress.is_last_batch
def _num_completed_batches_reached(self) -> bool:
epoch_finished_on_completed = self.batch_progress.current.completed == self.trainer.num_training_batches
dataloader_consumed_successfully = self.batch_progress.is_last_batch and self._has_completed()
return epoch_finished_on_completed or dataloader_consumed_successfully
def _has_completed(self) -> bool:
return self.batch_progress.current.ready == self.batch_progress.current.completed
def _should_accumulate(self) -> bool:
"""Checks if the optimizer step should be performed or gradients should be accumulated for the current
step."""
accumulation_done = self._accumulated_batches_reached()
# Lightning steps on the final batch
is_final_batch = self._num_ready_batches_reached()
# but the strategy might not
strategy_accumulates_on_final_batch = self.trainer.strategy.handles_gradient_accumulation or not is_final_batch
return not accumulation_done and strategy_accumulates_on_final_batch
@staticmethod
def _prepare_outputs_training_batch_end(
batch_output: _BATCH_OUTPUTS_TYPE,
automatic: bool,
num_optimizers: int,
) -> Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]:
"""Processes the outputs from the batch loop into the format passed to the ``training_batch_end`` hook.
``(tbptt_steps, n_opt) -> (n_opt, tbptt_steps)``. The optimizer dimension might have been squeezed.
"""
if not batch_output:
return []
# convert optimizer dicts to list
if automatic:
batch_output = apply_to_collection(
batch_output, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers
)
array = np.array(batch_output, dtype=object)
if array.ndim == 1:
array = np.expand_dims(array, 1)
array = array.transpose((1, 0))
array = array.squeeze()
array = array.tolist()
array = _recursive_unpad(array)
return array
@staticmethod
def _prepare_outputs_training_epoch_end(
batch_outputs: _OUTPUTS_TYPE,
automatic: bool,
num_optimizers: int,
) -> Union[List[List[List[Dict[str, Any]]]], List[List[Dict[str, Any]]], List[Dict[str, Any]]]:
"""Processes the outputs from the batch loop into the format passed to the ``training_epoch_end`` hook.
``(n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps)``.
All single-element dimensions might have been squeezed.
This processing is necessary because the format of the inputs to the ``training_epoch_end`` hook does not
match the loop structure and because empty dimensions are squeezed. This could break with loop customization.
"""
# `batch_outputs` (plural) is the same as `epoch_end_output` (singular)
if not batch_outputs:
return []
# convert optimizer dicts to list
if automatic:
batch_outputs = apply_to_collection(
batch_outputs, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers
)
array = _recursive_pad(batch_outputs)
if array.ndim == 2:
array = np.expand_dims(array, 2)
array = array.transpose((2, 0, 1))
array = array.squeeze()
array = array.tolist()
array = _recursive_unpad(array)
# in case we squeezed from 1-element array to a 0-dim array
array = array if isinstance(array, list) else [array]
# remove residual empty lists
array = [item for item in array if not isinstance(item, list) or len(item)]
return array
def update_lr_schedulers(self, interval: str, update_plateau_schedulers: bool) -> None:
"""updates the lr schedulers based on the given interval."""
if interval == "step" and self._should_accumulate():
return
active_optimizers = _get_active_optimizers(
self.trainer.optimizers, self.trainer.optimizer_frequencies, self.total_batch_idx
)
self._update_learning_rates(
interval=interval,
update_plateau_schedulers=update_plateau_schedulers,
opt_indices=[opt_idx for opt_idx, _ in active_optimizers],
)
def _update_learning_rates(
self, interval: str, update_plateau_schedulers: bool, opt_indices: Optional[List[int]] = None
) -> None:
"""Update learning rates.
Args:
interval: either 'epoch' or 'step'.
update_plateau_schedulers: control whether ``ReduceLROnPlateau`` or non-plateau schedulers get updated.
This is used so non-plateau schedulers can be updated before running validation. Checkpoints are
commonly saved during validation, however, on-plateau schedulers might monitor a validation metric
so they have to be updated separately.
opt_indices: indices of the optimizers to update.
"""
if not self.trainer.lr_scheduler_configs or not self.trainer.lightning_module.automatic_optimization:
return
if opt_indices is None:
opt_indices = []
for config in self.trainer.lr_scheduler_configs:
if config.opt_idx not in opt_indices:
continue
if update_plateau_schedulers ^ config.reduce_on_plateau:
continue
current_idx = self.batch_idx if interval == "step" else self.trainer.current_epoch
            current_idx += 1  # account for both batch and epoch starting from 0
# Take step if call to update_learning_rates matches the interval key and
# the current step modulo the schedulers frequency is zero
if config.interval == interval and current_idx % config.frequency == 0:
monitor_val = None
if config.reduce_on_plateau:
# If instance of ReduceLROnPlateau, we need a monitor
monitor_key = config.monitor
monitor_val = self._get_monitor_value(monitor_key)
if monitor_val is None:
if config.strict:
avail_metrics = list(self.trainer.callback_metrics)
raise MisconfigurationException(
f"ReduceLROnPlateau conditioned on metric {monitor_key}"
f" which is not available. Available metrics are: {avail_metrics}."
" Condition can be set using `monitor` key in lr scheduler dict"
)
rank_zero_warn(
f"ReduceLROnPlateau conditioned on metric {monitor_key}"
" which is not available but strict is set to `False`."
" Skipping learning rate update.",
category=RuntimeWarning,
)
continue
self.scheduler_progress.increment_ready()
# update LR
self.trainer._call_lightning_module_hook(
"lr_scheduler_step",
config.scheduler,
config.opt_idx,
monitor_val,
)
self.scheduler_progress.increment_completed()
def _get_monitor_value(self, key: str) -> Any:
# this is a separate method to aid in testing
return self.trainer.callback_metrics.get(key)
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:
"""Decide if we should run validation."""
if not self.trainer.enable_validation:
return False
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
if not is_val_check_epoch:
return False
# val_check_batch is inf for iterable datasets with no length defined
is_infinite_dataset = self.trainer.val_check_batch == float("inf")
if is_last_batch and is_infinite_dataset:
return True
if self.trainer.should_stop:
return True
# TODO(@awaelchli): let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = is_last_batch
if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset:
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float("inf"):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
return is_val_check_batch
def _save_loggers_on_train_batch_end(self) -> None:
"""Flushes loggers to disk."""
# this assumes that `batches_that_stepped` was increased before
should_flush = self._batches_that_stepped % self.trainer.flush_logs_every_n_steps == 0
if should_flush or self.trainer.should_stop:
for logger in self.trainer.loggers:
logger.save()
def _reload_dataloader_state_dict(self, data_fetcher: AbstractDataFetcher) -> None:
if self._dataloader_state_dict:
data_fetcher.dataloader.load_state_dict(self._dataloader_state_dict)
self._dataloader_state_dict = None
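# A minimal customization sketch, assuming a ``Trainer`` instance named ``trainer``
# whose fit loop accepts a replacement epoch loop through ``connect`` (the exact
# attachment API depends on the enclosing fit loop):
#
#   epoch_loop = TrainingEpochLoop(min_steps=0, max_steps=1000)
#   epoch_loop.connect(batch_loop=TrainingBatchLoop(), val_loop=loops.EvaluationLoop(verbose=False))
#   trainer.fit_loop.connect(epoch_loop=epoch_loop)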
def _convert_optim_dict(outs: Dict[int, Dict[str, Any]], num_optimizers: int) -> List[Dict[str, Any]]:
"""Converts an optimizer dict to a list in which the key of the dict determines the position of the element.
Example::
>>> _convert_optim_dict({0: {"loss": 0.0}, 2: {"loss": 0.2}}, num_optimizers=3)
[{'loss': 0.0}, None, {'loss': 0.2}]
"""
return [outs[opt_idx] if opt_idx in outs else None for opt_idx in range(num_optimizers)]
@overload
def _recursive_unpad(nested: Any, value: Optional[Any] = None) -> Any:
...
@overload
def _recursive_unpad(nested: List[Any], value: Optional[Any] = None) -> List[Any]:
...
def _recursive_unpad(nested: Union[Any, List[Any]], value: Optional[Any] = None) -> Union[Any, List[Any]]:
"""Removes the given pad value from the nested list. Not strictly the reverse operation of
:func:`_recursive_pad` because it removes the padding element everywhere, not just from the end of a list.
Example::
>>> _recursive_unpad([[[0, 1, 0]], [2], [0, 0]], value=0)
[[[1]], [2], []]
"""
if not isinstance(nested, list):
return nested
return [_recursive_unpad(item, value) for item in nested if item != value]
def _recursive_pad(nested: List[Any], fill_value: Optional[Any] = None) -> np.ndarray:
"""Pads a jagged nested list of lists with the given value such that a proper multi-dimensional array can be
formed with rectangular shape. The padding appends to the incomplete lists.
Example::
>>> _recursive_pad([[], [1], [2, 3], [4]], fill_value=0) # doctest: +NORMALIZE_WHITESPACE
array([[0, 0], [1, 0], [2, 3], [4, 0]], dtype=object)
"""
# code adapted from stackexchange:
# https://codereview.stackexchange.com/questions/222623/pad-a-ragged-multidimensional-array-to-rectangular-shape
dimensions = _get_max_shape(nested)
result = np.full(dimensions, fill_value, dtype=object)
for index, value in _iterate_nested_array(nested):
result[index] = value
return result
def _get_dimensions(array: List[Any], level: int = 0) -> Generator:
yield level, len(array)
if all(isinstance(row, list) for row in array):
for row in array:
yield from _get_dimensions(row, level + 1)
def _get_max_shape(array: List[Any]) -> List[int]:
"""Calculates the max size in each dimension of a jagged (non-rectangular) nested list of lists.
Example::
>>> _get_max_shape([[], [[1], [2]], []])
[3, 2, 1]
"""
dimensions = defaultdict(int)
for level, length in _get_dimensions(array):
dimensions[level] = max(dimensions[level], length)
return [value for _, value in sorted(dimensions.items())]
def _iterate_nested_array(array: List[Any], index: Tuple = ()) -> Generator:
if all(isinstance(item, list) for item in array):
for idx, row in enumerate(array):
yield from _iterate_nested_array(row, (*index, idx))
else: # final level
yield (*index, slice(len(array))), array
| 45.495826 | 120 | 0.663804 |
b00e67aa53174756381a2d974545f8c2c7d3fb18
| 802 |
py
|
Python
|
oo/pessoa.py
|
Cleiton2308/pythonbirds
|
a9fd2238f28572bb76794244d36ca44c2b39b1fd
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
Cleiton2308/pythonbirds
|
a9fd2238f28572bb76794244d36ca44c2b39b1fd
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
Cleiton2308/pythonbirds
|
a9fd2238f28572bb76794244d36ca44c2b39b1fd
|
[
"MIT"
] | null | null | null |
class Pessoa:
olhos = 2
    def __init__(self, *filhos, nome=None, idade=33):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
if __name__ == '__main__':
cleiton = Pessoa(nome = 'Cleiton')
renzo = Pessoa(cleiton, nome='Renzo')
print(Pessoa.cumprimentar(cleiton))
print(id(cleiton))
print(cleiton.cumprimentar())
print(cleiton.nome)
print(cleiton.idade)
for filho in cleiton.filhos:
print(filho.nome)
cleiton.sobrenome = 'Teixeira'
del cleiton.filhos
print(cleiton.__dict__)
print(renzo.__dict__)
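    # 'olhos' is a class attribute: rebinding it on the class changes the value
    # seen by every instance that has not shadowed it, so the three ids printed
    # below are identical.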
Pessoa.olhos = 3
print(cleiton.olhos)
print(renzo.olhos)
print(id(Pessoa.olhos), id(cleiton.olhos), id(renzo.olhos))
| 21.675676 | 63 | 0.63217 |
25507d28c9498680dafca216a70ee9de671a8e9f
| 2,046 |
py
|
Python
|
azure-common/setup.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 2 |
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure-common/setup.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1 |
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure-common/setup.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1 |
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from setuptools import setup
import sys
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
setup(
name='azure-common',
version='1.1.6',
description='Microsoft Azure Client Library for Python (Common)',
long_description=open('README.rst', 'r').read(),
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=[
'azure',
'azure.common',
],
extras_require={
'autorest':[
'msrestazure>=0.4.0,<0.5.0',
]
},
cmdclass=cmdclass
)
| 30.537313 | 75 | 0.581623 |
0615cb45c8eb25e167781401939256a746c545e2
| 9,021 |
py
|
Python
|
vumi/demos/tests/test_hangman.py
|
rapidsms/vumi
|
f15c101b599cc1283c84592e8707b6a929f67cbd
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/demos/tests/test_hangman.py
|
rapidsms/vumi
|
f15c101b599cc1283c84592e8707b6a929f67cbd
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/demos/tests/test_hangman.py
|
rapidsms/vumi
|
f15c101b599cc1283c84592e8707b6a929f67cbd
|
[
"BSD-3-Clause"
] | 2 |
2018-03-05T18:01:45.000Z
|
2019-11-02T19:34:18.000Z
|
# -*- encoding: utf-8 -*-
"""Tests for vumi.demos.hangman."""
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.web.static import Data
from vumi.application.tests.utils import ApplicationTestCase
from vumi.demos.hangman import HangmanGame, HangmanWorker
from vumi.message import TransportUserMessage
import string
def mkstate(word, guesses, msg):
return {'word': word, 'guesses': guesses, 'msg': msg}
class TestHangmanGame(unittest.TestCase):
def test_easy_game(self):
game = HangmanGame(word='moo')
game.event('m')
game.event('o')
self.assertTrue(game.won())
self.assertEqual(
game.state(), mkstate('moo', 'mo', 'Flawless victory!'))
def test_incorrect_guesses(self):
game = HangmanGame(word='moo')
game.event('f')
game.event('g')
self.assertFalse(game.won())
self.assertEqual(
game.state(), mkstate('moo', 'fg', "Word contains no 'g'. :("))
def test_repeated_guesses(self):
game = HangmanGame(word='moo')
game.event('f')
game.event('f')
self.assertFalse(game.won())
self.assertEqual(
game.state(), mkstate('moo', 'f', "You've already guessed 'f'."))
def test_button_mashing(self):
game = HangmanGame(word='moo')
for event in string.lowercase.replace('o', ''):
game.event(event)
game.event('o')
self.assertTrue(game.won())
self.assertEqual(
game.state(), mkstate('moo', string.lowercase, "Button mashing!"))
def test_new_game(self):
game = HangmanGame(word='moo')
for event in ('m', 'o', '-'):
game.event(event)
self.assertEqual(
game.state(), mkstate('moo', 'mo', 'Flawless victory!'))
self.assertEqual(game.exit_code, game.DONE_WANTS_NEW)
def test_from_state(self):
game = HangmanGame.from_state(mkstate("bar", "xyz", "Eep?"))
self.assertEqual(game.word, "bar")
self.assertEqual(game.guesses, set("xyz"))
self.assertEqual(game.msg, "Eep?")
self.assertEqual(game.exit_code, game.NOT_DONE)
def test_from_state_non_ascii(self):
game = HangmanGame.from_state(
mkstate("b\xc3\xa4r".decode("utf-8"), "xyz", "Eep?"))
self.assertEqual(game.word, u"b\u00e4r")
self.assertEqual(game.guesses, set("xyz"))
self.assertEqual(game.msg, "Eep?")
self.assertEqual(game.exit_code, game.NOT_DONE)
def test_exit(self):
game = HangmanGame('elephant')
game.event('0')
self.assertEqual(game.exit_code, game.DONE)
self.assertEqual(game.draw_board(), "Adieu!")
def test_draw_board(self):
game = HangmanGame('word')
board = game.draw_board()
msg, word, guesses, prompt, end = board.split("\n")
self.assertEqual(msg, "New game!")
self.assertEqual(word, "Word: ____")
self.assertEqual(guesses, "Letters guessed so far: ")
self.assertEqual(prompt, "Enter next guess (0 to quit):")
def test_draw_board_at_end_of_game(self):
game = HangmanGame('m')
game.event('m')
board = game.draw_board()
msg, word, guesses, prompt, end = board.split("\n")
self.assertEqual(msg, "Flawless victory!")
self.assertEqual(word, "Word: m")
self.assertEqual(guesses, "Letters guessed so far: m")
self.assertEqual(prompt, "Enter anything to start a new game"
" (0 to quit):")
def test_displaying_word(self):
game = HangmanGame('word')
game.event('w')
game.event('r')
board = game.draw_board()
_msg, word, _guesses, _prompt, _end = board.split("\n")
self.assertEqual(word, "Word: w_r_")
def test_displaying_guesses(self):
game = HangmanGame('word')
game.event('w')
board = game.draw_board()
msg, _word, _guesses, _prompt, _end = board.split("\n")
self.assertEqual(msg, "Word contains at least one 'w'! :D")
game.event('w')
board = game.draw_board()
msg, _word, _guesses, _prompt, _end = board.split("\n")
self.assertEqual(msg, "You've already guessed 'w'.")
game.event('x')
board = game.draw_board()
msg, _word, _guesses, _prompt, _end = board.split("\n")
self.assertEqual(msg, "Word contains no 'x'. :(")
def test_garbage_input(self):
game = HangmanGame(word="zoo")
for garbage in [":", "!", "\x00", "+", "abc", ""]:
game.event(garbage)
self.assertEqual(game.guesses, set())
game.event('z')
game.event('o')
self.assertTrue(game.won())
class TestHangmanWorker(ApplicationTestCase):
application_class = HangmanWorker
@inlineCallbacks
def setUp(self):
super(TestHangmanWorker, self).setUp()
root = Resource()
# data is elephant with a UTF-8 encoded BOM
# it is a sad elephant (as seen in the wild)
root.putChild("word", Data('\xef\xbb\xbfelephant\r\n', 'text/html'))
site_factory = Site(root)
self.webserver = yield reactor.listenTCP(0, site_factory)
addr = self.webserver.getHost()
random_word_url = "http://%s:%s/word" % (addr.host, addr.port)
self.worker = yield self.get_application({
'worker_name': 'test_hangman',
'random_word_url': random_word_url,
})
yield self.worker.session_manager.redis._purge_all() # just in case
@inlineCallbacks
def send(self, content, session_event=None):
msg = self.mkmsg_in(content=content, session_event=session_event)
yield self.dispatch(msg)
@inlineCallbacks
def recv(self, n=0):
msgs = yield self.wait_for_dispatched_messages(n)
def reply_code(msg):
if msg['session_event'] == TransportUserMessage.SESSION_CLOSE:
return 'end'
return 'reply'
returnValue([(reply_code(msg), msg['content']) for msg in msgs])
@inlineCallbacks
def tearDown(self):
yield super(TestHangmanWorker, self).tearDown()
yield self.webserver.loseConnection()
@inlineCallbacks
def test_new_session(self):
yield self.send(None, TransportUserMessage.SESSION_NEW)
replies = yield self.recv(1)
self.assertEqual(len(replies), 1)
reply = replies[0]
self.assertEqual(reply[0], 'reply')
self.assertEqual(reply[1],
"New game!\n"
"Word: ________\n"
"Letters guessed so far: \n"
"Enter next guess (0 to quit):\n")
@inlineCallbacks
def test_random_word(self):
word = yield self.worker.random_word()
self.assertEqual(word, 'elephant')
@inlineCallbacks
def test_full_session(self):
yield self.send(None, TransportUserMessage.SESSION_NEW)
for event in ('e', 'l', 'p', 'h', 'a', 'n', 'o', 't'):
yield self.send(event, TransportUserMessage.SESSION_RESUME)
replies = yield self.recv(9)
self.assertEqual(len(replies), 9)
last_reply = replies[-1]
self.assertEqual(last_reply[0], 'reply')
self.assertEqual(last_reply[1],
"Epic victory!\n"
"Word: elephant\n"
"Letters guessed so far: aehlnopt\n"
"Enter anything to start a new game (0 to quit):\n")
yield self.send('1')
replies = yield self.recv(10)
last_reply = replies[-1]
self.assertEqual(last_reply[0], 'reply')
self.assertEqual(last_reply[1],
"New game!\n"
"Word: ________\n"
"Letters guessed so far: \n"
"Enter next guess (0 to quit):\n")
yield self.send('0')
replies = yield self.recv(11)
last_reply = replies[-1]
self.assertEqual(last_reply[0], 'end')
self.assertEqual(last_reply[1], "Adieu!")
@inlineCallbacks
def test_close_session(self):
yield self.send(None, TransportUserMessage.SESSION_CLOSE)
replies = yield self.recv()
self.assertEqual(replies, [])
@inlineCallbacks
def test_non_ascii_input(self):
yield self.send(None, TransportUserMessage.SESSION_NEW)
for event in (u'ü', u'æ'):
yield self.send(event, TransportUserMessage.SESSION_RESUME)
replies = yield self.recv(3)
self.assertEqual(len(replies), 3)
for reply in replies[1:]:
self.assertEqual(reply[0], 'reply')
self.assertTrue(reply[1].startswith(
'Letters of the alphabet only please.'))
| 35.376471 | 78 | 0.594834 |
8604f5df6a1fe1e070933d7e6ac53d16097d4185
| 393 |
py
|
Python
|
subjects/models.py
|
unt-libraries/django-subjects
|
96f00a3d7aebf1554359cf8ab151b163c52a9ad1
|
[
"BSD-3-Clause"
] | null | null | null |
subjects/models.py
|
unt-libraries/django-subjects
|
96f00a3d7aebf1554359cf8ab151b163c52a9ad1
|
[
"BSD-3-Clause"
] | 9 |
2018-10-17T18:16:44.000Z
|
2020-11-30T20:16:39.000Z
|
subjects/models.py
|
unt-libraries/django-subjects
|
96f00a3d7aebf1554359cf8ab151b163c52a9ad1
|
[
"BSD-3-Clause"
] | 2 |
2018-10-30T21:10:50.000Z
|
2018-10-30T21:14:20.000Z
|
from django.db import models
# Subject model
class Subject(models.Model):
name = models.TextField(editable=False)
parent = models.IntegerField(editable=False, default=0)
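    # lft/rght appear to hold nested-set (modified preorder traversal) bounds,
    # allowing whole subtrees to be queried without recursion.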
lft = models.IntegerField('left Traversal')
rght = models.IntegerField('right Traversal')
keywords = models.TextField()
notes = models.TextField()
def __str__(self):
return self.name
| 26.2 | 59 | 0.707379 |
e20e4bcf0840e89d7b7278eb31123aecc743c906
| 11,198 |
py
|
Python
|
redun/backends/db/alembic/versions/806f5dcb11bf_initial_schema.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | 303 |
2021-11-04T00:19:48.000Z
|
2022-03-31T18:44:02.000Z
|
redun/backends/db/alembic/versions/806f5dcb11bf_initial_schema.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | 16 |
2021-11-05T20:30:48.000Z
|
2022-03-30T22:21:19.000Z
|
redun/backends/db/alembic/versions/806f5dcb11bf_initial_schema.py
|
cclauss/redun
|
55792921b42b430571eafc30ab21eb50eb4f64b3
|
[
"Apache-2.0"
] | 14 |
2021-11-04T19:56:57.000Z
|
2022-02-20T16:23:19.000Z
|
"""initial schema
Revision ID: 806f5dcb11bf
Revises:
Create Date: 2020-05-01 10:10:30.843894
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "806f5dcb11bf"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"redun_version",
sa.Column("id", sa.String(), nullable=False),
sa.Column("version", sa.Integer(), nullable=False),
sa.Column("timestamp", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"task",
sa.Column("hash", sa.String(length=40), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("namespace", sa.String(), nullable=False),
sa.Column("source", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("hash"),
)
op.create_table(
"value",
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("type", sa.String(length=100), nullable=False),
sa.Column("format", sa.String(length=100), nullable=False),
sa.Column("value", sa.LargeBinary(), nullable=False),
sa.PrimaryKeyConstraint("value_hash"),
)
op.create_table(
"call_node",
sa.Column("call_hash", sa.String(length=40), nullable=False),
sa.Column("task_name", sa.String(length=1024), nullable=False),
sa.Column("task_hash", sa.String(length=40), nullable=False),
sa.Column("args_hash", sa.String(length=40), nullable=False),
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("timestamp", sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(
["task_hash"],
["task.hash"],
),
sa.ForeignKeyConstraint(
["value_hash"],
["value.value_hash"],
),
sa.PrimaryKeyConstraint("call_hash"),
)
op.create_index(op.f("ix_call_node_task_hash"), "call_node", ["task_hash"], unique=False)
op.create_index(op.f("ix_call_node_value_hash"), "call_node", ["value_hash"], unique=False)
op.create_table(
"file",
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("path", sa.String(length=1024), nullable=False),
sa.ForeignKeyConstraint(
["value_hash"],
["value.value_hash"],
),
sa.PrimaryKeyConstraint("value_hash"),
)
op.create_table(
"handle",
sa.Column("hash", sa.String(length=40), nullable=False),
sa.Column("fullname", sa.String(length=1024), nullable=False),
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("key", sa.String(length=1024), nullable=False),
sa.Column("is_valid", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["value_hash"],
["value.value_hash"],
),
sa.PrimaryKeyConstraint("hash"),
)
op.create_index(op.f("ix_handle_fullname"), "handle", ["fullname"], unique=False)
op.create_index(op.f("ix_handle_value_hash"), "handle", ["value_hash"], unique=False)
op.create_table(
"subvalue",
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("parent_value_hash", sa.String(length=40), nullable=False),
sa.ForeignKeyConstraint(
["parent_value_hash"],
["value.value_hash"],
),
sa.ForeignKeyConstraint(
["value_hash"],
["value.value_hash"],
),
sa.PrimaryKeyConstraint("value_hash", "parent_value_hash"),
)
op.create_index(
op.f("ix_subvalue_parent_value_hash"), "subvalue", ["parent_value_hash"], unique=False
)
op.create_index(op.f("ix_subvalue_value_hash"), "subvalue", ["value_hash"], unique=False)
op.create_table(
"argument",
sa.Column("arg_hash", sa.String(length=40), nullable=False),
sa.Column("call_hash", sa.String(length=40), nullable=False),
sa.Column("value_hash", sa.String(length=40), nullable=False),
sa.Column("arg_position", sa.Integer(), nullable=True),
sa.Column("arg_key", sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(
["call_hash"],
["call_node.call_hash"],
),
sa.ForeignKeyConstraint(
["value_hash"],
["value.value_hash"],
),
sa.PrimaryKeyConstraint("arg_hash"),
)
op.create_index(op.f("ix_argument_call_hash"), "argument", ["call_hash"], unique=False)
op.create_index(op.f("ix_argument_value_hash"), "argument", ["value_hash"], unique=False)
op.create_table(
"call_edge",
sa.Column("parent_id", sa.String(length=40), nullable=False),
sa.Column("child_id", sa.String(length=40), nullable=False),
sa.Column("call_order", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["child_id"],
["call_node.call_hash"],
),
sa.ForeignKeyConstraint(
["parent_id"],
["call_node.call_hash"],
),
sa.PrimaryKeyConstraint("parent_id", "child_id", "call_order"),
)
op.create_index(op.f("ix_call_edge_child_id"), "call_edge", ["child_id"], unique=False)
op.create_index(op.f("ix_call_edge_parent_id"), "call_edge", ["parent_id"], unique=False)
op.create_table(
"call_subtree_task",
sa.Column("call_hash", sa.String(length=40), nullable=False),
sa.Column("task_hash", sa.String(length=40), nullable=False),
sa.ForeignKeyConstraint(
["call_hash"],
["call_node.call_hash"],
),
sa.ForeignKeyConstraint(
["task_hash"],
["task.hash"],
),
sa.PrimaryKeyConstraint("call_hash", "task_hash"),
)
op.create_index(
op.f("ix_call_subtree_task_call_hash"), "call_subtree_task", ["call_hash"], unique=False
)
op.create_index(
op.f("ix_call_subtree_task_task_hash"), "call_subtree_task", ["task_hash"], unique=False
)
op.create_table(
"handle_edge",
sa.Column("parent_id", sa.String(length=40), nullable=False),
sa.Column("child_id", sa.String(length=40), nullable=False),
sa.ForeignKeyConstraint(
["child_id"],
["handle.hash"],
),
sa.ForeignKeyConstraint(
["parent_id"],
["handle.hash"],
),
sa.PrimaryKeyConstraint("parent_id", "child_id"),
)
op.create_index(op.f("ix_handle_edge_child_id"), "handle_edge", ["child_id"], unique=False)
op.create_index(op.f("ix_handle_edge_parent_id"), "handle_edge", ["parent_id"], unique=False)
op.create_table(
"job",
sa.Column("id", sa.String(), nullable=False),
sa.Column("start_time", sa.DateTime(), nullable=False),
sa.Column("end_time", sa.DateTime(), nullable=True),
sa.Column("task_hash", sa.String(length=40), nullable=False),
sa.Column("cached", sa.Boolean(), nullable=False),
sa.Column("call_hash", sa.String(length=40), nullable=True),
sa.Column("parent_id", sa.String(), nullable=True),
sa.ForeignKeyConstraint(
["call_hash"],
["call_node.call_hash"],
),
sa.ForeignKeyConstraint(
["parent_id"],
["job.id"],
),
sa.ForeignKeyConstraint(
["task_hash"],
["task.hash"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_call_hash"), "job", ["call_hash"], unique=False)
op.create_index(op.f("ix_job_parent_id"), "job", ["parent_id"], unique=False)
op.create_index(op.f("ix_job_task_hash"), "job", ["task_hash"], unique=False)
op.create_table(
"argument_result",
sa.Column("arg_hash", sa.String(length=40), nullable=False),
sa.Column("result_call_hash", sa.String(length=40), nullable=False),
sa.ForeignKeyConstraint(
["arg_hash"],
["argument.arg_hash"],
),
sa.ForeignKeyConstraint(
["result_call_hash"],
["call_node.call_hash"],
),
sa.PrimaryKeyConstraint("arg_hash", "result_call_hash"),
)
op.create_index(
op.f("ix_argument_result_arg_hash"), "argument_result", ["arg_hash"], unique=False
)
op.create_index(
op.f("ix_argument_result_result_call_hash"),
"argument_result",
["result_call_hash"],
unique=False,
)
op.create_table(
"execution",
sa.Column("id", sa.String(), nullable=False),
sa.Column("args", sa.String(), nullable=False),
sa.Column("job_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(
["job_id"],
["job.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_execution_job_id"), "execution", ["job_id"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_execution_job_id"), table_name="execution")
op.drop_table("execution")
op.drop_index(op.f("ix_argument_result_result_call_hash"), table_name="argument_result")
op.drop_index(op.f("ix_argument_result_arg_hash"), table_name="argument_result")
op.drop_table("argument_result")
op.drop_index(op.f("ix_job_task_hash"), table_name="job")
op.drop_index(op.f("ix_job_parent_id"), table_name="job")
op.drop_index(op.f("ix_job_call_hash"), table_name="job")
op.drop_table("job")
op.drop_index(op.f("ix_handle_edge_parent_id"), table_name="handle_edge")
op.drop_index(op.f("ix_handle_edge_child_id"), table_name="handle_edge")
op.drop_table("handle_edge")
op.drop_index(op.f("ix_call_subtree_task_task_hash"), table_name="call_subtree_task")
op.drop_index(op.f("ix_call_subtree_task_call_hash"), table_name="call_subtree_task")
op.drop_table("call_subtree_task")
op.drop_index(op.f("ix_call_edge_parent_id"), table_name="call_edge")
op.drop_index(op.f("ix_call_edge_child_id"), table_name="call_edge")
op.drop_table("call_edge")
op.drop_index(op.f("ix_argument_value_hash"), table_name="argument")
op.drop_index(op.f("ix_argument_call_hash"), table_name="argument")
op.drop_table("argument")
op.drop_index(op.f("ix_subvalue_value_hash"), table_name="subvalue")
op.drop_index(op.f("ix_subvalue_parent_value_hash"), table_name="subvalue")
op.drop_table("subvalue")
op.drop_index(op.f("ix_handle_value_hash"), table_name="handle")
op.drop_index(op.f("ix_handle_fullname"), table_name="handle")
op.drop_table("handle")
op.drop_table("file")
op.drop_index(op.f("ix_call_node_value_hash"), table_name="call_node")
op.drop_index(op.f("ix_call_node_task_hash"), table_name="call_node")
op.drop_table("call_node")
op.drop_table("value")
op.drop_table("task")
op.drop_table("redun_version")
# ### end Alembic commands ###
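# Hedged usage note (not part of the auto-generated migration): assuming the
# project's alembic.ini points at this revision, the schema above is normally
# applied with `alembic upgrade head` (runs upgrade()) and reverted with
# `alembic downgrade -1` (runs downgrade()).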
| 40.136201 | 97 | 0.623236 |
cc863906effbf7ff091c4777e15d349679c97080
| 2,289 |
py
|
Python
|
tests/functional/pages/profile/case_study_basic.py
|
mayank-sfdc/directory-tests
|
6e978bc1a27c19389e99e454143122aa27e47b85
|
[
"MIT"
] | 4 |
2017-06-02T09:09:10.000Z
|
2018-01-25T19:06:12.000Z
|
tests/functional/pages/profile/case_study_basic.py
|
mayank-sfdc/directory-tests
|
6e978bc1a27c19389e99e454143122aa27e47b85
|
[
"MIT"
] | 53 |
2016-10-27T22:31:03.000Z
|
2022-03-07T11:18:25.000Z
|
tests/functional/pages/profile/case_study_basic.py
|
mayank-sfdc/directory-tests
|
6e978bc1a27c19389e99e454143122aa27e47b85
|
[
"MIT"
] | 3 |
2017-11-22T11:42:40.000Z
|
2022-02-21T01:20:04.000Z
|
# -*- coding: utf-8 -*-
"""Profile - Add Case Study - Basics details page"""
import logging
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import CaseStudy
from tests.functional.utils.request import Method, check_response, make_request
SERVICE = Service.PROFILE
NAME = "Add case study (basic details)"
TYPE = PageType.FORM
URL = URLs.PROFILE_CASE_STUDY_DETAILS.absolute
EDIT_URL = URLs.PROFILE_CASE_STUDY_EDIT.absolute
EXPECTED_STRINGS = ["Business showcase", "Title of your case study or project"]
def should_be_here(response: Response):
check_response(response, 200, body_contains=EXPECTED_STRINGS)
logging.debug("Supplier is on 'Create case study or project' - basic page")
def go_to(session: Session, *, case_number: int = None) -> Response:
    """Go to the "Add Case Study" basic details page.
This requires:
* Supplier to be logged in
"""
if case_number:
url = EDIT_URL.format(case_number=case_number)
else:
url = URL
headers = {"Referer": URLs.PROFILE_BUSINESS_PROFILE.absolute}
response = make_request(Method.GET, url, session=session, headers=headers)
logging.debug("Supplier is on the Add Case Study - Basic page")
return response
def prepare_form_data(token: str, case_study: CaseStudy) -> dict:
"""Prepare form data based on the flags and custom values provided."""
data = {
"csrfmiddlewaretoken": token,
"case_study_wizard_create_view-current_step": "details",
"details-title": case_study.title,
"details-short_summary": case_study.summary,
"details-description": case_study.description,
"details-sector": case_study.sector,
"details-website": case_study.website,
"details-keywords": case_study.keywords,
}
return data
def submit(session: Session, token: str, case_study: CaseStudy) -> Response:
"""Submit the form with basic case study data."""
data = prepare_form_data(token, case_study)
headers = {"Referer": URL}
response = make_request(
Method.POST, URL, session=session, headers=headers, data=data
)
logging.debug("Supplier successfully submitted basic case study data: %s", data)
return response
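# Hedged usage sketch (not part of the original page object); the token helper
# named below is an assumption used purely to illustrate how go_to() and
# submit() are meant to be chained:
#
#     response = go_to(session)
#     should_be_here(response)
#     token = extract_csrf_middleware_token(response)  # hypothetical helper
#     response = submit(session, token, case_study)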
| 35.765625 | 84 | 0.714723 |
e02944ff9ede745b9816c5794f8ae0b853eaa7df
| 751 |
py
|
Python
|
pip_api/_hash.py
|
sugatoray/pip-api
|
dec3a5e30c911b794763483ed985960a6732a40e
|
[
"Apache-2.0"
] | 81 |
2018-03-21T02:09:38.000Z
|
2022-02-11T09:30:13.000Z
|
pip_api/_hash.py
|
sugatoray/pip-api
|
dec3a5e30c911b794763483ed985960a6732a40e
|
[
"Apache-2.0"
] | 67 |
2018-09-27T16:03:02.000Z
|
2022-03-11T20:05:37.000Z
|
pip_api/_hash.py
|
sugatoray/pip-api
|
dec3a5e30c911b794763483ed985960a6732a40e
|
[
"Apache-2.0"
] | 15 |
2018-03-31T01:15:18.000Z
|
2022-03-10T08:13:23.000Z
|
import os
from pip_api._vendor.packaging.version import Version # type: ignore
import pip_api
from pip_api._call import call
from pip_api.exceptions import Incompatible, InvalidArguments
incompatible = pip_api.PIP_VERSION < Version("8.0.0")
def hash(filename: os.PathLike, algorithm: str = "sha256") -> str:
"""
Hash the given filename. Unavailable in `pip<8.0.0`
"""
if incompatible:
raise Incompatible
if algorithm not in ["sha256", "sha384", "sha512"]:
raise InvalidArguments("Algorithm {} not supported".format(algorithm))
result = call("hash", "--algorithm", algorithm, filename)
# result is of the form:
# <filename>:\n--hash=<algorithm>:<hash>\n
return result.strip().split(":")[-1]
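# Hedged usage sketch (not part of the original module): assumes pip >= 8.0.0
# is installed and that the package re-exports this function as pip_api.hash;
# the wheel filename below is purely illustrative.
if __name__ == "__main__":
    # Prints the sha256 digest that `pip hash` reports for the given file.
    print(hash("example-0.1.0-py3-none-any.whl"))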
| 27.814815 | 78 | 0.683089 |
4a136cfb44b6e5433294f2d54f886b3cedb65a0c
| 2,953 |
py
|
Python
|
tests/test_config.py
|
admdev8/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | 1 |
2020-08-30T11:18:40.000Z
|
2020-08-30T11:18:40.000Z
|
tests/test_config.py
|
admdev8/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | 4 |
2020-08-30T11:18:52.000Z
|
2020-08-30T12:18:17.000Z
|
tests/test_config.py
|
Smirenost/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | null | null | null |
"""Tests configuration scripts."""
from random import (
choice,
randint,
)
from string import ascii_letters
from unittest import (
mock,
TestCase,
)
from darglint.config import (
walk_path,
POSSIBLE_CONFIG_FILENAMES,
find_config_file_in_path,
get_logger,
LogLevel,
)
from .utils import (
ConfigurationContext,
)
class WalkPathTestCase(TestCase):
"""Tests the walk_path function."""
@mock.patch('darglint.config.os.getcwd')
def test_at_root_yields_only_once(self, mock_getcwd):
"""We should only get root once. # noqa"""
mock_getcwd.return_value = '/'
path_walker = walk_path()
self.assertEqual(next(path_walker), '/')
with self.assertRaises(StopIteration):
next(path_walker)
@mock.patch('darglint.config.os.getcwd')
def test_really_long_path(self, mock_getcwd):
directories = [
''.join([
choice(ascii_letters + '_-')
for _ in range(randint(1, 10))
])
for __ in range(randint(10, 30))
]
cwd = '/' + '/'.join(directories)
mock_getcwd.return_value = cwd
path_walker = walk_path()
paths_walked = [x for x in path_walker]
self.assertEqual(
len(paths_walked),
len(directories) + 1,
            'Should have had {} but had {} paths.'.format(
                len(directories) + 1,
                len(paths_walked),
)
)
class FindConfigFileInPathTestCase(TestCase):
"""Test that the config file is being found."""
@mock.patch('darglint.config.configparser.ConfigParser')
@mock.patch('darglint.config.os.listdir')
def test_filename_checked(self, mock_listdir, mock_ConfigParser):
"""Check that only the necessary filenames are identified. # noqa """
fake_files = [
''.join([choice(ascii_letters + '_-')
for _ in range(randint(5, 10))]) for _ in range(10)
]
mock_listdir.return_value = (
fake_files + list(POSSIBLE_CONFIG_FILENAMES)
)
config_parser = mock.MagicMock()
mock_ConfigParser.return_value = config_parser
contents_checked = list()
def read_file(filename):
contents_checked.append(filename)
return mock.MagicMock()
config_parser.read = read_file
find_config_file_in_path('./')
self.assertEqual(
set(contents_checked),
{'./' + x for x in POSSIBLE_CONFIG_FILENAMES}
)
class LoggingTestCase(TestCase):
def test_log_level_set_by_config(self):
with ConfigurationContext():
logger = get_logger()
self.assertEqual(logger.level, LogLevel.CRITICAL.value)
with ConfigurationContext(log_level=LogLevel.ERROR):
logger = get_logger()
self.assertEqual(logger.level, LogLevel.ERROR.value)
| 28.669903 | 78 | 0.608534 |
ddd32321d270aecd4ff03d0cf551220f76f0da3f
| 1,448 |
py
|
Python
|
lib/python/treadmill/tests/spawn/tree_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 133 |
2016-09-15T13:36:12.000Z
|
2021-01-18T06:29:13.000Z
|
lib/python/treadmill/tests/spawn/tree_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 108 |
2016-12-28T23:41:27.000Z
|
2020-03-05T21:20:37.000Z
|
lib/python/treadmill/tests/spawn/tree_test.py
|
evreng/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 69 |
2016-09-23T20:38:58.000Z
|
2020-11-11T02:31:21.000Z
|
"""Unit test for treadmill.spawn.tree.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import unittest
import mock
# Disable W0611: Unused import
import treadmill.tests.treadmill_test_skip_windows # pylint: disable=W0611
from treadmill import fs
from treadmill import supervisor
from treadmill import templates
from treadmill.spawn import tree as spawn_tree
class TreeTest(unittest.TestCase):
    """Tests for treadmill.spawn.tree."""
@mock.patch('os.listdir', mock.Mock())
@mock.patch('shutil.rmtree', mock.Mock())
@mock.patch('treadmill.fs.mkdir_safe', mock.Mock())
@mock.patch('treadmill.templates.create_script', mock.Mock())
@mock.patch('treadmill.supervisor.create_environ_dir', mock.Mock())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
def test_create(self):
"""Tests creating tree."""
os.listdir.side_effect = [
['testing'], ['testing'],
]
tree = spawn_tree.Tree('/does/not/exist', 2, 5)
tree.create()
self.assertEqual(1, supervisor.create_environ_dir.call_count)
self.assertEqual(8, fs.mkdir_safe.call_count)
self.assertEqual(6, templates.create_script.call_count)
self.assertEqual(2, shutil.rmtree.call_count)
if __name__ == '__main__':
unittest.main()
| 28.96 | 76 | 0.71616 |
301abb7ac4fe4f26fae1c98f228a02bf2cdf8e92
| 5,163 |
py
|
Python
|
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/time.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/time.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/time.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_DIGIT,
GraphFst,
convert_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.ar.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class TimeFst(GraphFst):
"""
Finite state transducer for classifying time, e.g.
12:30 a.m. est -> time { hours: "twelve" minutes: "thirty" suffix: "a m" zone: "e s t" }
2.30 a.m. -> time { hours: "two" minutes: "thirty" suffix: "a m" }
02.30 a.m. -> time { hours: "two" minutes: "thirty" suffix: "a m" }
2.00 a.m. -> time { hours: "two" suffix: "a m" }
2 a.m. -> time { hours: "two" suffix: "a m" }
02:00 -> time { hours: "two" }
2:00 -> time { hours: "two" }
10:00:05 a.m. -> time { hours: "ten" minutes: "zero" seconds: "five" suffix: "a m" }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool = True):
super().__init__(name="time", kind="classify", deterministic=deterministic)
suffix_graph = pynini.string_file(get_abs_path("data/time_suffix.tsv"))
time_zone_graph = pynini.string_file(get_abs_path("data/time_zone.tsv"))
# only used for < 1000 thousand -> 0 weight
cardinal = cardinal.graph
labels_hour = [str(x) for x in range(0, 24)]
labels_minute_single = [str(x) for x in range(1, 10)]
labels_minute_double = [str(x) for x in range(10, 60)]
delete_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (
pynini.closure(pynutil.delete("0"), 0, 1) + NEMO_DIGIT
)
graph_hour = delete_leading_zero_to_double_digit @ pynini.union(*labels_hour) @ cardinal
graph_minute_single = pynini.union(*labels_minute_single) @ cardinal
graph_minute_double = pynini.union(*labels_minute_double) @ cardinal
final_graph_hour = pynutil.insert("hours: \"") + graph_hour + pynutil.insert("\"")
final_graph_minute = (
pynutil.insert("minutes: \"")
+ (pynini.cross("0", "o") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
final_graph_second = (
pynutil.insert("seconds: \"")
+ (pynini.cross("0", "o") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
final_suffix_optional = pynini.closure(delete_space + insert_space + final_suffix, 0, 1)
final_time_zone_optional = pynini.closure(
delete_space
+ insert_space
+ pynutil.insert("zone: \"")
+ convert_space(time_zone_graph)
+ pynutil.insert("\""),
0,
1,
)
# 2:30 pm, 02:30, 2:00
graph_hm = (
final_graph_hour
+ pynutil.delete(":")
+ (pynutil.delete("00") | insert_space + final_graph_minute)
+ final_suffix_optional
+ final_time_zone_optional
)
# 10:30:05 pm,
graph_hms = (
final_graph_hour
+ pynutil.delete(":")
+ (pynini.cross("00", " minutes: \"zero\"") | insert_space + final_graph_minute)
+ pynutil.delete(":")
+ (pynini.cross("00", " seconds: \"zero\"") | insert_space + final_graph_second)
+ final_suffix_optional
+ final_time_zone_optional
)
# 2.xx pm/am
graph_hm2 = (
final_graph_hour
+ pynutil.delete(".")
+ (pynutil.delete("00") | insert_space + final_graph_minute)
+ delete_space
+ insert_space
+ final_suffix
+ final_time_zone_optional
)
# 2 pm est
graph_h = final_graph_hour + delete_space + insert_space + final_suffix + final_time_zone_optional
final_graph = (graph_hm | graph_h | graph_hm2 | graph_hms).optimize()
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| 38.819549 | 106 | 0.617277 |
2cdd588701a0bdac9dd8e327abf0ccdde6afeb5c
| 1,072 |
py
|
Python
|
runtests.py
|
violuke/django-readonly-field
|
ac6a3824e56e5e17492951aaf6deec8e5e3274d9
|
[
"MIT"
] | 12 |
2018-02-24T21:25:46.000Z
|
2021-02-01T06:19:26.000Z
|
runtests.py
|
violuke/django-readonly-field
|
ac6a3824e56e5e17492951aaf6deec8e5e3274d9
|
[
"MIT"
] | 6 |
2018-03-01T16:19:40.000Z
|
2022-01-17T15:08:05.000Z
|
runtests.py
|
violuke/django-readonly-field
|
ac6a3824e56e5e17492951aaf6deec8e5e3274d9
|
[
"MIT"
] | 6 |
2018-03-01T15:10:28.000Z
|
2021-12-16T21:18:01.000Z
|
#!/usr/bin/env python
import sys
import os
import contextlib
@contextlib.contextmanager
def cover():
do_coverage = "COVERAGE" in os.environ
if do_coverage:
import coverage
cov = coverage.Coverage(source=["django_readonly_field"])
cov.start()
print("Coverage will be generated")
try:
yield
finally:
if do_coverage:
cov.stop()
cov.save()
def run_tests(*test_args):
with cover():
try:
from django import setup
except ImportError:
import traceback
traceback.print_exc()
msg = ("To fix this error, run: "
"pip install -r requirements_test.txt")
raise ImportError(msg)
module = "tests.readonly_project.settings"
os.environ["DJANGO_SETTINGS_MODULE"] = module
setup()
from django.core.management import execute_from_command_line
execute_from_command_line(["", "test", ] + sys.argv[1:])
if __name__ == '__main__':
run_tests(*sys.argv[1:])
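# Hedged usage note (not part of the original script): run `python runtests.py`
# for a plain test run, or set the COVERAGE environment variable (for example
# `COVERAGE=1 python runtests.py`) to collect coverage for django_readonly_field.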
| 22.808511 | 68 | 0.596082 |
97034963e94ade02f5a804f39b5b0df02cc5c058
| 14,561 |
py
|
Python
|
sncosmo/spectral.py
|
sofiatti/sncosmo
|
ed0e231b5cff08ad24e420a43ba99c93c59722ca
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
sncosmo/spectral.py
|
sofiatti/sncosmo
|
ed0e231b5cff08ad24e420a43ba99c93c59722ca
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
sncosmo/spectral.py
|
sofiatti/sncosmo
|
ed0e231b5cff08ad24e420a43ba99c93c59722ca
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import math
from copy import deepcopy
import numpy as np
from astropy.utils import OrderedDict, lazyproperty
from astropy.io import ascii
import astropy.units as u
import astropy.constants as const
from astropy import cosmology
from . import registry
__all__ = ['get_bandpass', 'get_magsystem', 'read_bandpass', 'Bandpass',
'Spectrum', 'MagSystem', 'SpectralMagSystem', 'ABMagSystem']
def get_bandpass(name):
"""Get a Bandpass from the registry by name."""
return registry.retrieve(Bandpass, name)
def get_magsystem(name):
    """Get a MagSystem from the registry by name."""
return registry.retrieve(MagSystem, name)
def read_bandpass(fname, fmt='ascii', wave_unit=u.AA, name=None):
"""Read two-column bandpass. First column is assumed to be wavelength
in Angstroms."""
if fmt != 'ascii':
raise ValueError("format {0} not supported. Supported formats: 'ascii'"
.format(fmt))
t = ascii.read(fname, names=['wave', 'trans'])
return Bandpass(t['wave'], t['trans'], wave_unit=wave_unit, name=name)
class Bandpass(object):
"""Transmission as a function of spectral wavelength.
Parameters
----------
wave : list_like
Wavelength. Monotonically increasing values.
trans : list_like
Transmission fraction.
wave_unit : `~astropy.units.Unit` or str, optional
Wavelength unit. Default is Angstroms.
name : str, optional
Identifier. Default is `None`.
Examples
--------
>>> b = Bandpass([4000., 4200., 4400.], [0.5, 1.0, 0.5])
>>> b.wave
array([ 4000., 4200., 4400.])
>>> b.trans
array([ 0.5, 1. , 0.5])
>>> b.dwave
array([ 200., 200., 200.])
>>> b.wave_eff
4200.0
"""
def __init__(self, wave, trans, wave_unit=u.AA, name=None):
wave = np.asarray(wave, dtype=np.float64)
trans = np.asarray(trans, dtype=np.float64)
if wave.shape != trans.shape:
raise ValueError('shape of wave and trans must match')
if wave.ndim != 1:
raise ValueError('only 1-d arrays supported')
if wave_unit is not u.AA:
wave_unit = u.Unit(wave_unit)
wave = wave_unit.to(u.AA, wave, u.spectral())
# Check that values are monotonically increasing.
# We could sort them, but if this happens, it is more likely a user
# error or faulty bandpass definition. So we leave it to the user to
# sort them.
if not np.all(np.ediff1d(wave) > 0.):
raise ValueError('bandpass wavelength values must be monotonically'
' increasing when supplied in wavelength or '
'decreasing when supplied in energy/frequency.')
self.wave = wave
self._dwave = np.gradient(wave)
self.trans = trans
self.name = name
@property
def dwave(self):
"""Gradient of wavelengths, numpy.gradient(wave)."""
return self._dwave
@lazyproperty
def wave_eff(self):
"""Effective wavelength of bandpass in Angstroms."""
weights = self.trans * np.gradient(self.wave)
return np.sum(self.wave * weights) / np.sum(weights)
def to_unit(self, unit):
"""Return wavelength and transmission in new wavelength units.
If the requested units are the same as the current units, self is
returned.
Parameters
----------
unit : `~astropy.units.Unit` or str
Target wavelength unit.
Returns
-------
wave : `~numpy.ndarray`
trans : `~numpy.ndarray`
"""
if unit is u.AA:
return self.wave, self.trans
d = u.AA.to(unit, self.wave, u.spectral())
t = self.trans
if d[0] > d[-1]:
d = np.flipud(d)
t = np.flipud(t)
return d, t
def __repr__(self):
name = ''
if self.name is not None:
name = ' {0!r:s}'.format(self.name)
return "<Bandpass{0:s} at 0x{1:x}>".format(name, id(self))
class Spectrum(object):
"""A spectrum, representing wavelength and spectral density values.
Parameters
----------
wave : list_like
Wavelength values.
flux : list_like
Spectral flux density values.
error : list_like, optional
1 standard deviation uncertainty on flux density values.
wave_unit : `~astropy.units.Unit`
Units.
unit : `~astropy.units.BaseUnit`
For now, only units with flux density in energy (not photon counts).
z : float, optional
Redshift of spectrum (default is `None`)
dist : float, optional
Luminosity distance in Mpc, used to adjust flux upon redshifting.
The default is ``None``.
meta : OrderedDict, optional
Metadata.
"""
def __init__(self, wave, flux, error=None,
unit=(u.erg / u.s / u.cm**2 / u.AA), wave_unit=u.AA,
z=None, dist=None, meta=None):
self._wave = np.asarray(wave)
self._flux = np.asarray(flux)
self._wunit = wave_unit
self._unit = unit
self._z = z
self._dist = dist
if error is not None:
self._error = np.asarray(error)
if self._wave.shape != self._error.shape:
raise ValueError('shape of wavelength and variance must match')
else:
self._error = None
if meta is None:
self.meta = OrderedDict()
else:
self.meta = deepcopy(meta)
if self._wave.shape != self._flux.shape:
raise ValueError('shape of wavelength and flux must match')
if self._wave.ndim != 1:
raise ValueError('only 1-d arrays supported')
@property
def wave(self):
"""Wavelength values."""
return self._wave
@property
def flux(self):
"""Spectral flux density values"""
return self._flux
@property
def error(self):
"""Uncertainty on flux density."""
return self._error
@property
def wave_unit(self):
"""Units of wavelength."""
return self._wunit
@property
def unit(self):
"""Units of flux density."""
return self._unit
@property
def z(self):
"""Redshift of spectrum."""
return self._z
@z.setter
def z(self, value):
self._z = value
@property
def dist(self):
"""Distance to object in Mpc."""
return self._dist
@dist.setter
def dist(self, value):
self._dist = value
    def bandflux(self, band):
        """Perform synthetic photometry in a given bandpass.
The bandpass transmission is interpolated onto the wavelength grid
of the spectrum. The result is a weighted sum of the spectral flux
density values (weighted by transmission values).
Parameters
----------
band : Bandpass object or name of registered bandpass.
Returns
-------
bandflux : float
Total flux in ph/s/cm^2. If part of bandpass falls
outside the spectrum, `None` is returned instead.
bandfluxerr : float
Error on flux. Only returned if the `error` attribute is not
`None`.
"""
band = get_bandpass(band)
bwave, btrans = band.to_unit(self._wunit)
if (bwave[0] < self._wave[0] or bwave[-1] > self._wave[-1]):
return None
mask = ((self._wave > bwave[0]) & (self._wave < bwave[-1]))
d = self._wave[mask]
f = self._flux[mask]
# First convert to ergs/s/cm^2/(wavelength unit)...
target_unit = u.erg / u.s / u.cm**2 / self._wunit
if self._unit != target_unit:
f = self._unit.to(target_unit, f,
u.spectral_density(self._wunit, d))
# Then convert ergs to photons: photons = Energy / (h * nu).
f = f / const.h.cgs.value / self._wunit.to(u.Hz, d, u.spectral())
trans = np.interp(d, bwave, btrans)
binw = np.gradient(d)
ftot = np.sum(f * trans * binw)
if self._error is None:
return ftot
else:
e = self._error[mask]
# Do the same conversion as above
if self._unit != target_unit:
e = self._unit.to(target_unit, e,
u.spectral_density(self._wunit, d))
e = e / const.h.cgs.value / self._wunit.to(u.Hz, d, u.spectral())
etot = np.sqrt(np.sum((e * binw) ** 2 * trans))
return ftot, etot
def redshifted_to(self, z, adjust_flux=False, dist=None, cosmo=None):
"""Return a new Spectrum object at a new redshift.
The current redshift must be defined (self.z cannot be `None`).
A factor of (1 + z) / (1 + self.z) is applied to the wavelength.
The inverse factor is applied to the flux so that the bolometric
flux (e.g., erg/s/cm^2) remains constant.
.. note:: Currently this only works for units in ergs.
Parameters
----------
z : float
Target redshift.
adjust_flux : bool, optional
If True, the bolometric flux is adjusted by
``F_out = F_in * (D_in / D_out) ** 2``, where ``D_in`` and
``D_out`` are current and target luminosity distances,
respectively. ``D_in`` is self.dist. If self.dist is ``None``,
the distance is calculated from the current redshift and
given cosmology.
dist : float, optional
Output distance in Mpc. Used to adjust bolometric flux if
``adjust_flux`` is ``True``. Default is ``None`` which means
that the distance is calculated from the redshift and the
cosmology.
cosmo : `~astropy.cosmology.Cosmology` instance, optional
The cosmology used to estimate distances if dist is not given.
Default is ``None``, which results in using the default
cosmology.
Returns
-------
spec : Spectrum object
A new spectrum object at redshift z.
"""
if self._z is None:
raise ValueError('Must set current redshift in order to redshift'
' spectrum')
if self._wunit.physical_type == u.m.physical_type:
factor = (1. + z) / (1. + self._z)
elif self._wunit.physical_type == u.Hz.physical_type:
factor = (1. + self._z) / (1. + z)
else:
raise ValueError('wavelength must be in wavelength or frequency')
d = self._wave * factor
f = self._flux / factor
if self._error is not None:
e = self._error / factor
else:
e = None
if adjust_flux:
if self._dist is None and self._z == 0.:
raise ValueError("When current redshift is 0 and adjust_flux "
"is requested, current distance must be "
"defined")
if dist is None and z == 0.:
raise ValueError("When redshift is 0 and adjust_flux "
"is requested, dist must be defined")
if cosmo is None:
cosmo = cosmology.get_current()
if self._dist is None:
dist_in = cosmo.luminosity_distance(self._z)
else:
dist_in = self._dist
if dist is None:
dist = cosmo.luminosity_distance(z)
if dist_in <= 0. or dist <= 0.:
raise ValueError("Distances must be greater than 0.")
# Adjust the flux
factor = (dist_in / dist) ** 2
f *= factor
if e is not None:
e *= factor
return Spectrum(d, f, error=e, z=z, dist=dist, meta=self.meta,
unit=self._unit, wave_unit=self._wunit)
class MagSystem(object):
"""An abstract base class for magnitude systems."""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None):
self._zpbandflux = {}
self._name = name
@abc.abstractmethod
def _refspectrum_bandflux(self, band):
"""Flux of the fundamental spectrophotometric standard."""
pass
@property
def name(self):
"""Name of magnitude system."""
return self._name
@name.setter
def name(self, value):
self._name = value
def zpbandflux(self, band):
"""Flux of an object with magnitude zero in the given bandpass.
Parameters
----------
bandpass : `~sncosmo.spectral.Bandpass` or str
Returns
-------
bandflux : float
Flux in photons / s / cm^2.
"""
band = get_bandpass(band)
try:
return self._zpbandflux[band]
except KeyError:
bandflux = self._refspectrum_bandflux(band)
self._zpbandflux[band] = bandflux
return bandflux
def band_flux_to_mag(self, flux, band):
"""Convert flux (photons / s / cm^2) to magnitude."""
return -2.5 * math.log10(flux / self.zpbandflux(band))
def band_mag_to_flux(self, mag, band):
"""Convert magnitude to flux in photons / s / cm^2"""
return self.zpbandflux(band) * 10.**(-0.4 * mag)
class SpectralMagSystem(MagSystem):
"""A magnitude system defined by a fundamental spectrophotometric
standard.
Parameters
----------
refspectrum : `sncosmo.Spectrum`
The spectrum of the fundamental spectrophotometric standard.
"""
def __init__(self, refspectrum, name=None):
super(SpectralMagSystem, self).__init__(name)
self._refspectrum = refspectrum
def _refspectrum_bandflux(self, band):
return self._refspectrum.bandflux(band)
class ABMagSystem(MagSystem):
"""Magnitude system where a source with F_nu = 3631 Jansky at all
frequencies has magnitude 0 in all bands."""
def _refspectrum_bandflux(self, band):
bwave, btrans = band.to_unit(u.Hz)
# AB spectrum is 3631 x 10^{-23} erg/s/cm^2/Hz
# Get spectral values in photons/cm^2/s/Hz at bandpass wavelengths
# by dividing by (h \nu).
f = 3631.e-23 / const.h.cgs.value / bwave
binw = np.gradient(bwave)
return np.sum(f * btrans * binw)
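# Hedged usage sketch (not part of the original module): a minimal demo of the
# classes defined above that avoids the bandpass/magsystem registry; all values
# are illustrative.
if __name__ == '__main__':
    demo_band = Bandpass([4000., 4200., 4400.], [0.5, 1.0, 0.5], name='demo')
    print('effective wavelength: {0}'.format(demo_band.wave_eff))
    demo_spec = Spectrum([3000., 5000., 7000.], [1.0e-17, 2.0e-17, 1.5e-17], z=0.)
    # Redshift to z=0.1; without adjust_flux the bolometric flux is preserved.
    print('redshifted wavelengths: {0}'.format(demo_spec.redshifted_to(0.1).wave))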
| 31.313978 | 79 | 0.573518 |
fefe7e8c0c3536cbd5d88a1ad3e8d7dfa3ff17f0
| 5,782 |
py
|
Python
|
train-model.py
|
TanakitInt/FSRCNN-anime
|
51028294f0fdfefb84bc1118a0ecf7e27e8f42e5
|
[
"MIT"
] | null | null | null |
train-model.py
|
TanakitInt/FSRCNN-anime
|
51028294f0fdfefb84bc1118a0ecf7e27e8f42e5
|
[
"MIT"
] | null | null | null |
train-model.py
|
TanakitInt/FSRCNN-anime
|
51028294f0fdfefb84bc1118a0ecf7e27e8f42e5
|
[
"MIT"
] | null | null | null |
# import library
import sys
import keras
import tensorflow as tf
print("Python version : " + sys.version)
print("Keras version : " + keras.__version__)
# import model packages
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Input, Activation, LeakyReLU
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import plot_model
import numpy as np
import math
import os
import h5py
# import visualization packages
import json
import pydotplus
from matplotlib import pyplot as plt
from keras.utils.vis_utils import model_to_dot
keras.utils.vis_utils.pydot = pydotplus
#os.environ["PATH"] += os.pathsep + 'C:/Program Files/Graphviz 2.44.1/bin/'
# define the FSRCNN model
def model():
# define model type
FSRCNN = Sequential()
# add model layers
FSRCNN.add(Conv2D(filters=56, kernel_size = (5, 5), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True, input_shape=(None, None, 1)))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=16, kernel_size = (1, 1), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=12, kernel_size = (3, 3), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2D(filters=56, kernel_size = (1, 1), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(LeakyReLU(alpha=0.1))
FSRCNN.add(Conv2DTranspose(filters=1, kernel_size = (9, 9), strides = (1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
FSRCNN.add(Activation("sigmoid"))
model = FSRCNN
# dot_img_file = 'Diagram/fsrcnn-anime_model.png'
# tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True, dpi=120)
# print("Saved model diagram.")
# define optimizer
adam = Adam(lr=0.003)
# compile model
FSRCNN.compile(optimizer=adam, loss='mse', metrics=['mean_squared_error'])
return FSRCNN
def read_training_data(file):
# read training data
with h5py.File(file, 'r') as hf:
data = np.array(hf.get('data'))
label = np.array(hf.get('label'))
train_data = np.transpose(data, (0, 2, 3, 1))
train_label = np.transpose(label, (0, 2, 3, 1))
return train_data, train_label
def train():
# ----------Training----------
fsrcnn_model = model()
#fsrcnn_model.load_weights("model-checkpoint/fsrcnn-anime-tanakitint-weights-improvement-00032.hdf5")
print(fsrcnn_model.summary())
DATA_TRAIN = "h5-dataset/train.h5"
DATA_TEST = "h5-dataset/test.h5"
CHECKPOINT_PATH = "model-checkpoint/fsrcnn-anime-tanakitint-weights-improvement-{epoch:05d}.hdf5"
ILR_train, HR_train = read_training_data(DATA_TRAIN)
ILR_test, HR_test = read_training_data(DATA_TEST)
# checkpoint
checkpoint = ModelCheckpoint(CHECKPOINT_PATH, monitor='mean_squared_error', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# fit model
history = fsrcnn_model.fit(ILR_train, HR_train, epochs=25, batch_size=32, callbacks=callbacks_list, validation_data=(ILR_test, HR_test))
# save h5 model
fsrcnn_model.save("my_model-fsrcnn-anime-tanakitint.h5")
print("Saved h5 model to disk")
# ----------Visualization----------
# training visualization
training_data = history.history
print(training_data.keys())
# text file
f = open('Diagram/training.txt', 'w')
f.write(str(training_data))
f.close()
# json file
f = open('Diagram/training.json', 'w')
training_data = str(training_data)
f.write(str(training_data.replace("\'", "\"")))
f.close()
print("Training Data Saved.")
# summarize history for val_loss
fig = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('val_loss')
plt.ylabel('val_loss')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='upper right')
# save fig and show
plt.savefig('Diagram/fsrcnn-anime_model_loss.png', dpi=120)
plt.show()
print("Training Fig Saved.")
# summarize history for val_mean_squared_error
fig = plt.figure()
plt.plot(history.history['mean_squared_error'])
plt.plot(history.history['val_mean_squared_error'])
plt.title('val_mean_squared_error')
plt.ylabel('val_mean_squared_error')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='upper right')
# save fig and show
plt.savefig('Diagram/fsrcnn-anime_model_mean_squared_error.png', dpi=120)
plt.show()
print("Training Fig Saved.")
if __name__ == "__main__":
train()
| 33.421965 | 171 | 0.689727 |
fd8112e7e5a14a0fbe90d6dc695b4413f99e12a3
| 360 |
py
|
Python
|
eLegislative/elegislative_app/migrations/0033_remove_olddocumentsmodel_last_modified_date.py
|
lloydgarcia77/eLMS_San_Pedro_Laguna
|
dd6b59ba971301a8af40f262db1f651b7a7a6e0f
|
[
"MIT"
] | null | null | null |
eLegislative/elegislative_app/migrations/0033_remove_olddocumentsmodel_last_modified_date.py
|
lloydgarcia77/eLMS_San_Pedro_Laguna
|
dd6b59ba971301a8af40f262db1f651b7a7a6e0f
|
[
"MIT"
] | null | null | null |
eLegislative/elegislative_app/migrations/0033_remove_olddocumentsmodel_last_modified_date.py
|
lloydgarcia77/eLMS_San_Pedro_Laguna
|
dd6b59ba971301a8af40f262db1f651b7a7a6e0f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-11 21:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elegislative_app', '0032_auto_20210211_2018'),
]
operations = [
migrations.RemoveField(
model_name='olddocumentsmodel',
name='last_modified_date',
),
]
| 20 | 56 | 0.627778 |
86080a57a063a6316ad05479509eec3fb043dbf3
| 6,196 |
py
|
Python
|
kubernetes_asyncio/client/api/apiregistration_api.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/api/apiregistration_api.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/api/apiregistration_api.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ApiregistrationApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1APIGroup
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1APIGroup",
401: None,
}
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
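# Hedged usage sketch (not part of the generated client): kubernetes_asyncio is
# asyncio based, so the generated method above is awaited. Assumes a reachable
# cluster and a local kubeconfig; otherwise this exits with a config/connection
# error.
if __name__ == "__main__":
    import asyncio
    from kubernetes_asyncio import config as k8s_config
    async def _demo():
        await k8s_config.load_kube_config()
        async with ApiClient() as api_client:
            print(await ApiregistrationApi(api_client).get_api_group())
    asyncio.run(_demo())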
| 37.325301 | 124 | 0.599096 |
f3a345e3ceb2fa198283370ff3ae6bed5aa9fcf9
| 1,718 |
py
|
Python
|
tests/core/test_container.py
|
tim-sueberkrueb/grout
|
b19bce094e97464087390206eabbc5e770d61799
|
[
"MIT"
] | null | null | null |
tests/core/test_container.py
|
tim-sueberkrueb/grout
|
b19bce094e97464087390206eabbc5e770d61799
|
[
"MIT"
] | 12 |
2017-03-28T11:22:40.000Z
|
2017-06-14T12:39:21.000Z
|
tests/core/test_container.py
|
tim-sueberkrueb/baka
|
b19bce094e97464087390206eabbc5e770d61799
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
import pytest
from baka.core import Project
from baka.core import Container, NotReadyError
class TestContainer:
_test_backend_options = {
'name': 'test-case-container'
}
_temp_dir = os.path.join('/tmp', 'baka-tests')
def test_options(self):
p = Project()
c = Container(
p, backend_options=self._test_backend_options
)
assert c.name == self._test_backend_options['name']
assert not c.ready
def test_run(self):
# Setup
p = Project()
c = Container(
p, backend_options=self._test_backend_options
)
c.init()
assert c.ready
c.setup()
# Test push/pull & exec
filepath = os.path.join(self._temp_dir, 'test.txt')
if not os.path.isdir(self._temp_dir):
os.mkdir(self._temp_dir)
with open(filepath, 'w') as file:
file.write('lorem ipsum dolor sit amet')
c.push(filepath, '/home/baka/')
c.exec('mv', '/home/baka/test.txt', '/home/baka/test2.txt')
c.pull('/home/baka/test2.txt', self._temp_dir)
assert os.path.isfile(os.path.join(self._temp_dir, 'test2.txt'))
# Finish
c.perform()
c.finish()
# Destroy
c.destroy()
assert not c.ready
def test_not_ready(self):
p = Project()
c = Container(p, backend_options=self._test_backend_options)
assert not c.ready
methods = (
c.run, c.setup, c.perform, c.finish,
c.destroy, c.exec, c.push, c.pull
)
for m in methods:
with pytest.raises(NotReadyError):
m()
| 26.84375 | 72 | 0.563446 |
ad675f3e92b942dae14db67efd73c7954439ad26
| 5,647 |
py
|
Python
|
basicsr/models/archs/dfdnet_util.py
|
My-Zhu/BasicSR
|
c26dd943191a3c2528e1b924c366acb221ead86f
|
[
"Apache-2.0"
] | 1,421 |
2019-04-10T14:38:55.000Z
|
2022-03-28T02:32:17.000Z
|
basicsr/models/archs/dfdnet_util.py
|
My-Zhu/BasicSR
|
c26dd943191a3c2528e1b924c366acb221ead86f
|
[
"Apache-2.0"
] | 198 |
2019-05-20T06:34:21.000Z
|
2022-01-28T13:06:47.000Z
|
basicsr/models/archs/dfdnet_util.py
|
My-Zhu/BasicSR
|
c26dd943191a3c2528e1b924c366acb221ead86f
|
[
"Apache-2.0"
] | 353 |
2019-05-11T16:07:34.000Z
|
2022-03-29T12:01:34.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as SpectralNorm
from torch.autograd import Function
class BlurFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, kernel_flip):
ctx.save_for_backward(kernel, kernel_flip)
grad_input = F.conv2d(
grad_output, kernel_flip, padding=1, groups=grad_output.shape[1])
return grad_input
@staticmethod
def backward(ctx, gradgrad_output):
kernel, kernel_flip = ctx.saved_tensors
grad_input = F.conv2d(
gradgrad_output,
kernel,
padding=1,
groups=gradgrad_output.shape[1])
return grad_input, None, None
class BlurFunction(Function):
@staticmethod
def forward(ctx, x, kernel, kernel_flip):
ctx.save_for_backward(kernel, kernel_flip)
output = F.conv2d(x, kernel, padding=1, groups=x.shape[1])
return output
@staticmethod
def backward(ctx, grad_output):
kernel, kernel_flip = ctx.saved_tensors
grad_input = BlurFunctionBackward.apply(grad_output, kernel,
kernel_flip)
return grad_input, None, None
blur = BlurFunction.apply
class Blur(nn.Module):
def __init__(self, channel):
super().__init__()
kernel = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]],
dtype=torch.float32)
kernel = kernel.view(1, 1, 3, 3)
kernel = kernel / kernel.sum()
kernel_flip = torch.flip(kernel, [2, 3])
self.kernel = kernel.repeat(channel, 1, 1, 1)
self.kernel_flip = kernel_flip.repeat(channel, 1, 1, 1)
def forward(self, x):
return blur(x, self.kernel.type_as(x), self.kernel_flip.type_as(x))
def calc_mean_std(feat, eps=1e-5):
"""Calculate mean and std for adaptive_instance_normalization.
Args:
feat (Tensor): 4D tensor.
eps (float): A small value added to the variance to avoid
divide-by-zero. Default: 1e-5.
"""
size = feat.size()
assert len(size) == 4, 'The input feature should be 4D tensor.'
n, c = size[:2]
feat_var = feat.view(n, c, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(n, c, 1, 1)
feat_mean = feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1)
return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
"""Adaptive instance normalization.
    Adjust the reference features to have color and illumination similar to
    those in the degraded features.
    Args:
        content_feat (Tensor): The reference features.
        style_feat (Tensor): The degraded features.
"""
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat -
content_mean.expand(size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def AttentionBlock(in_channel):
return nn.Sequential(
SpectralNorm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)),
nn.LeakyReLU(0.2, True),
SpectralNorm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)))
def conv_block(in_channels,
out_channels,
kernel_size=3,
stride=1,
dilation=1,
bias=True):
"""Conv block used in MSDilationBlock."""
return nn.Sequential(
SpectralNorm(
nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) // 2) * dilation,
bias=bias)),
nn.LeakyReLU(0.2),
SpectralNorm(
nn.Conv2d(
out_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) // 2) * dilation,
bias=bias)),
)
class MSDilationBlock(nn.Module):
"""Multi-scale dilation block."""
def __init__(self,
in_channels,
kernel_size=3,
dilation=[1, 1, 1, 1],
bias=True):
super(MSDilationBlock, self).__init__()
self.conv_blocks = nn.ModuleList()
for i in range(4):
self.conv_blocks.append(
conv_block(
in_channels,
in_channels,
kernel_size,
dilation=dilation[i],
bias=bias))
self.conv_fusion = SpectralNorm(
nn.Conv2d(
in_channels * 4,
in_channels,
kernel_size=kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
bias=bias))
def forward(self, x):
out = []
for i in range(4):
out.append(self.conv_blocks[i](x))
out = torch.cat(out, 1)
out = self.conv_fusion(out) + x
return out
class UpResBlock(nn.Module):
def __init__(self, in_channel):
super(UpResBlock, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channel, in_channel, 3, 1, 1),
nn.LeakyReLU(0.2, True),
nn.Conv2d(in_channel, in_channel, 3, 1, 1),
)
def forward(self, x):
out = x + self.body(x)
return out
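# Hedged usage sketch (not part of the original module): exercises the AdaIN
# helper and the blocks above on small random CPU tensors; shapes are
# illustrative only.
if __name__ == '__main__':
    content = torch.randn(1, 8, 16, 16)
    style = torch.randn(1, 8, 16, 16)
    # Re-normalize `content` to the channel-wise mean/std of `style`.
    print(adaptive_instance_normalization(content, style).shape)
    print(MSDilationBlock(8)(content).shape)
    print(UpResBlock(8)(content).shape)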
| 30.197861 | 77 | 0.571808 |
c098ddb2123bc32c9ffaf98cd013b18e416f068c
| 797 |
py
|
Python
|
phr/models.py
|
kwzofc/phr
|
04e87daf182783c99191a320c3d83f5abdd60da1
|
[
"MIT"
] | null | null | null |
phr/models.py
|
kwzofc/phr
|
04e87daf182783c99191a320c3d83f5abdd60da1
|
[
"MIT"
] | null | null | null |
phr/models.py
|
kwzofc/phr
|
04e87daf182783c99191a320c3d83f5abdd60da1
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class PHR(models.Model):
user_name = models.ForeignKey(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=30, blank=False)
last_name = models.CharField(max_length=30, blank=False)
email = models.EmailField(blank=True)
tel_number = models.CharField(max_length=10, blank=False)
citizen_id = models.CharField(max_length=13, blank=False)
weight = models.FloatField(blank=False)
height = models.FloatField(blank=False)
pressure = models.CharField(max_length=10, blank=True)
blood_group = models.CharField(max_length=5, blank=True)
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
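# Hedged usage sketch (not part of the original model): with migrations applied
# and a User instance available, a record would typically be created through
# the ORM; all field values below are illustrative.
#
#     PHR.objects.create(
#         user_name=some_user, first_name="Ada", last_name="Lovelace",
#         tel_number="0812345678", citizen_id="1234567890123",
#         weight=55.0, height=165.0,
#     )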
| 46.882353 | 65 | 0.761606 |
aad870ce4120c080fe6a89202f35eb5ed995c76a
| 4,277 |
py
|
Python
|
metrics/bleurt/bleurt.py
|
yashmaherwal/datasets
|
fe52b678819d1f41a4ff4b5994c9ff53324bf4fe
|
[
"Apache-2.0"
] | 7 |
2021-01-04T22:18:26.000Z
|
2021-07-10T09:13:29.000Z
|
metrics/bleurt/bleurt.py
|
yashmaherwal/datasets
|
fe52b678819d1f41a4ff4b5994c9ff53324bf4fe
|
[
"Apache-2.0"
] | null | null | null |
metrics/bleurt/bleurt.py
|
yashmaherwal/datasets
|
fe52b678819d1f41a4ff4b5994c9ff53324bf4fe
|
[
"Apache-2.0"
] | 3 |
2021-09-19T08:20:42.000Z
|
2022-02-19T16:32:40.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BLEURT metric. """
import os
from logging import getLogger
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = getLogger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the [README.md] file at https://github.com/google-research/bleurt for more information.
"""
_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    checkpoint: BLEURT checkpoint. Defaults to bleurt-base-128 if None.
Returns:
'scores': List of scores.
"""
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
}
class BLEURT(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/google-research/bleurt",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/google-research/bleurt"],
reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
)
def _download_and_prepare(self, dl_manager):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
)
self.config_name = "bleurt-base-128"
if self.config_name not in CHECKPOINT_URLS.keys():
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
)
# download the model checkpoint specified by self.config_name and set up the scorer
model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[self.config_name])
self.scorer = score.BleurtScorer(os.path.join(model_path, self.config_name))
def _compute(self, predictions, references):
scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores}
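# Hedged usage sketch (not part of the original metric): loading through
# datasets.load_metric downloads the selected BLEURT checkpoint on first use
# and requires the bleurt package to be installed.
if __name__ == "__main__":
    metric = datasets.load_metric("bleurt", "bleurt-tiny-128")
    result = metric.compute(
        predictions=["the cat sat on the mat"],
        references=["a cat was sitting on the mat"],
    )
    print(result["scores"])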
| 39.971963 | 180 | 0.695815 |
4a71483e17ff91093b8af7655b7679eba3dc4743
| 2,366 |
py
|
Python
|
web/apps/admin/mail.py
|
JW709/zoom
|
3b26a22e569bf44a9856b587771589413b52e81b
|
[
"MIT"
] | null | null | null |
web/apps/admin/mail.py
|
JW709/zoom
|
3b26a22e569bf44a9856b587771589413b52e81b
|
[
"MIT"
] | null | null | null |
web/apps/admin/mail.py
|
JW709/zoom
|
3b26a22e569bf44a9856b587771589413b52e81b
|
[
"MIT"
] | 1 |
2019-02-06T16:10:56.000Z
|
2019-02-06T16:10:56.000Z
|
"""
admin.mail
mail queue viewer
"""
import uuid
import zoom
from zoom.context import context
from zoom.mvc import View, Controller
from zoom.mail import get_mail_store, send, Attachment
from zoom.browse import browse
import zoom.fields as f
from zoom.forms import Form
from zoom.components import success
from zoom.tools import home
from zoom.alerts import success
mail_form = Form([
f.TextField('Recipient', size=60, maxlength=60, default=(context.user.email)),
f.TextField('Subject', default='a subject ' + uuid.uuid4().hex),
f.MemoField('Message', value='this is the message body\n' + uuid.uuid4().hex),
# f.FileField('Attachment'),
f.ButtonField('Send'),
])
class MyView(View):
def index(self):
actions = ['Compose']
site = zoom.system.request.site
mail_settings = ' '.join([
'%s: %s' % (k, v) for k, v in dict(
host=site.smtp_host,
user=site.smtp_user,
port=site.smtp_port,
passwd=('*' * (len(site.smtp_passwd) - 2)) + site.smtp_passwd[-2:],
).items() if v
])
content = mail_settings + '<h2>Waiting</h2>' + browse(get_mail_store(context.site))
return zoom.page(content, title='Mail', actions=actions)
def compose(self):
site = zoom.system.request.site
return zoom.page(content='Send mail as "{} <{}>"<br><br>{}'.format(
site.mail_from_name,
site.mail_from_addr,
mail_form.edit(),
), title='Send Mail')
class MyController(Controller):
def send_button(self, *args, **input):
if mail_form.validate(input):
if False and 'attachment' in input and hasattr(input['attachment'], 'filename'):
send(
input['recipient'],
input['subject'],
input['message'],
[Attachment(
input['attachment'].filename,
input['attachment'].file,
)],
)
success('message sent with attachment')
else:
send(input['recipient'], input['subject'], input['message'])
success('message sent')
return home('mail')
view = MyView()
controller = MyController()
| 29.949367 | 92 | 0.565089 |
3311e73e3186d8ad8606863ecb7b939622cdd3d0
| 370 |
py
|
Python
|
AdvancedPythonObjectsandDataStructures/advanced_dictonaries.py
|
theprogrammingthinker/Python-practice
|
fef11a7fbd5082a0614b01f88a13ea29d68860bf
|
[
"Unlicense"
] | 1 |
2017-05-02T10:28:36.000Z
|
2017-05-02T10:28:36.000Z
|
AdvancedPythonObjectsandDataStructures/advanced_dictonaries.py
|
theprogrammingthinker/Python-practice
|
fef11a7fbd5082a0614b01f88a13ea29d68860bf
|
[
"Unlicense"
] | null | null | null |
AdvancedPythonObjectsandDataStructures/advanced_dictonaries.py
|
theprogrammingthinker/Python-practice
|
fef11a7fbd5082a0614b01f88a13ea29d68860bf
|
[
"Unlicense"
] | null | null | null |
d = {"k": 1, "k2": 2}
print({x: x ** 2 for x in range(10)})
# {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}
var = {k: v ** 2 for k, v in zip(['a', 'b', 'c'], range(10))}
print(var)
# {'a': 0, 'b': 1, 'c': 4}
for k in d.items():
print(k)
for k in d.keys():
print(k)
for k in d.values():
print(k)
print(d.values())
print(d.keys())
print(d)
| 16.086957 | 68 | 0.467568 |
d1923b1d1375f7198d2174f2de3050d89dcf29f4
| 870 |
py
|
Python
|
app/models/constants.py
|
cmd-ntrf/mc-hub
|
a636c9019d2af29727c95f5a13ade83f89de3821
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/constants.py
|
cmd-ntrf/mc-hub
|
a636c9019d2af29727c95f5a13ade83f89de3821
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/constants.py
|
cmd-ntrf/mc-hub
|
a636c9019d2af29727c95f5a13ade83f89de3821
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path, environ
# Regular constants
INSTANCE_CATEGORIES = ["mgmt", "login", "node"]
STORAGE_SPACES = ["home", "project", "scratch"]
AUTO_ALLOCATED_IP_LABEL = "Automatic allocation"
# Magic Castle
MAGIC_CASTLE_MODULE_SOURCE = "git::https://github.com/ComputeCanada/magic_castle.git"
MAGIC_CASTLE_PUPPET_CONFIGURATION_URL = (
"https://github.com/ComputeCanada/puppet-magic_castle.git"
)
MAGIC_CASTLE_VERSION_TAG = "10.2"
TERRAFORM_REQUIRED_VERSION = ">= 0.14.2"
# Paths and filenames
CLUSTERS_PATH = path.join(environ["HOME"], "clusters")
APP_PATH = path.join(environ["HOME"], "app")
DATABASE_PATH = path.join(environ["HOME"], "database", "database.db")
SCHEMA_MIGRATIONS_DIRECTORY = path.join(APP_PATH, "database", "migrations")
TERRAFORM_STATE_FILENAME = "terraform.tfstate"
CONFIGURATION_FILE_PATH = path.join(environ["HOME"], "configuration.json")
| 37.826087 | 85 | 0.765517 |
c0265f2e2fbbe36f26504539e7373d4d76447e4a
| 1,889 |
py
|
Python
|
rest_api_framework/models/validators.py
|
boblefrag/python-rest-api-framework
|
0caf33e4b07dbcce3a1cd891c64fdb359d88eb33
|
[
"MIT"
] | 7 |
2015-05-31T08:35:24.000Z
|
2022-01-29T21:09:19.000Z
|
rest_api_framework/models/validators.py
|
boblefrag/python-rest-api-framework
|
0caf33e4b07dbcce3a1cd891c64fdb359d88eb33
|
[
"MIT"
] | 1 |
2017-05-24T11:56:46.000Z
|
2017-05-24T11:56:46.000Z
|
rest_api_framework/models/validators.py
|
boblefrag/python-rest-api-framework
|
0caf33e4b07dbcce3a1cd891c64fdb359d88eb33
|
[
"MIT"
] | 13 |
2015-07-15T06:29:53.000Z
|
2022-03-28T17:54:39.000Z
|
"""
Validators to check the values of Fields instances
"""
from abc import ABCMeta, abstractmethod
class Validator(object):
"""
Base Validator class
Used to validate data format
"""
__metaclass__ = ABCMeta
@abstractmethod
def validate(self, field, *args):
"""
        Method to validate that a field is formatted as expected or is
of correct type/class
"""
raise NotImplementedError
class IntegerValidator(Validator):
"""
Validate that a value is of type int
"""
def validate(self, field):
"""
Check if field is an instance of type 'int'
"""
if isinstance(field, int):
return True
return False
class StringValidator(Validator):
"""
Validate that a value is of type basestring (either str or unicode)
"""
def validate(self, field):
if isinstance(field, basestring):
return True
return False
class FloatValidator(Validator):
"""
Validate that a value is of float type
"""
def validate(self, field):
if isinstance(field, float):
return True
return False
class SQLiteForeign(Validator):
"""
Validate that the foreign row exists
"""
need_datastore = True
def __init__(self, **options):
self.options = options
def validate(self, field, datastore):
cursor = datastore.conn.cursor()
cursor.execute("SELECT * FROM sqlite_master WHERE type='table';")
# cursor = datastore.conn.cursor()
query = "SELECT {0} FROM {1} WHERE {2}=?".format(
self.options["foreign"]["column"],
self.options["foreign"]["table"],
self.options["foreign"]["column"],
)
cursor.execute(query, (field, ))
if cursor.fetchone():
return True
return False
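# Hedged usage sketch (not part of the original module): shows how the simple
# validators above are intended to be called. SQLiteForeign is left out because it
# needs a live SQLite datastore.
if __name__ == "__main__":
    assert IntegerValidator().validate(3)
    assert not IntegerValidator().validate("3")
    assert StringValidator().validate("abc")   # Python 2: str or unicode both pass
    assert FloatValidator().validate(1.5)
    assert not FloatValidator().validate(1)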
| 22.759036 | 73 | 0.592906 |
85bc38de8ebd16eeacb3db8743290425cf927488
| 11,696 |
py
|
Python
|
api/tests.py
|
hyusuf4/FriendZone2
|
342a2f86295341ea98bbb9dd596ef823eb509962
|
[
"Apache-2.0"
] | null | null | null |
api/tests.py
|
hyusuf4/FriendZone2
|
342a2f86295341ea98bbb9dd596ef823eb509962
|
[
"Apache-2.0"
] | null | null | null |
api/tests.py
|
hyusuf4/FriendZone2
|
342a2f86295341ea98bbb9dd596ef823eb509962
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase
from django.utils import timezone
from .models import Author, FriendRequest, Friends,Post,Comment, Following
from django.test import Client
from django.urls import reverse
from django.db.models import Q
import json
""""""
from api.models import Author, FriendRequest, Friends,Post,Comment
from api.serializers import AuthorSerializer, FriendRequestSerializer, FriendsSerializer,PostSerializer,CommentSerializer, FollowingSerializer
from rest_framework import status
from rest_framework.decorators import api_view,permission_classes
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from django.utils.timezone import get_current_timezone, make_aware
from django.core import serializers
from django.utils.dateparse import parse_datetime
from rest_framework.permissions import IsAuthenticated
import sys
import unittest
from django.utils import timezone
import pytz
""""""
from .views import enroll_following, make_them_friends, unfollow, friend_request_to_remote
# Create your tests here.
def create_author(f_name="A", l_name="B", u_name="101", pwd=101):
return Author.objects.create(\
firstName=f_name,\
lastName=l_name,\
userName=u_name,\
password=pwd
)
def create_friend_request(author_one, author_two):
return FriendRequest.objects.create(\
from_author=author_one,\
to_author=author_two,\
created=timezone.now()
)
class GetAllAuthorListViewTest(TestCase):
def test_get_authors(self):
data = {'username': 'u3','password': 'u3', 'email':'[email protected]'}
response = self.client.post(reverse('api:signup'), data=data, format='json')
body = response.content.decode('utf-8')
body = json.loads(body)
credentials = body.get('token')
self.client.defaults['HTTP_AUTHORIZATION'] = 'Token ' + credentials
response = self.client.post('api:authors_list')
print(00000000, response.content, 11111111)
self.assertEqual(response.status_code, 404)
class SignupViewTest(TestCase):
def test_signup(self):
# response = self.client.login(username="admin", password="admin")
data = {'username': 'u3','password': 'u3', 'email':'[email protected]'}
response = self.client.post(reverse('api:signup'), data=data, format='json')
self.assertEqual(response.status_code, 200)
class LoginViewTest(TestCase):
def test_login_inactive_user(self):
# login first
data = {'username': 'u3','password': 'u3', 'email':'[email protected]'}
response = self.client.post(reverse('api:signup'), data=data, format='json')
body = response.content.decode('utf-8')
body = json.loads(body)
credentials = body.get('token')
data = {'username': 'u3','password': 'u3'}
self.client.defaults['HTTP_AUTHORIZATION'] = 'Token ' + credentials
response = self.client.post(reverse('api:login'), data=data, format='json')
# print(11111111111,response, 222222222222)
self.assertEqual(response.status_code, 401)
def test_login_active_user(self):
data = {'username': 'u3','password': 'u3', 'email':'[email protected]'}
response = self.client.post(reverse('api:signup'), data=data, format='json')
# body = JSONParser().parse(response.content.decode('utf-8'))
body = response.content.decode('utf-8')
body = json.loads(body)
credentials = body.get('token')
data = {'username': 'u3','password': 'u3'}
self.client.defaults['HTTP_AUTHORIZATION'] = 'Token ' + credentials
response = self.client.post(reverse('api:login'), data=data, format='json')
# print(11111111111,response, 222222222222)
self.assertEqual(response.status_code, 401)
class FriendRequestViewTests(TestCase):
# def test_create_first_frequest(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# fr = create_friend_request(a1, a2)
# fr.save()
# make_them_friends(a1, a2, fr)
#
# try:
# check_user = Author.objects.get(pk=a2.pk)
# #print("Saved")
# except Exception as e:
# print("Error!!")
#
# # no friend request
# result0 = False
# try:
# result0 = FriendRequest.objects.get(pk=fr.pk)
# except FriendRequest.DoesNotExist:
# result0 = False
# self.assertFalse(result0)
# # have entry in friends
# result = False
# try:
# result = Friends.objects.filter( Q(author1=a1, author2=a2) | Q(author2=a1, author1=a2)).exists()
#
# self.assertTrue(result)
# # print(111,Friends.objects.filter( Q(author1=a1), Q(author2=a2) | Q(author2=a1), Q(author1=a2)),222)
# except Friends.DoesNotExist:
# result = False
# self.assertTrue(result)
#
# def test_create_duplicate_frequest(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# fr = create_friend_request(a1, a2)
# fr.save()
# make_them_friends(a1, a2, fr)
#
# try:
# check_user = Author.objects.get(pk=a2.pk)
# #print("Saved")
# except Exception as e:
# print("Error!!")
#
# # no friend request
# result0 = False
# try:
# result0 = FriendRequest.objects.get(pk=fr.pk)
# except FriendRequest.DoesNotExist:
# result0 = False
# self.assertFalse(result0)
# # have entry in friends
# result = False
# try:
# result = Friends.objects.filter( Q(author1=a1, author2=a2) | Q(author2=a1, author1=a2)).exists()
#
# self.assertTrue(result)
# # print(111,Friends.objects.filter( Q(author1=a1), Q(author2=a2) | Q(author2=a1), Q(author1=a2)),222)
# except Friends.DoesNotExist:
# result = False
# self.assertTrue(result)
# def test_make_friends(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# fr = create_friend_request(a1, a2)
# fr.save()
# make_them_friends(a1, a2, fr)
#
# try:
# check_user = Author.objects.get(pk=a2.pk)
# #print("Saved")
# except Exception as e:
# print("Error!!")
#
# # no friend request
# result0 = False
# try:
# result0 = FriendRequest.objects.get(pk=fr.pk)
# except FriendRequest.DoesNotExist:
# result0 = False
# self.assertFalse(result0)
# # have entry in friends
# result = False
# try:
# result = Friends.objects.filter( Q(author1=a1, author2=a2) | Q(author2=a1, author1=a2)).exists()
#
# self.assertTrue(result)
# # print(111,Friends.objects.filter( Q(author1=a1), Q(author2=a2) | Q(author2=a1), Q(author1=a2)),222)
# except Friends.DoesNotExist:
# result = False
# self.assertTrue(result)
#
pass
class CheckFriendshipViewTests(TestCase):
# def test_existing_friendship(self):
# response = self.client.get(reverse('api:author/<authorid>/friends/<authorid2>/', kwargs={}))
pass
class FriendResultViewTests(TestCase):
pass
class UnfriendViewTests(TestCase):
pass
class RemoteServerTests(TestCase):
def test_friend_request_to_remote(self):
print("TEST REMOTE")
a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
a1.save()
a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
a2.save()
temp_dict = {"from_author":a1.author_id, "to_author":a2.author_id}
# print(temp_dict)
friend_request_to_remote(temp_dict)
print("TEST REMOTE END")
#
# class UtilityTests(TestCase):
# def test_getAuthor(self):
# # Asserts Author is being created
# try:
# a1 = Author.objects.create(firstName='test_user', lastName='test_user_lastname', userName='test_userName', password='test')
#
# self.assertTrue(Author.objects.get(firstName='test_user'))
# self.assertTrue(Author.objects.get(userName='test_userName'))
#
#
# except Exception as e:
# print("Error!!!")
#
# def test_createPost(self):
# try:
# a1 = Author.objects.create(firstName='test_user', lastName='test_user_lastname', userName='test_userName', password='test')
# self.assertTrue(Author.objects.get(firstName='test_user'))
# self.assertTrue(Author.objects.get(userName='test_userName'))
#
# except Exception as e:
# print("Error!!!")
#
# p1 = Post.objects.create(publicationDate= timezone.now() ,content='this is a test', title='test', permission = "P", author = a1)
#
# self.assertTrue(Post.objects.get(title='test'))
#
# def test_make_them_friends(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# fr = create_friend_request(a1, a2)
# fr.save()
# make_them_friends(a1, a2, fr)
#
# try:
# check_user = Author.objects.get(pk=a2.pk)
# #print("Saved")
# except Exception as e:
# print("Error!!")
#
# # no friend request
# result0 = False
# try:
# result0 = FriendRequest.objects.get(pk=fr.pk)
# except FriendRequest.DoesNotExist:
# result0 = False
# self.assertFalse(result0)
# # have entry in friends
# result = False
# try:
# result = Friends.objects.filter( Q(author1=a1, author2=a2) | Q(author2=a1, author1=a2)).exists()
#
# self.assertTrue(result)
# # print(111,Friends.objects.filter( Q(author1=a1), Q(author2=a2) | Q(author2=a1), Q(author1=a2)),222)
# except Friends.DoesNotExist:
# result = False
# self.assertTrue(result)
#
#
# def test_enroll_following(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# temp_dict = {"requester_id" :a1 , "requestee_id":a2}
# enroll_following(temp_dict)
# try:
# result = Following.objects.filter(follower=a1, following=a2)
# except Friends.DoesNotExist:
# result = False
# self.assertTrue(result)
#
# def test_unfollow(self):
# a1 = create_author(f_name="a1", l_name="a1", u_name="101", pwd=101)
# a1.save()
# a2 = create_author(f_name="a2", l_name="a2", u_name="102", pwd=101)
# a2.save()
# temp_dict = {"requester_id" :a1 , "requestee_id":a2}
# enroll_following(temp_dict)
# temp_dict = {"follower" :a1 , "following":a2}
# unfollow(temp_dict)
# try:
# result = Following.objects.filter(follower=a1, following=a2).exists()
# #print(result)
# except Friends.DoesNotExist:
# result = True
# self.assertFalse(result)
| 38.473684 | 142 | 0.606703 |
f271ddf0a45b041befe73de8c05292229fb25154
| 917 |
py
|
Python
|
people/migrations/0002_ormlogintoken.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | 3 |
2018-12-05T16:44:59.000Z
|
2020-08-01T14:12:32.000Z
|
people/migrations/0002_ormlogintoken.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | 6 |
2020-06-03T15:56:59.000Z
|
2022-02-10T07:23:55.000Z
|
people/migrations/0002_ormlogintoken.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-05-03 14:30
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('people', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ORMLoginToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.ORMPerson')),
],
options={
'verbose_name': 'Login token',
'verbose_name_plural': 'Login tokens',
},
),
]
| 31.62069 | 114 | 0.589967 |
e35836ed9d2673c66208949f4bf1b88d95e3b110
| 2,224 |
py
|
Python
|
backend/backend/settings.py
|
berserg2010/test_vk_api
|
c7a3814dd3cdb18d11527a2b8613b77a0ace6e37
|
[
"Apache-2.0"
] | null | null | null |
backend/backend/settings.py
|
berserg2010/test_vk_api
|
c7a3814dd3cdb18d11527a2b8613b77a0ace6e37
|
[
"Apache-2.0"
] | 4 |
2021-06-04T23:51:35.000Z
|
2021-09-22T19:33:33.000Z
|
backend/backend/settings.py
|
berserg2010/test_vk_api
|
c7a3814dd3cdb18d11527a2b8613b77a0ace6e37
|
[
"Apache-2.0"
] | null | null | null |
import os
ACCESS_TOKEN_VK = os.getenv('ACCESS_TOKEN_VK')
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'SECRET')
DEBUG = os.getenv('DJANGO_DEBUG', False)
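# Note: os.getenv returns a string when DJANGO_DEBUG is set, so any non-empty value (even "False") leaves DEBUG truthy.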
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = [
'0.0.0.0',
'localhost',
'127.0.0.1',
'vk-api.ddns.net',
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'service',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = False
STATIC_URL = '/static/'
| 25.272727 | 91 | 0.661421 |
974f4fa73c4006566f5ac31223586880ae1b7d25
| 13,874 |
py
|
Python
|
hwt/hdl/ifContainter.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
hwt/hdl/ifContainter.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
hwt/hdl/ifContainter.py
|
mgielda/hwt
|
e6c699fb154f93ac03523bfe40a3d4fc1912d28b
|
[
"MIT"
] | null | null | null |
from functools import reduce
from itertools import compress
from operator import and_
from typing import List, Tuple, Dict, Union
from hwt.hdl.sensitivityCtx import SensitivityCtx
from hwt.hdl.statementUtils import fill_stm_list_with_enclosure
from hwt.hdl.statements import HdlStatement, statementsAreSame,\
isSameStatementList, seqEvalCond
from hwt.hdl.value import Value
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.doc_markers import internal
class IfContainer(HdlStatement):
"""
Structural container of if statement for hdl rendering
:ivar _ifTrue_enclosed_for: set of signals for which if ifTrue branch enclosed
(has not branch where signal is not assignment)
:ivar _elIfs_enclosed_for: list of sets of enclosed signals for each elif
:ivar _ifFalse_enclosed_for: set of enclosed signals for ifFalse branch
"""
def __init__(self, cond, ifTrue=None, ifFalse=None, elIfs=None,
parentStm=None, is_completly_event_dependent=False):
"""
:param cond: RtlSignal as conditions for this if
:param ifTrue: list of statements which should be active if cond.
is met
:param elIfs: list of tuples (list of conditions, list of statements)
        :param ifFalse: list of statements which should be active if neither cond.
            nor any other cond. in elIfs is met
"""
assert isinstance(cond, RtlSignalBase)
self.cond = cond
super(IfContainer, self).__init__(
parentStm,
is_completly_event_dependent=is_completly_event_dependent)
if ifTrue is None:
self.ifTrue = []
else:
self.ifTrue = ifTrue
if elIfs is None:
self.elIfs = []
else:
self.elIfs = elIfs
self.ifFalse = ifFalse
self._ifTrue_enclosed_for = None
self._elIfs_enclosed_for = None
self._ifFalse_enclosed_for = None
@internal
def _collect_io(self):
raise NotImplementedError()
@internal
def _clean_signal_meta(self):
self._sensitivity = None
self._ifTrue_enclosed_for = None
self._elIfs_enclosed_for = None
self._ifFalse_enclosed_for = None
HdlStatement._clean_signal_meta(self)
@internal
def _cut_off_drivers_of(self, sig: RtlSignalBase):
"""
Doc on parent class :meth:`HdlStatement._cut_off_drivers_of`
"""
if len(self._outputs) == 1 and sig in self._outputs:
self.parentStm = None
return self
# try to cut off all statements which are drivers of specified signal
# in all branches
child_keep_mask = []
newIfTrue = []
all_cut_off = True
all_cut_off &= self._cut_off_drivers_of_list(
sig, self.ifTrue, child_keep_mask, newIfTrue)
self.ifTrue = list(compress(self.ifTrue, child_keep_mask))
newElifs = []
anyElifHit = False
for cond, stms in self.elIfs:
newCase = []
child_keep_mask.clear()
all_cut_off &= self._cut_off_drivers_of_list(
sig, stms, child_keep_mask, newCase)
_stms = list(compress(stms, child_keep_mask))
stms.clear()
stms.extend(_stms)
if newCase:
anyElifHit = True
newElifs.append((cond, newCase))
newIfFalse = None
if self.ifFalse:
newIfFalse = []
child_keep_mask.clear()
all_cut_off &= self._cut_off_drivers_of_list(
sig, self.ifFalse, child_keep_mask, newIfFalse)
self.ifFalse = list(compress(self.ifFalse, child_keep_mask))
        assert not all_cut_off, "everything was cut off, but this should already be known at the start"
if newIfTrue or newIfFalse or anyElifHit or newIfFalse:
# parts were cut off
# generate new statement for them
cond_sig = self.cond
n = self.__class__(cond_sig, newIfTrue)
for c, stms in newElifs:
assert len(c) == 1
c_sig = c[0]
n.Elif(c_sig, stms)
if newIfFalse is not None:
n.Else(newIfFalse)
if self.parentStm is None:
ctx = n._get_rtl_context()
ctx.statements.add(n)
# update io of this
self._inputs.clear()
self._inputs.append(cond_sig)
for c, _ in self.elIfs:
self._inputs.extend(c)
self._inputs.append(cond_sig)
self._outputs.clear()
out_add = self._outputs.append
in_add = self._inputs.append
for stm in self._iter_stms():
for inp in stm._inputs:
in_add(inp)
for outp in stm._outputs:
out_add(outp)
if self._sensitivity is not None or self._enclosed_for is not None:
raise NotImplementedError(
"Sensitivity and enclosure has to be cleaned first")
return n
@internal
def _discover_enclosure(self):
"""
Doc on parent class :meth:`HdlStatement._discover_enclosure`
"""
outputs = self._outputs
self._ifTrue_enclosed_for = self._discover_enclosure_for_statements(
self.ifTrue, outputs)
elif_encls = self._elIfs_enclosed_for = []
for _, stms in self.elIfs:
e = self._discover_enclosure_for_statements(
stms, outputs)
elif_encls.append(e)
self._ifFalse_enclosed_for = self._discover_enclosure_for_statements(
self.ifFalse, outputs)
assert self._enclosed_for is None
encl = self._enclosed_for = set()
for s in self._ifTrue_enclosed_for:
enclosed = True
for elif_e in elif_encls:
if s not in elif_e:
enclosed = False
break
if enclosed and s in self._ifFalse_enclosed_for:
encl.add(s)
@internal
def _discover_sensitivity(self, seen: set) -> None:
"""
Doc on parent class :meth:`HdlStatement._discover_sensitivity`
"""
assert self._sensitivity is None, self
ctx = self._sensitivity = SensitivityCtx()
self._discover_sensitivity_sig(self.cond, seen, ctx)
if ctx.contains_ev_dependency:
return
for stm in self.ifTrue:
stm._discover_sensitivity(seen)
ctx.extend(stm._sensitivity)
# elifs
for cond, stms in self.elIfs:
if ctx.contains_ev_dependency:
break
self._discover_sensitivity_sig(cond, seen, ctx)
if ctx.contains_ev_dependency:
break
for stm in stms:
if ctx.contains_ev_dependency:
break
stm._discover_sensitivity(seen)
ctx.extend(stm._sensitivity)
if not ctx.contains_ev_dependency and self.ifFalse:
# else
for stm in self.ifFalse:
stm._discover_sensitivity(seen)
ctx.extend(stm._sensitivity)
else:
assert not self.ifFalse, "can not negate event"
@internal
def _fill_enclosure(self, enclosure: Dict[RtlSignalBase, Union[Value, RtlSignalBase]]) -> None:
enc = []
outputs = self._outputs
for e in enclosure.keys():
if e in outputs and e not in self._enclosed_for:
enc.append(e)
if not enc:
return
fill_stm_list_with_enclosure(self, self._ifTrue_enclosed_for,
self.ifTrue, enc, enclosure)
for (_, stms), e in zip(self.elIfs, self._elIfs_enclosed_for):
fill_stm_list_with_enclosure(self, e, stms, enc, enclosure)
self.ifFalse = fill_stm_list_with_enclosure(self, self._ifFalse_enclosed_for,
self.ifFalse, enc, enclosure)
self._enclosed_for.update(enc)
def _iter_stms(self):
"""
Doc on parent class :meth:`HdlStatement._iter_stms`
"""
yield from self.ifTrue
for _, stms in self.elIfs:
yield from stms
if self.ifFalse is not None:
yield from self.ifFalse
@internal
def _try_reduce(self) -> Tuple[bool, List[HdlStatement]]:
"""
Doc on parent class :meth:`HdlStatement._try_reduce`
"""
# flag if IO of statement has changed
io_change = False
self.ifTrue, rank_decrease, _io_change = self._try_reduce_list(
self.ifTrue)
self.rank -= rank_decrease
io_change |= _io_change
new_elifs = []
for cond, statements in self.elIfs:
_statements, rank_decrease, _io_change = self._try_reduce_list(
statements)
self.rank -= rank_decrease
io_change |= _io_change
new_elifs.append((cond, _statements))
if self.ifFalse is not None:
self.ifFalse, rank_decrease, _io_update_required = self._try_reduce_list(
self.ifFalse)
self.rank -= rank_decrease
io_change |= _io_change
reduce_self = not self.condHasEffect(
self.ifTrue, self.ifFalse, self.elIfs)
if reduce_self:
res = self.ifTrue
else:
res = [self, ]
self._on_reduce(reduce_self, io_change, res)
# try merge nested ifs as elifs
if self.ifFalse is not None and len(self.ifFalse) == 1:
child = self.ifFalse[0]
if isinstance(child, IfContainer):
self._merge_nested_if_from_else(child)
return res, io_change
@internal
def _merge_nested_if_from_else(self, ifStm: "IfContainer"):
"""
        Merge nested IfContainer from the else branch into this IfContainer
        as elif and else branches
"""
self.elIfs.append((ifStm.cond, ifStm.ifTrue))
self.elIfs.extend(ifStm.elIfs)
self.ifFalse = ifStm.ifFalse
@internal
def _is_mergable(self, other: HdlStatement) -> bool:
if not isinstance(other, IfContainer):
return False
if (self.cond is not other.cond
or not self._is_mergable_statement_list(self.ifTrue, other.ifTrue)):
return False
if len(self.elIfs) != len(other.elIfs):
return False
for (a_c, a_stm), (b_c, b_stm) in zip(self.elIfs, other.elIfs):
if a_c is not b_c or self._is_mergable_statement_list(a_stm, b_stm):
return False
if not self._is_mergable_statement_list(self.ifFalse, other.ifFalse):
return False
return True
@internal
def _merge_with_other_stm(self, other: "IfContainer") -> None:
"""
        :attention: statements have to be mergable (to check use _is_mergable method)
"""
merge = self._merge_statement_lists
self.ifTrue = merge(self.ifTrue, other.ifTrue)
new_elifs = []
for ((c, elifA), (_, elifB)) in zip(self.elIfs, other.elIfs):
new_elifs.append((c, merge(elifA, elifB)))
self.elIfs = new_elifs
self.ifFalse = merge(self.ifFalse, other.ifFalse)
other.ifTrue = []
other.elIfs = []
other.ifFalse = None
self._on_merge(other)
@internal
@staticmethod
def condHasEffect(ifTrue, ifFalse, elIfs):
stmCnt = len(ifTrue)
if ifFalse is not None \
and stmCnt == len(ifFalse) \
and reduce(and_,
[len(stm) == stmCnt
for _, stm in elIfs],
True):
for stms in zip(ifTrue, ifFalse, *map(lambda x: x[1], elIfs)):
if not statementsAreSame(stms):
return True
return False
return True
def isSame(self, other: HdlStatement) -> bool:
"""
:return: True if other has same meaning as this statement
"""
if self is other:
return True
if self.rank != other.rank:
return False
if isinstance(other, IfContainer):
if self.cond is other.cond:
if len(self.ifTrue) == len(other.ifTrue) \
and len(self.ifFalse) == len(other.ifFalse) \
and len(self.elIfs) == len(other.elIfs):
if not isSameStatementList(self.ifTrue,
other.ifTrue) \
or not isSameStatementList(self.ifFalse,
other.ifFalse):
return False
for (ac, astms), (bc, bstms) in zip(self.elIfs,
other.elIfs):
if not (ac == bc) or\
not isSameStatementList(astms, bstms):
return False
return True
return False
@internal
def seqEval(self):
if seqEvalCond(self.cond):
for s in self.ifTrue:
s.seqEval()
else:
for c in self.elIfs:
if seqEvalCond(c[0]):
for s in c[1]:
s.seqEval()
return
for s in self.ifFalse:
s.seqEval()
def __repr__(self):
from hwt.serializer.hwt.serializer import HwtSerializer
ctx = HwtSerializer.getBaseContext()
return HwtSerializer.IfContainer(self, ctx)
| 33.033333 | 99 | 0.570419 |
bafb1c0e7a78ed44e7a6028354ea078c2ecf70e8
| 1,161 |
py
|
Python
|
shop/migrations/0004_billitemstest.py
|
abhishekmandloi/django-product-management-system
|
e89138e77c53b2ed51599e43e5d74bdf8671ae9e
|
[
"MIT"
] | null | null | null |
shop/migrations/0004_billitemstest.py
|
abhishekmandloi/django-product-management-system
|
e89138e77c53b2ed51599e43e5d74bdf8671ae9e
|
[
"MIT"
] | null | null | null |
shop/migrations/0004_billitemstest.py
|
abhishekmandloi/django-product-management-system
|
e89138e77c53b2ed51599e43e5d74bdf8671ae9e
|
[
"MIT"
] | 1 |
2021-07-02T04:18:17.000Z
|
2021-07-02T04:18:17.000Z
|
# Generated by Django 3.1 on 2020-08-11 08:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20200811_1337'),
]
operations = [
migrations.CreateModel(
name='BillItemsTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productBatch', models.CharField(max_length=50, null=True)),
('productPacking', models.CharField(max_length=50, null=True)),
('productQuantity', models.IntegerField(default=1)),
('productPrice', models.FloatField(default=0)),
('productTotalPrice', models.FloatField(default=0)),
('productName', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bills2', to='shop.productdetailbatch')),
('purchaseno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bills2', to='shop.bill')),
],
),
]
| 41.464286 | 150 | 0.611542 |
d1a356237e86102a1bd68d491a667abe77f97096
| 9,646 |
py
|
Python
|
test/test_live_auth.py
|
ubragg/endpoints-python
|
1ca9b366d75644787a57b21942f77797a1b96667
|
[
"Apache-2.0"
] | null | null | null |
test/test_live_auth.py
|
ubragg/endpoints-python
|
1ca9b366d75644787a57b21942f77797a1b96667
|
[
"Apache-2.0"
] | null | null | null |
test/test_live_auth.py
|
ubragg/endpoints-python
|
1ca9b366d75644787a57b21942f77797a1b96667
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import object
import base64
import io
import importlib
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
import requests # provided by endpoints-management-python
import pytest
import yaml
JSON_HEADERS = {'content-type': 'application/json'}
TESTDIR = os.path.dirname(os.path.realpath(__file__))
def _find_setup_py(some_path):
while not os.path.isfile(os.path.join(some_path, 'setup.py')):
some_path = os.path.dirname(some_path)
return some_path
PKGDIR = _find_setup_py(TESTDIR)
@pytest.fixture(scope='session')
def integration_project_id():
if 'INTEGRATION_PROJECT_ID' not in os.environ:
raise KeyError('INTEGRATION_PROJECT_ID required in environment. Set it to the appropriate project id.')
return os.environ['INTEGRATION_PROJECT_ID']
@pytest.fixture(scope='session')
def service_account_keyfile():
if 'SERVICE_ACCOUNT_KEYFILE' not in os.environ:
raise KeyError('SERVICE_ACCOUNT_KEYFILE required in environment. Set it to the path to the service account key.')
value = os.environ['SERVICE_ACCOUNT_KEYFILE']
if not os.path.isfile(value):
raise ValueError('SERVICE_ACCOUNT_KEYFILE must point to a file containing the service account key.')
return value
@pytest.fixture(scope='session')
def api_key():
if 'PROJECT_API_KEY' not in os.environ:
raise KeyError('PROJECT_API_KEY required in environment. Set it to a valid api key for the specified project.')
return os.environ['PROJECT_API_KEY']
@pytest.fixture(scope='session')
def gcloud_driver_module(request):
"""This fixture provides the gcloud test driver. It is not normally installable, since it lacks a setup.py"""
cache_key = 'live_auth/driver_zip'
driver_zip_data = request.config.cache.get(cache_key, None)
if driver_zip_data is None:
url = "https://github.com/GoogleCloudPlatform/cloudsdk-test-driver/archive/master.zip"
driver_zip_data = requests.get(url).content
request.config.cache.set(cache_key, base64.b64encode(driver_zip_data))
else:
driver_zip_data = base64.b64decode(driver_zip_data)
extract_path = tempfile.mkdtemp()
    # ZipFile needs a binary file-like object, so wrap the downloaded bytes in BytesIO
    with zipfile.ZipFile(io.BytesIO(driver_zip_data)) as driver_zip:
driver_zip.extractall(path=extract_path)
# have to rename the subfolder
os.rename(os.path.join(extract_path, 'cloudsdk-test-driver-master'), os.path.join(extract_path, 'cloudsdk_test_driver'))
sys.path.append(extract_path)
driver_module = importlib.import_module('cloudsdk_test_driver.driver')
yield driver_module
sys.path.pop()
shutil.rmtree(extract_path)
@pytest.fixture(scope='session')
def gcloud_driver(gcloud_driver_module):
with gcloud_driver_module.Manager(additional_components=['app-engine-python']):
yield gcloud_driver_module
@pytest.fixture(scope='session')
def gcloud_sdk(gcloud_driver, integration_project_id, service_account_keyfile):
return gcloud_driver.SDKFromArgs(project=integration_project_id, service_account_keyfile=service_account_keyfile)
class TestAppManager(object):
# This object will manage the test app. It needs to be told what
# kind of app to make; such methods are named `become_*_app`,
# because they mutate the manager object rather than returning
# some new object.
def __init__(self):
self.cleanup_path = tempfile.mkdtemp()
self.app_path = os.path.join(self.cleanup_path, 'app')
def cleanup(self):
shutil.rmtree(self.cleanup_path)
def become_apikey_app(self, project_id):
source_path = os.path.join(TESTDIR, 'testdata', 'sample_app')
shutil.copytree(source_path, self.app_path)
self.update_app_yaml(project_id)
def update_app_yaml(self, project_id, version=None):
yaml_path = os.path.join(self.app_path, 'app.yaml')
app_yaml = yaml.load(open(yaml_path))
env = app_yaml['env_variables']
env['ENDPOINTS_SERVICE_NAME'] = '{}.appspot.com'.format(project_id)
if version is not None:
env['ENDPOINTS_SERVICE_VERSION'] = version
with open(yaml_path, 'w') as outfile:
yaml.dump(app_yaml, outfile, default_flow_style=False)
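# Hedged usage sketch (mirrors what the apikey_app fixture below does; the project id
# is a placeholder):
#   app = TestAppManager()
#   app.become_apikey_app('my-project-id')
#   ... deploy the generated files under app.app_path ...
#   app.cleanup()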
@pytest.fixture(scope='class')
def apikey_app(gcloud_sdk, integration_project_id):
app = TestAppManager()
app.become_apikey_app(integration_project_id)
path = app.app_path
os.mkdir(os.path.join(path, 'lib'))
# Install the checked-out endpoints repo
subprocess.check_call(['python', '-m', 'pip', 'install', '-t', 'lib', PKGDIR, '--ignore-installed'], cwd=path)
print(path)
subprocess.check_call(['python', 'lib/endpoints/endpointscfg.py', 'get_openapi_spec', 'main.IataApi', '--hostname', '{}.appspot.com'.format(integration_project_id)], cwd=path)
out, err, code = gcloud_sdk.RunGcloud(['endpoints', 'services', 'deploy', os.path.join(path, 'iatav1openapi.json')])
assert code == 0
version = out['serviceConfig']['id'].encode('ascii')
app.update_app_yaml(integration_project_id, version)
out, err, code = gcloud_sdk.RunGcloud(['app', 'deploy', os.path.join(path, 'app.yaml')])
assert code == 0
base_url = 'https://{}.appspot.com/_ah/api/iata/v1'.format(integration_project_id)
yield base_url
app.cleanup()
@pytest.fixture()
def clean_apikey_app(apikey_app, api_key):
url = '/'.join([apikey_app, 'reset'])
r = requests.post(url, params={'key': api_key})
assert r.status_code == 204
return apikey_app
@pytest.mark.livetest
class TestApikeyRequirement(object):
def test_get_airport(self, clean_apikey_app):
url = '/'.join([clean_apikey_app, 'airport', 'YYZ'])
r = requests.get(url, headers=JSON_HEADERS)
actual = r.json()
expected = {u'iata': u'YYZ', u'name': u'Lester B. Pearson International Airport'}
assert actual == expected
def test_list_airports(self, clean_apikey_app):
url = '/'.join([clean_apikey_app, 'airports'])
r = requests.get(url, headers=JSON_HEADERS)
raw = r.json()
assert 'airports' in raw
actual = {a['iata']: a['name'] for a in raw['airports']}
assert actual[u'YYZ'] == u'Lester B. Pearson International Airport'
assert u'ZZT' not in actual
def test_create_airport(self, clean_apikey_app, api_key):
url = '/'.join([clean_apikey_app, 'airport'])
r = requests.get('/'.join([url, 'ZZT']), headers=JSON_HEADERS)
assert r.status_code == 404
data = {u'iata': u'ZZT', u'name': u'Town Airport'}
r = requests.post(url, json=data, params={'key': api_key})
assert data == r.json()
r = requests.get('/'.join([url, 'ZZT']), headers=JSON_HEADERS)
assert r.status_code == 200
assert data == r.json()
def test_create_airport_key_required(self, clean_apikey_app):
url = '/'.join([clean_apikey_app, 'airport'])
data = {u'iata': u'ZZT', u'name': u'Town Airport'}
r = requests.post(url, json=data)
assert r.status_code == 401
r = requests.get('/'.join([url, 'ZZT']), headers=JSON_HEADERS)
assert r.status_code == 404
def test_modify_airport(self, clean_apikey_app, api_key):
url = '/'.join([clean_apikey_app, 'airport', 'YYZ'])
r = requests.get(url, headers=JSON_HEADERS)
actual = r.json()
expected = {u'iata': u'YYZ', u'name': u'Lester B. Pearson International Airport'}
assert actual == expected
data = {u'iata': u'YYZ', u'name': u'Torontoland'}
r = requests.post(url, json=data, params={'key': api_key})
assert data == r.json()
r = requests.get(url, headers=JSON_HEADERS)
assert data == r.json()
def test_modify_airport_key_required(self, clean_apikey_app):
url = '/'.join([clean_apikey_app, 'airport', 'YYZ'])
data = {u'iata': u'YYZ', u'name': u'Torontoland'}
r = requests.post(url, json=data)
assert r.status_code == 401
r = requests.get(url, headers=JSON_HEADERS)
actual = r.json()
expected = {u'iata': u'YYZ', u'name': u'Lester B. Pearson International Airport'}
assert actual == expected
def test_delete_airport(self, clean_apikey_app, api_key):
url = '/'.join([clean_apikey_app, 'airport', 'YYZ'])
r = requests.delete(url, headers=JSON_HEADERS, params={'key': api_key})
assert r.status_code == 204
r = requests.get(url, headers=JSON_HEADERS)
assert r.status_code == 404
def test_delete_airport_key_required(self, clean_apikey_app):
url = '/'.join([clean_apikey_app, 'airport', 'YYZ'])
r = requests.delete(url, headers=JSON_HEADERS)
assert r.status_code == 401
r = requests.get(url, headers=JSON_HEADERS)
actual = r.json()
expected = {u'iata': u'YYZ', u'name': u'Lester B. Pearson International Airport'}
assert actual == expected
| 41.222222 | 179 | 0.690234 |
1402d01f4809c17edfca62de955ba1ba342a55a7
| 13,668 |
py
|
Python
|
medicalai/chief/networks.py
|
gitter-badger/medicalAI
|
5892003489d62a0cb74486add6afc37485e8967c
|
[
"Apache-2.0"
] | null | null | null |
medicalai/chief/networks.py
|
gitter-badger/medicalAI
|
5892003489d62a0cb74486add6afc37485e8967c
|
[
"Apache-2.0"
] | null | null | null |
medicalai/chief/networks.py
|
gitter-badger/medicalAI
|
5892003489d62a0cb74486add6afc37485e8967c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2022 AIBharata Emerging Technologies Pvt. Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .nnets import resnet,covid_net,densenet,vgg16,mobilenet,mobilenetv2,xception,inceptionv3,inceptionResnet
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
import copy
import numpy as np
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import tensorflow as tf
class NetworkInit(object):
"""Base class for parameter Network initializers.
The :class:`NetworkInit` class represents a network initializer used
to initialize network/model parameters for numerous medical ai networks. It should be
subclassed when implementing new types of network initializers.
"""
def __call__(self, inputSize, OutputSize, convLayers=None):
"""Makes :class:`NetworkInit` instances callable like a function, invoking
their :meth:`call()` method.
"""
return self.call(inputSize, OutputSize, convLayers)
def call(self, inputSize, OutputSize, convLayers=None):
"""Sample should return model initialized with input and output Sizes.
Parameters
----------
inputSize : tuple or int.
Integer or tuple specifying the input of network.
OutputSize : tuple or int.
Integer or tuple specifying the output classes of network.
Returns
-------
numpy.array.
Initialized Model.
"""
raise NotImplementedError()
def __str__(self):
return self.__class__.__name__
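# Hedged sketch (not part of medicalai): a minimal custom initializer built by
# subclassing NetworkInit, as the base class docstring suggests. The layer sizes are
# illustrative assumptions only.
class ExampleDenseInit(NetworkInit):
    """Example-only initializer returning a tiny fully connected classifier."""
    def call(self, inputSize, OutputSize, convLayers=None):
        return Sequential([
            Flatten(input_shape=inputSize),
            Dense(64, activation='relu', name='EX_FC1'),
            Dense(OutputSize, activation='softmax', name='EX_OUT'),
        ])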
class tinyMedNet(NetworkInit):
"""tinyMedNet is a classification network that consumes very less resources and can be trained even on CPUs. This network can be used to demonstrate the framework working.
Additionally this acts a starting point for example/tutorial for getting started to know the Medical AI library.
"""
def call(self, inputSize, OutputSize, convLayers=None):
try:
model = Sequential([
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'valid',input_shape=inputSize, name='CNN1'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'valid', name='CNN2'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'valid', name='CNN3'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Flatten(),
Dense(384, activation='relu', name='FC1'),
Dense(192, activation='relu', name='FC2'),
Dense(OutputSize, activation='softmax', name='FC3')
])
except ValueError:
model = Sequential([
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'valid',input_shape=inputSize, name='CNN1'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'valid', name='CNN2'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Conv2D(64, kernel_size=(5, 5), strides=(1, 1),activation='relu', padding = 'same', name='CNN3'),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
Flatten(),
Dense(384, activation='relu', name='FC1'),
Dense(192, activation='relu', name='FC2'),
Dense(OutputSize, activation='softmax', name='FC3')
])
return model
class tinyMedNet_v2(NetworkInit):
"""tinyMedNet_v2 allows users to configure the number of Conv/CNN layers.
    tinyMedNet_v2 is a classification network that consumes very few resources and can be trained even on CPUs. This network can be used to demonstrate that the framework works.
    Additionally, it acts as a starting point for examples/tutorials for getting to know the Medical AI library.
"""
def call(self, inputSize, OutputSize, convLayers=2):
try:
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1),activation='relu', padding = 'valid',input_shape=inputSize, name='CNN1'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
for cnnLayerNum in range(0,convLayers-1):
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1),activation='relu', padding = 'valid', name='CNN'+str(cnnLayerNum+2)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(384, activation='relu', name='FC1'))
model.add(Dense(192, activation='relu', name='FC2'))
model.add(Dense(OutputSize, activation='softmax', name='FC3'))
return model
except ValueError:
print(20*'-')
print('Dimension Error Occured')
print('SOLUTION: Try increasing the Input Dimension or Reducing the number of Layers')
print(20*'-')
sys.exit(1)
class tinyMedNet_v3(NetworkInit):
"""tinyMedNet_v3 has 3 FC layers with Dropout and Configurable number of Conv/CNN Layers.
"""
def call(self, inputSize, OutputSize, convLayers=2):
try:
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1),activation='relu', padding = 'valid',input_shape=inputSize, name='CNN1'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
for cnnLayerNum in range(0,convLayers-1):
model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1),activation='relu', padding = 'valid', name='CNN'+str(cnnLayerNum+2)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Dropout(rate=0.2))
model.add(Flatten())
# model.add(Dense(512, activation='relu', name='FC1'))
# model.add(Dropout(rate=0.5))
model.add(Dense(384, activation='relu', name='FC2'))
model.add(Dropout(rate=0.5))
model.add(Dense(192, activation='relu', name='FC3'))
model.add(Dropout(rate=0.5))
model.add(Dense(OutputSize, activation='softmax', name='FC4'))
return model
except ValueError as err:
print(err)
print(20*'-')
print('Dimension Error Occured')
print('SOLUTION: Try increasing the Input Dimension or Reducing the number of Layers')
print(20*'-')
sys.exit(1)
class resNet20(NetworkInit):
"""resnet20
"""
def call(self, inputSize, OutputSize, convLayers=None):
img_input = tf.keras.layers.Input(shape=inputSize)
return resnet.resnet20(img_input=img_input,classes=OutputSize)
class resNet32(NetworkInit):
"""resnet32
"""
def call(self, inputSize, OutputSize, convLayers=None):
img_input = tf.keras.layers.Input(shape=inputSize)
return resnet.resnet32(img_input=img_input,classes=OutputSize)
class resNet56(NetworkInit):
"""RESNET56
"""
def call(self, inputSize, OutputSize, convLayers=None):
img_input = tf.keras.layers.Input(shape=inputSize)
return resnet.resnet56(img_input=img_input,classes=OutputSize)
class resNet110(NetworkInit):
"""resnet110
"""
def call(self, inputSize, OutputSize, convLayers=None):
img_input = tf.keras.layers.Input(shape=inputSize)
return resnet.resnet110(img_input=img_input,classes=OutputSize)
class megaNet(NetworkInit):
"""
megaNet is based on COVID-NET.
This is a tensorflow 2.0 network variant for COVID-Net described in Paper "COVID-Net: A Tailored Deep Convolutional Neural Network Design for Detection of COVID-19 Cases from Chest Radiography Images" by Linda Wang et al.
Reference: https://github.com/busyyang/COVID-19/
"""
def call(self, inputSize, OutputSize, convLayers=None):
return covid_net.COVIDNET_Keras(img_input=inputSize,classes=OutputSize)
class DenseNet121(NetworkInit):
"""
DenseNet121 model, with weights pre-trained on ImageNet
inputSize: input image size tuple
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return densenet.DenseNet121_Model(img_input=inputSize,classes=OutputSize)
class VGG16(NetworkInit):
"""
VGG16 model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return vgg16.VGG16_Model(img_input=inputSize,classes=OutputSize)
class MobileNet(NetworkInit):
"""
MobileNet model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return mobilenet.MobileNet(img_input=inputSize,classes=OutputSize)
class MobileNetV2(NetworkInit):
"""
MobileNet model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return mobilenetv2.MobileNetV2(img_input=inputSize,classes=OutputSize)
class Xception(NetworkInit):
"""
Xception model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return xception.Xception(img_input=inputSize,classes=OutputSize)
class InceptionV3(NetworkInit):
"""
InceptionV3 model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return inceptionv3.InceptionV3(img_input=inputSize,classes=OutputSize)
class InceptionResNetV2(NetworkInit):
"""
InceptionResNetV2 model, with weights pre-trained on ImageNet
    inputSize: input image size tuple, default: (224,223,3)
outputSize: Number of classes for prediction
"""
def call(self, inputSize, OutputSize, convLayers=None):
return inceptionResnet.InceptionResNetV2_Model(img_input=inputSize,classes=OutputSize)
def get(networkInitialization):
if networkInitialization.__class__.__name__ == 'str':
if networkInitialization in ['tinyMedNet', 'tiny_Medical_Network']:
return tinyMedNet()
elif networkInitialization in ['tinyMedNet_v2', 'tiny_Medical_Network_v2']:
return tinyMedNet_v2()
elif networkInitialization in ['tinyMedNet_v3', 'tiny_Medical_Network_v3']:
return tinyMedNet_v3()
elif networkInitialization in ['resNet20', 'resnet20']:
return resNet20()
elif networkInitialization in ['resNet32', 'resnet32']:
return resNet32()
elif networkInitialization in ['resNet56', 'resnet56']:
return resNet56()
elif networkInitialization in ['resNet110', 'resnet110']:
return resNet110()
elif networkInitialization in ['megaNet', 'meganet']:
return megaNet()
elif networkInitialization in ['densenet','DenseNet','DenseNet121']:
return DenseNet121()
elif networkInitialization in ['vgg16','VGG16','vgg','VGG']:
return VGG16()
elif networkInitialization in ['mobilenet','MobileNet']:
return MobileNet()
elif networkInitialization in ['mobilenetv2','MobileNetV2']:
return MobileNetV2()
elif networkInitialization in ['xception','Xception']:
return Xception()
elif networkInitialization in ['inception','InceptionV3','inceptionv3']:
return InceptionV3()
elif networkInitialization in ['inceptionresnet','InceptionResNet','InceptionReset','InceptionResNetV2']:
return InceptionResNetV2()
raise ValueError('Unknown network Initialization name: {}.'.format(networkInitialization))
elif isinstance(networkInitialization, NetworkInit):
return copy.deepcopy(networkInitialization)
else:
raise ValueError("Unknown type: {}.".format(networkInitialization.__class__.__name__))
if __name__ == "__main__":
v=get('resNet56')
print(10*'~', 'Tiny Net V1')
INPUT_DIM= 96
m = v((INPUT_DIM,INPUT_DIM,3),10)
m.summary()
v=get('tinyMedNet_v2')
print(10*'~', 'Tiny Net V2')
for i in range(1,10):
print(10*'-', 'CNN LAYERS =', i)
m = v((INPUT_DIM,INPUT_DIM,3),10,i)
m.summary()
| 45.258278 | 226 | 0.65657 |
52d85a81f779fc64d1efa05cc37fbad3f65ec362
| 1,161 |
py
|
Python
|
awwwards/urls.py
|
james-muriithi/awards
|
01c34d516b36def2589ac88c815b022a408cff62
|
[
"Unlicense"
] | null | null | null |
awwwards/urls.py
|
james-muriithi/awards
|
01c34d516b36def2589ac88c815b022a408cff62
|
[
"Unlicense"
] | null | null | null |
awwwards/urls.py
|
james-muriithi/awards
|
01c34d516b36def2589ac88c815b022a408cff62
|
[
"Unlicense"
] | null | null | null |
"""awwwards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django_registration.backends.activation.views import RegistrationView
from app.forms import MyCustomUserForm
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/register/',
RegistrationView.as_view(form_class=MyCustomUserForm), name='register'),
path('accounts/', include('django_registration.backends.activation.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('', include('app.urls'))
]
| 38.7 | 81 | 0.723514 |
6ef32a9962d561347a2679054bd1378d1fdee006
| 36 |
py
|
Python
|
workers/__init__.py
|
oliverroyknox/rvdl
|
bdf0bc2fea9f9dd9820759a4595cc8285ec8b2fc
|
[
"MIT"
] | null | null | null |
workers/__init__.py
|
oliverroyknox/rvdl
|
bdf0bc2fea9f9dd9820759a4595cc8285ec8b2fc
|
[
"MIT"
] | null | null | null |
workers/__init__.py
|
oliverroyknox/rvdl
|
bdf0bc2fea9f9dd9820759a4595cc8285ec8b2fc
|
[
"MIT"
] | null | null | null |
from .download import DownloadWorker
| 36 | 36 | 0.888889 |
e47732cebadf2991007e5fb04074afe04309e5ca
| 808 |
py
|
Python
|
test_projects.py
|
botasakhi/applied_ds
|
b70cf83b2fbaf78664950d990555a22e3e286f0d
|
[
"BSD-3-Clause"
] | null | null | null |
test_projects.py
|
botasakhi/applied_ds
|
b70cf83b2fbaf78664950d990555a22e3e286f0d
|
[
"BSD-3-Clause"
] | null | null | null |
test_projects.py
|
botasakhi/applied_ds
|
b70cf83b2fbaf78664950d990555a22e3e286f0d
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import os
import papermill as pm
import glob
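# The parametrize below collects every immediate subdirectory of the repository root
# whose name does not start with '_' or '.', and runs each notebook found inside it.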
@pytest.mark.parametrize('folder', [x[0] for x in os.walk(os.getcwd())\
if (len(x[0].split('/'))\
== len(os.getcwd().split('/'))+1\
and x[0].split('/')[-1][0] not in ['_', '.'])])
def test(folder):
if folder == 'lecture_notes':
pass
os.chdir(folder)
print(os.getcwd())
for notebook in glob.glob('*.ipynb'):
try:
pm.execute_notebook(
notebook,
'result.ipynb')
finally:
assert(os.path.isfile('%s/result.ipynb' % folder)), "Notebook did not run"
os.remove('%s/result.ipynb' % folder)
os.chdir(os.path.dirname(os.getcwd()))
pass
| 33.666667 | 87 | 0.486386 |
0868bb16aa281c16b89b76eb86dae4632851ce39
| 5,152 |
py
|
Python
|
SimpleHTMLExtractor.py
|
jesh-anand/Tools
|
4b6ffc96492d49ea1b20cad2f65d8da8b2ff2ab1
|
[
"MIT"
] | null | null | null |
SimpleHTMLExtractor.py
|
jesh-anand/Tools
|
4b6ffc96492d49ea1b20cad2f65d8da8b2ff2ab1
|
[
"MIT"
] | null | null | null |
SimpleHTMLExtractor.py
|
jesh-anand/Tools
|
4b6ffc96492d49ea1b20cad2f65d8da8b2ff2ab1
|
[
"MIT"
] | null | null | null |
from html.parser import HTMLParser
"""SimpleHTMLExtractor.py: A HTML parser that extracts text and outputs to file
"""
__author__ = "Prajesh Ananthan"
__copyright__ = "Copyright 2016, Python"
__license__ = "GPL"
class MyHTMLParser(HTMLParser):
# def handle_starttag(self, tag, attrs):
# print("Encountered a start tag:", tag)
#
# def handle_endtag(self, tag):
# print("Encountered an end tag :", tag)
def handle_data(self, data):
data = str(data)
if not data.startswith(' '):
f.write(data + "\n")
################################### Main Function ##############################################
if __name__ == '__main__':
parser = MyHTMLParser()
f = open('htmldata.txt', 'a')
# TODO: Insert a GUI input text box
# -- Insert HTML element here
parser.feed(
'<ul><li><a href="http://www.mkyong.com/spring/spring-bean-reference-example/">Spring bean reference example</a><br> How beans access to each other by specify the bean references in the same or different bean configuration file.</li><li><a href="http://www.mkyong.com/spring/how-to-define-bean-properties-in-spring/">Inject value into bean properties in Spring</a><br> Three ways to inject value into bean properties.</li><li><a href="http://www.mkyong.com/spring/load-multiple-spring-bean-configuration-file/">Load multiple Spring bean configuration file</a><br> Developers always categorize different bean configuration files in different modules folder, here’s a tip to show you how to load multiple Spring bean configuration files.</li><li><a href="http://www.mkyong.com/spring/spring-inner-bean-examples/">Spring inner bean examples</a><br> Whenever a bean is used for one particular property only, it’s always advised to declare it as an inner bean.</li><li><a href="http://www.mkyong.com/spring/spring-bean-scopes-examples/">Spring bean scopes examples</a><br> Bean scope is used to decide which type of bean instance should be return from the Spring container back to the caller.</li><li><a href="http://www.mkyong.com/spring/spring-collections-list-set-map-and-properties-example/">Spring Collections (List, Set, Map, and Properties) example</a><br> Example to inject values into collections type (List, Set, Map, and Properties).</li><li><a href="http://www.mkyong.com/spring/spring-listfactorybean-example/">ListFactoryBean example</a><br> Create a concrete List collection class (ArrayList and LinkedList), and inject it into bean property.</li><li><a href="http://www.mkyong.com/spring/spring-setfactorybean-example/">SetFactoryBean example</a><br> Create a concrete Set collection class (HashSet and TreeSet), and inject it into bean property.</li><li><a href="http://www.mkyong.com/spring/spring-mapfactorybean-example/">MapFactoryBean example</a><br> Create a concrete Map collection class (HashMap and TreeMap), and inject it into bean property.</li><li><a href="http://www.mkyong.com/spring/spring-how-to-pass-a-date-into-bean-property-customdateeditor/">Spring inject Date into bean property – CustomDateEditor</a><br> Normally, Spring is accepting date variable, here’s a tip to use CustomDateEditor to work around it.</li><li><a href="http://www.mkyong.com/spring/spring-propertyplaceholderconfigurer-example/">Spring PropertyPlaceholderConfigurer example</a><br> Externalize the deployment details into a properties file, and access from a bean configuration file via a special format – ${variable}.</li><li><a href=" http://www.mkyong.com/spring/spring-bean-configuration-inheritance/">Spring bean configuration inheritance</a><br> Inheritance is very useful for a bean to share common values, properties or configuration.</li><li><a href="http://www.mkyong.com/spring/spring-properties-dependency-checking/">Spring dependency checking</a><br> Spring comes with 4 dependency checking modes to make sure the required properties have been set in bean.</li><li><a href="http://www.mkyong.com/spring/spring-dependency-checking-with-required-annotation/">Spring dependency checking with @Required Annotation</a><br> Dependency checking in annotation mode.</li><li><a href="http://www.mkyong.com/spring/spring-define-custom-required-style-annotation/">Custom @Required-style annotation</a><br> Create a custom @Required-style annotation ,which is equivalent to @Required annotation.</li><li><a 
href="http://www.mkyong.com/spring/spring-initializingbean-and-disposablebean-example/">Bean InitializingBean and DisposableBean example</a><br> Perform certain actions upon bean initialization and destruction. (interface)</li><li><a href="http://www.mkyong.com/spring/spring-init-method-and-destroy-method-example/">Bean init-method and destroy-method example</a><br> Perform certain actions upon bean initialization and destruction. (XML)</li><li><a href="http://www.mkyong.com/spring/spring-postconstruct-and-predestroy-example/">Bean @PostConstruct and @PreDestroy example</a><br> Perform certain actions upon bean initialization and destruction. (Annotation)</li></ul>')
f.close()
################################# End Main Function #############################################
| 135.578947 | 4,194 | 0.734278 |