Dataset column schema (name, dtype, and observed min/max or distinct-class counts; ⌀ marks columns containing null values):

| column | dtype | min | max |
|---|---|---|---|
| blob_id | string (lengths) | 40 | 40 |
| directory_id | string (lengths) | 40 | 40 |
| path | string (lengths) | 3 | 616 |
| content_id | string (lengths) | 40 | 40 |
| detected_licenses | sequence (lengths) | 0 | 112 |
| license_type | string (2 classes) |  |  |
| repo_name | string (lengths) | 5 | 115 |
| snapshot_id | string (lengths) | 40 | 40 |
| revision_id | string (lengths) | 40 | 40 |
| branch_name | string (777 classes) |  |  |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) |  |  |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (149 classes) |  |  |
| src_encoding | string (26 classes) |  |  |
| language | string (1 class) |  |  |
| is_vendor | bool (2 classes) |  |  |
| is_generated | bool (2 classes) |  |  |
| length_bytes | int64 | 3 | 10.2M |
| extension | string (188 classes) |  |  |
| content | string (lengths) | 3 | 10.2M |
| authors | sequence (lengths) | 1 | 1 |
| author_id | string (lengths) | 1 | 132 |
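A minimal sketch of reading a dataset with this schema via the `datasets` library; the dataset id below is a placeholder, not taken from this card:

from datasets import load_dataset

# Stream rows to avoid downloading the full dataset up front.
ds = load_dataset("user/dataset-name", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["length_bytes"])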
43caf1de2da7fac86bcfdb234a60cee558ff0e0a | 7d23056a789ded9ff2b9e14f9c57e59295cdfd6d | /samples/src/com/zoho/crm/api/initializer/init.py | e6722a40d14a4971a467de5fc2f5fd8877382104 | [] | no_license | L1nuxFNC/zohocrm-python-sdk | 2e825fe4d7c6fb1374a5747cbd1e39b0dd4b706d | bba7328de07b137d2cb6e2aac31b8f57e0803026 | refs/heads/master | 2023-06-05T09:17:35.549980 | 2021-05-13T12:45:59 | 2021-05-13T12:45:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | from zcrmsdk.src.com.zoho.crm.api.user_signature import UserSignature
from zcrmsdk.src.com.zoho.crm.api.dc import INDataCenter, USDataCenter, EUDataCenter, CNDataCenter, AUDataCenter
from zcrmsdk.src.com.zoho.api.authenticator.store import DBStore, FileStore
from zcrmsdk.src.com.zoho.api.logger import Logger
from zcrmsdk.src.com.zoho.crm.api.initializer import Initializer
from zcrmsdk.src.com.zoho.api.authenticator.oauth_token import OAuthToken, TokenType
class SDKInitializer(object):
@staticmethod
def initialize():
"""
Create an instance of Logger Class that takes two parameters
1 -> Level of the log messages to be logged. Configure it by typing Logger.Levels followed by "." and choosing a level from the list displayed.
2 -> Absolute file path, where messages need to be logged.
"""
logger = Logger.get_instance(level=Logger.Levels.INFO, file_path="/Users/user_name/Documents/python_sdk_log.log")
# Create a UserSignature instance that takes the user's email as a parameter
user = UserSignature(email="[email protected]")
"""
Configure the environment
which is of the pattern Domain.Environment
Available Domains: USDataCenter, EUDataCenter, INDataCenter, CNDataCenter, AUDataCenter
Available Environments: PRODUCTION(), DEVELOPER(), SANDBOX()
"""
environment = USDataCenter.PRODUCTION()
"""
Create a Token instance that takes the following parameters
1 -> OAuth client id.
2 -> OAuth client secret.
3 -> OAuth redirect URL.
4 -> REFRESH/GRANT token.
5 -> token type.
"""
token = OAuthToken(client_id="clientId", client_secret="clientSecret", redirect_url="redirectURL", token="refresh_or_grant_token", token_type=TokenType.REFRESH)  # use TokenType.GRANT instead when passing a grant token
"""
Create an instance of TokenStore
1 -> Absolute file path of the file to persist tokens
"""
store = FileStore(file_path='/Users/username/Documents/python_sdk_tokens.txt')
"""
Create an instance of TokenStore
1 -> DataBase host name. Default value "localhost"
2 -> DataBase name. Default value "zohooauth"
3 -> DataBase user name. Default value "root"
4 -> DataBase password. Default value ""
5 -> DataBase port number. Default value "3306"
"""
store = DBStore()
store = DBStore(host='host_name', database_name='database_name', user_name='user_name', password='password',
port_number='port_number')
"""
A Boolean value for the key (auto_refresh_fields) to allow or prevent auto-refreshing of the modules' fields in the background.
if True - all the modules' fields will be auto-refreshed in the background whenever there is any change.
if False - the fields will not be auto-refreshed in the background. The user can manually delete the file(s) or the specific module's fields using methods from ModuleFieldsHandler
"""
auto_refresh_fields = True
"""
The absolute directory path (given under the key resource_path) used to store user-specific files containing information about fields in modules.
"""
resource_path = '/Users/user_name/Documents/python-app'
"""
Call the static initialize method of Initializer class that takes the following arguments
1 -> UserSignature instance
2 -> Environment instance
3 -> Token instance
4 -> TokenStore instance
5 -> Logger instance
6 -> auto_refresh_fields
7 -> resource_path
"""
Initializer.initialize(user=user, environment=environment, token=token, store=store, logger=logger, auto_refresh_fields=auto_refresh_fields, resource_path=resource_path)
SDKInitializer.initialize()
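# Added note (not in the original sample): to target a different region, swap
# the environment line above for another imported data center, e.g.
#     environment = EUDataCenter.PRODUCTION()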
| [
"[email protected]"
] | |
72ba22a91588bf1a22d08ecacb33ec336da6f0d5 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_vpn_server_configurations_operations.py | c68e670e8370ac82125f9b7a40a73e128c19f475 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 28,466 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsOperations(object):
"""VpnServerConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnServerConfiguration"
"""Retrieves the details of a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being retrieved.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
vpn_server_configuration_parameters, # type: "_models.VpnServerConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnServerConfiguration"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'VpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
vpn_server_configuration_parameters, # type: "_models.VpnServerConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnServerConfiguration"]
"""Creates a VpnServerConfiguration resource if it doesn't exist else updates the existing
VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being created or
updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to create or update
VpnServerConfiguration.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
vpn_server_configuration_parameters=vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
vpn_server_configuration_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnServerConfiguration"
"""Updates VpnServerConfiguration tags.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to update
VpnServerConfiguration tags.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vpn_server_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being deleted.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnServerConfigurationsResult"]
"""Lists all the vpnServerConfigurations in a resource group.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnServerConfigurationsResult"]
"""Lists all the VpnServerConfigurations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
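# Hedged usage sketch (not part of this generated module): assuming a network
# management client exposing these operations, a long-running call from the
# docstrings above would look like:
#     poller = client.vpn_server_configurations.begin_create_or_update(
#         resource_group_name="rg",
#         vpn_server_configuration_name="cfg",
#         vpn_server_configuration_parameters=params,
#     )
#     config = poller.result()  # block until the ARM operation finishes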
| [
"[email protected]"
] | |
dd6398e4756bc1d70633d09a2c01a4591bf45d5a | dc99d95671170444cd7bf02e37da6ecda4a5f19e | /apps/courses/forms.py | 7c3607216ee4eed7c75516ebceebca0b96f618d5 | [] | no_license | bbright3493/python_real_war | 734d49ed9f7e1800d24dc754424a07b69d7d8c1f | 6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1 | refs/heads/master | 2020-03-30T06:08:40.249185 | 2018-10-22T07:33:41 | 2018-10-22T07:33:41 | 150,841,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-5-7 上午10:05
# @Author : Ztsong
from django import forms
from .models import ProgramUpload
#
# class ProgramUploadForm(forms.ModelForm):
# class Meta:
# model = ProgramUpload
# fields = ['image']
class ProgramUploadForm(forms.Form):
image = forms.ImageField()
| [
"[email protected]"
] | |
ec626fcce05227e389111ecdb0c34538cbe6e418 | 0090756d7a6eb6ab8389ad23b20e89cd68dbd0e4 | /배열insert.py | b895832beb6bb14ce872d0f5f7be1610194d477c | [] | no_license | ssh6189/2019.12.16 | 5c3093e03ac793d5f0a93cf99e78c6483fcee6d8 | c1021bb72b3fdc05d7f5e8ae350bbd6eee65b0d3 | refs/heads/master | 2020-12-13T19:19:04.558270 | 2020-01-17T08:47:04 | 2020-01-17T08:47:04 | 234,507,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import numpy as np
a = np.arange(1, 10).reshape(3,3)
print(a)
# flatten a to a 1-D array and insert 999 at index 1
print(np.insert(a, 1, 999))
# insert along axis 0 at index 1 of a
# a row of 999s appears at row index 1
print(np.insert(a, 1, 999, axis=0))
# insert along axis 1 at index 1 of a
# a column of 999s appears at column index 1
print(np.insert(a, 1, 999, axis=1))
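# For reference, the three prints above produce (added note):
#   [  1 999   2   3   4   5   6   7   8   9]
#   [[  1   2   3]
#    [999 999 999]
#    [  4   5   6]
#    [  7   8   9]]
#   [[  1 999   2   3]
#    [  4 999   5   6]
#    [  7 999   8   9]]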
| [
"[email protected]"
] | |
2dd51e433b8b72c118cd5ab02697d1acc729de11 | c7979f4f6435fe8d0d07fff7a430da55e3592aed | /AGC023/A2.py | 1cc626666ce7eb2d39610edc6e55643e3ba653a1 | [] | no_license | banboooo044/AtCoder | cee87d40bb98abafde19017f4f4e2f984544b9f8 | 7541d521cf0da848ecb5eb10ffea7d75a44cbbb6 | refs/heads/master | 2020-04-14T11:35:24.977457 | 2019-09-17T03:20:27 | 2019-09-17T03:20:27 | 163,818,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | N = int(input())
A = list(map(int,input().split(" ")))
for i in range(N):
| [
"[email protected]"
] | |
44685fe6f9efa4068a850e9767859e5f04694261 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codeforces/606/B.MakeThemOdd.py | 9c2d937efa9b206dced25914e93f323bacc2266a | [] | no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 849 | py | from collections import defaultdict
import heapq
for _ in range(int(input())):
n = int(input())
rec = {}
rec = defaultdict(lambda : 0, rec)
a = sorted(list(map(int, input().split())))
e = []
l = 0
for i in a:
if i % 2 == 0 and rec[-1 * i] == 0:
e.append(-1 * i)
rec[-1 * i] = 1
l += 1
heapq.heapify(e)
ans = 0
while l > 0:
# print(e)
ans += 1
temp = heapq.heappop(e)
# print("temp", -1 * temp)
rec[temp] = 0
temp = (-1 * temp) // 2
if temp % 2 == 0:
if rec[-1 * temp] == 1:
# print("temp is in", - 1 * temp)
l -= 1
else:
rec[-1 * temp] = 1
heapq.heappush(e, -1 * temp)
else:
l -= 1
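# Approach recap (added note): even values are pushed negated so heapq's
# min-heap acts as a max-heap; the current maximum is repeatedly halved (with
# rec used to deduplicate) until every tracked value is odd, counting one
# operation per halving.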
print(ans) | [
"[email protected]"
] | |
0d00be6ffa67dcb44dadf1e7fb59c96d3cefdc76 | dabc9c7ec7cce125a12c6243ff67fd91e620d636 | /tap/tests/test_pytest_plugin.py | c91e8b40631e9c79c21ada77df44a0db95c9ba65 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Mark-E-Hamilton/tappy | 7634209c2862c9e837b58602d4b59636fd9a8e89 | 62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04 | refs/heads/master | 2021-01-15T09:04:09.813683 | 2016-03-21T04:51:45 | 2016-03-21T04:51:45 | 53,630,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,332 | py | # Copyright (c) 2016, Matt Layman
try:
from unittest import mock
except ImportError:
import mock
import tempfile
from tap.plugins import _pytest
from tap.tests import TestCase
from tap.tracker import Tracker
class TestPytestPlugin(TestCase):
def setUp(self):
"""The pytest plugin uses module scope so a fresh tracker
must be installed each time."""
# When running this suite with pytest, save and restore the tracker.
self._tracker = _pytest.tracker
_pytest.tracker = Tracker()
def tearDown(self):
_pytest.tracker = self._tracker
def _make_config(self):
config = mock.Mock()
config.option.tap_stream = False
config.option.tap_files = False
config.option.tap_outdir = None
config.option.tap_combined = False
return config
def test_includes_options(self):
group = mock.Mock()
parser = mock.Mock()
parser.getgroup.return_value = group
_pytest.pytest_addoption(parser)
self.assertEqual(group.addoption.call_count, 4)
def test_tracker_stream_set(self):
config = self._make_config()
config.option.tap_stream = True
_pytest.pytest_configure(config)
self.assertTrue(_pytest.tracker.streaming)
def test_tracker_outdir_set(self):
outdir = tempfile.mkdtemp()
config = self._make_config()
config.option.tap_outdir = outdir
_pytest.pytest_configure(config)
self.assertEqual(_pytest.tracker.outdir, outdir)
def test_tracker_combined_set(self):
config = self._make_config()
config.option.tap_combined = True
_pytest.pytest_configure(config)
self.assertTrue(_pytest.tracker.combined)
def test_track_when_call_report(self):
"""Only the call reports are tracked."""
_pytest.tracker = mock.Mock()
report = mock.Mock(when='setup', outcome='passed')
_pytest.pytest_runtest_logreport(report)
self.assertFalse(_pytest.tracker.add_ok.called)
def test_tracks_ok(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
report = mock.Mock(when='call', outcome='passed', location=location)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_ok.assert_called_once_with(
'TestFake', 'TestFake.test_me')
def test_tracks_not_ok(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
report = mock.Mock(when='call', outcome='failed', location=location)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_not_ok.assert_called_once_with(
'TestFake', 'TestFake.test_me', diagnostics='')
def test_tracks_skip(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
longrepr = ('', '', 'Skipped: a reason')
report = mock.Mock(
when='call', outcome='skipped', location=location,
longrepr=longrepr)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_skip.assert_called_once_with(
'TestFake', 'TestFake.test_me', 'a reason')
def test_generates_reports_for_stream(self):
config = self._make_config()
config.option.tap_stream = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_generates_reports_for_files(self):
config = self._make_config()
config.option.tap_files = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_generates_reports_for_combined(self):
config = self._make_config()
config.option.tap_combined = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_skips_reporting_with_no_output_option(self):
config = self._make_config()
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
self.assertFalse(_pytest.tracker.generate_tap_reports.called)
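# Hedged note (not in the original suite): with the plugin installed, the
# option attributes exercised above suggest pytest invocations such as
#     pytest --tap-stream
#     pytest --tap-combined --tap-outdir=reports
# Flag spellings are inferred from the option names, not verified here.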
| [
"[email protected]"
] | |
e36cba2db79f18ed6432af22f03c4f53dd4f61b1 | 2dfbb97b47fd467f29ffb26faf9a9f6f117abeee | /leetcode/242.py | 0b7a2589d14a456369352fe3820fb247d6675b0b | [] | no_license | liuweilin17/algorithm | 0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5 | d3e8669f932fc2e22711e8b7590d3365d020e189 | refs/heads/master | 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 | C++ | UTF-8 | Python | false | false | 1,014 | py | ###########################################
# Let's Have Some Fun
# File Name: 242.py
# Author: Weilin Liu
# Mail: [email protected]
# Created Time: Fri Oct 19 00:40:47 2018
###########################################
#coding=utf-8
#!/usr/bin/python
# valid anagram
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
dt = {}
l1 = len(s)
l2 = len(t)
if l1 != l2:
return False
for c in s:
if c in dt.keys():
dt[c] += 1
else:
dt[c] = 1
for c in t:
if c in dt.keys():
dt[c] -= 1
if dt[c] < 0:
return False
else:
return False
return True
if __name__ == '__main__':
so = Solution()
s = "anagram"
t = "nagaram"
print so.isAnagram(s, t)
s = "rat"
t = "car"
print so.isAnagram(s, t)
| [
"[email protected]"
] | |
818b20ac454ef8f772d87fb729b7474c68a5f9a6 | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/1152.py | 8af0d3fc1c7b3184f1a6c89454aee7a18af2623a | [] | no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | print(len(list(map(str,input().split())))) | [
"[email protected]"
] | |
7eacb9ca621e2a660599a473bfdbc1136d01a7a6 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/bbbeebun/codejam_01.py | 7a489d790a6fd40192e6c72e498da86daa2ff2b1 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,308 | py | def has_completed(mapping):
count = 0
for key in mapping:
count += mapping[key]
if count == 10:
return True
else:
return False
def update_mapping(current_n, mapping):
current_n_str = str(current_n)
for each in current_n_str:
if mapping[each] == 0:
mapping[each] = 1
def counting_sheep(n):
if n == 0:
return 'INSOMNIA'
mapping = {
'0':0, '1':0, '2':0,
'3':0, '4':0, '5':0,
'6':0, '7':0, '8':0,
'9':0
}
current_n = n
update_mapping(current_n, mapping)
while not has_completed(mapping):
current_n += n
update_mapping(current_n, mapping)
return current_n
i = 1
dataset = [0,1,2,11,1692,213858,999995,292164,265199,1000000,10,663708,25,674735,762196,519439,205639,686594,851051,506636,72961,571071,380018,721364,271918,124,362718,40,779467,125000,9,4,104652,20,999998,34,133688,911210,71670,403183,3,999999,777164,999991,999996,954404,999997,200,771909,535557,621518,246569,816478,12500,854110,434198,610249,562071,679849,999992,5,427795,889527,739756,866179,8,513404,125,211763,408914,1250,225473,541210,687079,839403,6,557598,816751,584871,857249,999993,999994,467549,364901,988598,659695,402255,657006,637531,224284,441246,192103,166,565718,300682,596698,584551,410726,7,90188]
for each in dataset:
print 'Case #'+str(i) +': ' + str(counting_sheep(each))
i += 1 | [
"[[email protected]]"
] | |
08d60b0fdf4f6abfda5e2ac10591021283fc44bf | 8e1be167066e30eff91c26c0757211cf3cf8b016 | /django/orm/book_authors_proj/apps/books_authors_app/migrations/0001_initial.py | 5818682c02c3c18e31b135482e2c1adb636304db | [] | no_license | dojo-solutions/online-ft-python | 074d0ba968f5a77eaec1bca0904232f2aa29051a | b4f6941d0bba376d121a40a6429b815d5b03c32f | refs/heads/master | 2020-04-21T11:52:31.390772 | 2019-03-02T01:27:54 | 2019-03-02T01:27:54 | 169,542,448 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-02-21 18:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='author',
name='books',
field=models.ManyToManyField(related_name='authors', to='books_authors_app.Book'),
),
]
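# Hedged reconstruction (not part of the migration file): the models implied
# by the operations above would look roughly like:
#     class Book(models.Model):
#         title = models.CharField(max_length=255)
#         desc = models.TextField()
#         created_at = models.DateTimeField(auto_now_add=True)
#         updated_at = models.DateTimeField(auto_now=True)
#     class Author(models.Model):
#         first_name = models.CharField(max_length=45)
#         last_name = models.CharField(max_length=45)
#         books = models.ManyToManyField(Book, related_name='authors')
#         created_at = models.DateTimeField(auto_now_add=True)
#         updated_at = models.DateTimeField(auto_now=True)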
| [
"[email protected]"
] | |
2ba20372fe4994021d5cf6e43c9b163c1c106b64 | 05a9e0bb7e33099f94dfc8af53b4837bc5c9d287 | /python/ext_examples/torch/bench/linear.py | 1a840cbd0db9eba60313d59db7025e1b6a7852df | [] | no_license | HiroIshida/snippets | 999c09efadae80397cb82a424328bb1dbda4915f | f64dcd793184be64682b55bdaee7392fd97a0916 | refs/heads/master | 2023-09-01T08:18:42.523625 | 2023-09-01T04:08:20 | 2023-09-01T04:08:20 | 207,662,767 | 7 | 2 | null | 2022-08-01T23:20:42 | 2019-09-10T21:04:01 | C++ | UTF-8 | Python | false | false | 782 | py | import torch.nn as nn
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import threadpoolctl
def measure_perf(depth, with_grad: bool = False):
lst = []
for _ in range(depth):
lst.append(nn.Linear(40, 40))
lst.append(nn.ReLU())
lst.append(nn.Linear(40, 1))
lst.append(nn.Sigmoid())
net = nn.Sequential(*lst)
arr = np.random.randn(1, 40)
ten = torch.from_numpy(arr).float()
ten.requires_grad_(with_grad)
ts = time.time()
n_trial = 100
for _ in range(n_trial):
val1 = net(ten)
if with_grad:
val1.backward()
perf = (time.time() - ts) / n_trial
return perf
perfs = [measure_perf(n, True) for n in tqdm.tqdm(range(50))]
plt.plot(perfs)
plt.show()
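# Note (added): this benchmark plots mean latency of a forward (and optional
# backward) pass as MLP depth grows; requires_grad_(True) on the input gives
# autograd a leaf tensor to differentiate back to.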
| [
"[email protected]"
] | |
b5ed3013b2eafda68318a223d46dce0287cafaff | 32fdc94d1b8d98085db5d1e8caae4161d3e70667 | /3rd_party/python3.7/lib/python3.7/site-packages/mining-0.2.2-py3.7-linux-x86_64.egg/mining/utils/listc.py | 298f9060e40d10832fcab747bdea37497e80d1e6 | [
"Python-2.0"
] | permissive | czfdlut/ticket_proxy | fa0f1924a86babfa7ce96cf97e929f7bf78643b7 | 0d7c19448741bc9030484a97c1b8f118098213ad | refs/heads/master | 2022-12-23T05:25:58.207123 | 2019-11-20T03:58:31 | 2019-11-20T03:58:31 | 174,579,562 | 1 | 3 | null | 2022-12-18T01:18:07 | 2019-03-08T17:22:48 | Python | UTF-8 | Python | false | false | 310 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'listc.cpython-37m-x86_64-linux-gnu.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"[email protected]"
] | |
32fb9a2a330ac6fa993cae29751e0c894fb2e922 | 1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a | /01.jump to python/chpter 2/62em.py | 4db6566d344540379fdc05693d0ca4cb074461b8 | [] | no_license | wql7654/bigdata_exam | f57c8b475690cbc5978009dbf8008bedff602e2a | c07ee711bb84407428ba31165185b9607b6825e8 | refs/heads/master | 2023-04-07T00:50:59.563714 | 2021-05-25T02:46:43 | 2021-05-25T02:46:43 | 180,915,985 | 0 | 0 | null | 2023-03-25T01:08:09 | 2019-04-12T02:36:08 | Jupyter Notebook | UTF-8 | Python | false | false | 191 | py |
a=['life','is','too','hard']
re=" ".join(a)
print(re)
re=re.split()
print(re)
re=','.join(a)
print(re)
re=re.split(',')
print(re)
re.sort()
print(re)
re=" ".join(re)
print(re)
| [
"[email protected]"
] | |
ede66e2d33e041a80cec2a8771ccc87fe440f7af | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/148/usersdata/268/99980/submittedfiles/testes.py | c1a570ae015ca648546489e96edebf5c24b3fe5c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # -*- coding: utf-8 -*-
n=int(input('ooo'))
i=0
while i*(i+1)*(i+2) < n:
i=i+1
if i*(i+1)*(i+2)==n:
print('S')
else :
print('N')
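# Note (added): the loop advances i to the first value with
# i*(i+1)*(i+2) >= n, so n is a product of three consecutive integers exactly
# when equality holds; S = yes, N = no.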
| [
"[email protected]"
] | |
60c93a4684a8e005d11c1dc1ee26fb60e25dd162 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03393/s891507939.py | 38993f4b41b75bc140544df5c2618f773831c0e9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | s = input()
c = list(s)
c2 = set(c)
al = sorted(list("qwertyuiopasdfghjklzxcvbnm"))
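# Overview (added note): print the lexicographically smallest valid string
# strictly greater than s -- append the smallest unused letter when s is
# shorter than 26 characters; otherwise bump the rightmost position that can
# take a larger letter drawn from its suffix, or report -1 for "zyx...cba".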
# if the string is shorter than 26 letters, append the smallest unused letter
if len(c)<26:
for i in range(26):
if al[i] not in c2:
print(s+al[i])
exit()
if s == "zyxwvutsrqponmlkjihgfedcba":
print(-1)
exit()
rev = "zyxwvutsrqponmlkjihgfedcba"
for i in range(25,-1,-1):
x = sorted(c[i:])
for j in x:
if ord(s[i])<ord(j):
print(s[:i]+j)
exit() | [
"[email protected]"
] | |
ebbc9f436c2f66f730686c9789e0cb9cb7aa1ee8 | 5ac72c8484d8b7c2ecb94217e70ffa96c8c83053 | /server/account/models.py | 0661b22685cb7c013e9dce0dd4cb818a1fc07399 | [
"MIT"
] | permissive | buffalos0721/Super-Neutron-Drive | 975b6a9d20f9dc28d85632f87f50dd37da199f1f | d3cbeeae113722099032fb651dd4148670cb86e9 | refs/heads/master | 2020-03-26T08:40:40.409045 | 2016-08-18T16:20:36 | 2016-08-18T16:20:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,280 | py | import urllib
import datetime
from importlib import import_module
from collections import OrderedDict
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core import validators
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from ndrive.utils.lib import cached_method
from ndrive.utils.email import send_mail
import jwt
from paypal.standard.ipn.signals import subscription_signup
SESSION_ENGINE = import_module(settings.SESSION_ENGINE)
class User (AbstractBaseUser, PermissionsMixin):
verified_email = models.EmailField('verified email address', null=True, blank=True)
verified = models.BooleanField(default=False)
newsletter = models.BooleanField('Subscribe to Newsletter', default=False)
first_name = models.CharField('first name', max_length=30, blank=True)
last_name = models.CharField('last name', max_length=30, blank=True)
username = models.CharField('Username', max_length=30, unique=True,
help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.',
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', 'Enter a valid username.', 'invalid')
])
email = models.EmailField('E-Mail', unique=True)
is_staff = models.BooleanField('staff status', default=False,
help_text='Designates whether the user can log into this admin site.')
is_active = models.BooleanField('active', default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')
date_joined = models.DateTimeField('date joined', default=timezone.now)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
def __unicode__ (self):
return self.username
def get_short_name (self):
return self.username
@staticmethod
def autocomplete_search_fields():
return ("id__iexact", "username__icontains", "email__icontains", "first_name__icontains", "last_name__icontains")
def chrome_token (self, session):
return jwt.encode({
'session': session.session_key,
'exp': datetime.datetime(2030, 1, 1)
}, settings.SECRET_KEY)
@staticmethod
def get_session (token):
payload = jwt.decode(token, settings.SECRET_KEY, verify_expiration=False)
return SESSION_ENGINE.SessionStore(payload['session'])
def send_verify (self, request):
if self.email != self.verified_email:
EmailVerify.new_verify(self, request)
def send_pwreset (self, request):
EmailVerify.new_verify(self, request, True)
@cached_method
def subscription (self):
try:
return self.subscription_set.filter(expires__gte=timezone.now())[0]
except:
return None
class EmailVerify (models.Model):
user = models.ForeignKey(User)
email = models.EmailField()
used = models.BooleanField(default=False)
reset = models.BooleanField(default=False)
created = models.DateTimeField(default=timezone.now)
class Meta:
verbose_name = 'E-Mail Verify'
verbose_name_plural = 'E-Mail Verifies'
def __unicode__ (self):
return self.email
def qs (self):
return '?token={}&email={}'.format(self.token(), urllib.quote(self.email))
@cached_method
def token (self):
return jwt.encode({'id': self.id, 'created': unicode(self.created)}, settings.SECRET_KEY)
@staticmethod
def new_verify (user, request, reset=False):
verify = EmailVerify(user=user, email=user.email, reset=reset)
verify.save()
context = {'verify': verify, 'request': request}
if reset:
tpl = 'account/email.password-reset'
send_mail('Password Reset - {site_name}', [verify.email], tpl, context)
else:
tpl = 'account/email.verify'
send_mail('Please Verify Your E-Mail - {site_name}', [verify.email], tpl, context)
return verify
@staticmethod
def verify_token (token, email, age=10, reset=False):
payload = jwt.decode(token, settings.SECRET_KEY)
old = timezone.now() - datetime.timedelta(days=age)
verify = EmailVerify.objects.get(
id=payload['id'],
email=email,
created__gte=old,
used=False,
reset=reset,
)
if not reset:
verify.used = True
verify.save()
return verify
SUBS_TYPES = [
('initiate', 'Initiate'),
('padawan', 'Padawan'),
('knight', 'Knight'),
('master', 'Master'),
('grand-master', 'Grand Master'),
]
SUBSCRIPTIONS = OrderedDict([
('initiate', {
'cost': 2500,
'name': 'Initiate'
}),
('padawan', {
'cost': 5000,
'name': 'Padawan'
}),
('knight', {
'cost': 9900,
'name': 'Knight'
}),
('master', {
'cost': 30000,
'name': 'Master'
}),
('grand-master', {
'cost': 50000,
'name': 'Grand Master'
}),
])
if settings.DEBUG:
SUBSCRIPTIONS['special'] = {'cost': 200, 'name': 'Special'}
SUBS_TYPES.append(('special', 'Special'))
class Subscription (models.Model):
user = models.ForeignKey(User)
name = models.CharField('Display Name for Credits', max_length=30)
stype = models.CharField('Subscription Type', max_length=20, choices=SUBS_TYPES)
stripe_id = models.CharField(max_length=255, blank=True, null=True)
stripe_subs = models.CharField(max_length=255, blank=True, null=True)
paypal_id = models.CharField(max_length=255, blank=True, null=True)
paypal_subs = models.CharField(max_length=255, blank=True, null=True)
expires = models.DateTimeField()
cancelled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-expires',)
def __unicode__ (self):
return self.user.username
def payment_type (self):
if self.stripe_id:
return 'Stripe'
return 'PayPal'
def paypal_subs_created (sender, **kwargs):
user = User.objects.get(id=sender.custom)
subs = Subscription(
user = user,
name = user.username,
stype = sender.item_number,
expires = timezone.now() + datetime.timedelta(days=365),
paypal_id = sender.payer_email,
paypal_subs = sender.subscr_id,
)
subs.save()
subscription_signup.connect(paypal_subs_created)
| [
"[email protected]"
] | |
1797c37d09a01a52a738bcb504b0284fad56d361 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/tests/component/_util.py | d93cf7462afb7549afc5c81a02eb2035ffe81047 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 86 | py | _COMPONENT_TIMEOUT_SECOND = 20 * 60 # timeout for component's tests, unit in second.
| [
"[email protected]"
] | |
d8e1b1e542edb43a01bb810371c9af69a80d601c | 1e4d2a66f92b8ef3baddaf76366c1be4ad853328 | /Safari_Edris_DSC510/SandBox/ImportFiles/venv/Scripts/pip3-script.py | d8f7812de43586a23f66d5e0a7f99db0e1b9abc4 | [] | no_license | dlingerfelt/DSC-510-Fall2019 | 0c4168cf030af48619cfd5e044f425f1f9d376dd | 328a5a0c8876f4bafb975345b569567653fb3694 | refs/heads/master | 2022-12-04T05:04:02.663126 | 2022-11-28T14:58:34 | 2022-11-28T14:58:34 | 204,721,695 | 5 | 23 | null | 2019-12-06T01:15:11 | 2019-08-27T14:30:27 | Python | UTF-8 | Python | false | false | 463 | py | #!C:\Users\safar\Documents\GitHub\DSC-510-Fall2019\Safari_Edris_DSC510\SandBox\ImportFiles\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
2bb14a82bf0195f215a36c5e10aef5136ef02006 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/combine82/35-tideGauge.py | c363a79bc5b9cbdedc37466360109e92883f0129 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
#cd to where the actual file is
os.chdir(dir_in)
x = 35  # process only tide-gauge folder index 35 (matching the "35-" prefix in the script name)
y = 36  # exclusive upper bound: exactly one folder per script run
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
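# Expected input layout (hypothetical example):
#   /lustre/fs0/home/mtadesse/eraFiveConcat/<tide_gauge_name>/
#       slp.csv  wnd_u.csv  wnd_v.csv
# Each CSV carries a 'date' column, which is used as the merge key above.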
| [
"[email protected]"
] | |
ea48d2765c2ca0ae7d26e05b899fc93cb13349ec | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/system/system_bfd_stats.py | 2b13f0d88b617ea5ea2c93a905d0181004463e88 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 4,388 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param udp_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "2", "format": "counter"}
:param invalid_detect_mult: {"optional": true, "size": "2", "type": "number", "oid": "8", "format": "counter"}
:param auth_length_invalid: {"optional": true, "size": "2", "type": "number", "oid": "12", "format": "counter"}
:param auth_key_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "16", "format": "counter"}
:param invalid_my_disc: {"optional": true, "size": "2", "type": "number", "oid": "10", "format": "counter"}
:param multihop_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "4", "format": "counter"}
:param dest_unreachable: {"optional": true, "size": "2", "type": "number", "oid": "20", "format": "counter"}
:param length_too_small: {"optional": true, "size": "2", "type": "number", "oid": "6", "format": "counter"}
:param auth_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "13", "format": "counter"}
:param auth_failed: {"optional": true, "size": "2", "type": "number", "oid": "18", "format": "counter"}
:param auth_type_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "14", "format": "counter"}
:param invalid_ttl: {"optional": true, "size": "2", "type": "number", "oid": "11", "format": "counter"}
:param data_is_short: {"optional": true, "size": "2", "type": "number", "oid": "7", "format": "counter"}
:param session_not_found: {"optional": true, "size": "2", "type": "number", "oid": "3", "format": "counter"}
:param auth_seqnum_invalid: {"optional": true, "size": "2", "type": "number", "oid": "17", "format": "counter"}
:param local_state_admin_down: {"optional": true, "size": "2", "type": "number", "oid": "19", "format": "counter"}
:param ip_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "1", "format": "counter"}
:param version_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "5", "format": "counter"}
:param auth_key_id_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "15", "format": "counter"}
:param other_error: {"optional": true, "size": "2", "type": "number", "oid": "21", "format": "counter"}
:param invalid_multipoint: {"optional": true, "size": "2", "type": "number", "oid": "9", "format": "counter"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.udp_checksum_error = ""
self.invalid_detect_mult = ""
self.auth_length_invalid = ""
self.auth_key_mismatch = ""
self.invalid_my_disc = ""
self.multihop_mismatch = ""
self.dest_unreachable = ""
self.length_too_small = ""
self.auth_mismatch = ""
self.auth_failed = ""
self.auth_type_mismatch = ""
self.invalid_ttl = ""
self.data_is_short = ""
self.session_not_found = ""
self.auth_seqnum_invalid = ""
self.local_state_admin_down = ""
self.ip_checksum_error = ""
self.version_mismatch = ""
self.auth_key_id_mismatch = ""
self.other_error = ""
self.invalid_multipoint = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Bfd(A10BaseClass):
"""Class Description::
Statistics for the object bfd.
Class bfd supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/system/bfd/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "bfd"
self.a10_url="/axapi/v3/system/bfd/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"[email protected]"
] | |
cf9cab0e49fa44985cb0ae35e2aab029d37ecf6d | acf7cff7d08ae5984b0ba1e65e4404a0bfb07ba1 | /dataset.py | a467b9cd79fc5a65c62eac84ece5670e74cf611c | [] | no_license | aloyschen/NSFWImageClassify | 54981406c754cf0c6ecb0db8a337b41b836ce9fe | f8d5666bfcbaf24dc5e46beeeb50dd10a9efca0c | refs/heads/master | 2020-06-02T13:50:23.027165 | 2019-06-21T11:13:57 | 2019-06-21T11:13:57 | 191,176,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,119 | py | # encoding:utf-8
import os
import math
import shutil
import random
import config
import tensorflow as tf
class NSFWDataset():
def __init__(self, datasetDir, mode):
"""
Introduction
------------
        Image dataset helper:
        1. Converts raw image data into TFRecord files (see convert_to_tfrecord).
"""
self.datasetDir = datasetDir
self.mode = mode
self._sess = tf.Session()
# if not os.path.exists(os.path.join(self.datasetDir, self.mode, "tfrecords")):
# os.mkdir(os.path.join(self.datasetDir, self.mode, "tfrecords"))
# file_pattern = os.path.join(self.datasetDir, self.mode) + '/tfrecords/*.tfrecord'
# self.tfRecord_file = tf.gfile.Glob(file_pattern)
self._encode_image = tf.placeholder(tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encode_image, channels = 3)
self._decode_png = tf.image.decode_png(self._encode_image, channels = 3)
# if self.mode == "train":
# self.convert_to_tfecord()
# else:
# if len(file_pattern) == 0:
# self.convert_to_tfecord()
# self.tfRecord_file = tf.gfile.Glob(file_pattern)
def int64_feature(self, values):
"""
Introduction
------------
            Convert values into a TensorFlow TFRecord int64 feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list = tf.train.Int64List(value = values))
def bytes_feature(self, values):
"""
Introduction
------------
            Convert values into a TensorFlow TFRecord bytes feature.
"""
return tf.train.Feature(bytes_list = tf.train.BytesList(value = [values]))
def _get_filenames_and_classes(self):
"""
Introduction
------------
        Collect the image files and class names under self.datasetDir/self.mode
        (train, test or validation).
        Returns
        -------
        All image file paths in the dataset and the sorted class names.
"""
image_path = []
classes_name = []
root_path = os.path.join(self.datasetDir, self.mode)
for filename in os.listdir(root_path):
path = os.path.join(root_path, filename)
if os.path.isdir(path):
classes_name.append(filename)
per_classes = []
for imageFile in os.listdir(path):
per_classes.append(os.path.join(path, imageFile))
per_classes = random.sample(per_classes, config.perClass_num)
image_path = image_path + per_classes
return image_path, sorted(classes_name)
def PreProcessImage(self, image):
"""
Introduction
------------
        Pre-process an image for training or evaluation.
        Parameters
        ----------
        image: input image tensor
        Returns
        -------
        The pre-processed image tensor.
"""
if self.mode == 'train':
image = tf.image.resize_image_with_crop_or_pad(image, config.image_size, config.image_size)
image = tf.image.random_flip_left_right(image)
        # Standardize pixel values: subtract the mean and divide by the standard deviation
image = tf.image.per_image_standardization(image)
return image
    def convert_to_tfrecord(self):
        """
        Introduction
        ------------
        Convert the dataset into sharded TFRecord files.
        """
        # Remove the randomly-sampled data from the previous round first
print("remove last train data")
shutil.rmtree(os.path.join(self.datasetDir, self.mode, "tfrecords"))
image_files, classes = self._get_filenames_and_classes()
os.mkdir(os.path.join(self.datasetDir, self.mode, "tfrecords"))
class_id_dict = dict(zip(classes, range(len(classes))))
if self.mode == "train":
num_shards = 20
else:
num_shards = 10
num_per_shard = int(math.ceil(len(image_files) / float(num_shards)))
image_nums = 0
for shard_id in range(num_shards):
output_filename = os.path.join(self.datasetDir, self.mode) + "/tfrecords/nsfw_{}_{}_of_{}.tfrecord".format(self.mode, shard_id, num_shards)
with tf.python_io.TFRecordWriter(output_filename) as tfRecordWriter:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, len(image_files))
for idx in range(start_idx, end_idx):
print("converting image {}/{} shard {}".format(idx, len(image_files), shard_id))
image_data = tf.gfile.FastGFile(image_files[idx], 'rb').read()
                    # The file may be corrupt; skip this record if decoding raises
try:
if image_files[idx].split('.')[-1] == 'png':
image = self._sess.run(self._decode_png, feed_dict = {self._encode_image : image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
else:
image = self._sess.run(self._decode_jpeg, feed_dict = {self._encode_image : image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
except Exception:
continue
height, width = image.shape[0], image.shape[1]
classname = os.path.basename(os.path.dirname(image_files[idx]))
class_id = class_id_dict[classname]
example = tf.train.Example(features = tf.train.Features(feature ={
'image/encoded' : self.bytes_feature(image_data),
'image/label' : self.int64_feature(class_id),
'image/height' : self.int64_feature(height),
'image/width' : self.int64_feature(width)
}))
tfRecordWriter.write(example.SerializeToString())
image_nums += 1
print("所有数据集数量", image_nums)
def parse_tfrecord(self, serialized_example):
"""
Introduction
------------
        Parse one serialized TFRecord example.
        Parameters
        ----------
        serialized_example: the serialized example proto
"""
parsed = tf.parse_single_example(
serialized_example,
features = {
'image/encoded' : tf.FixedLenFeature([], tf.string),
'image/label' : tf.FixedLenFeature([], tf.int64),
'image/height' : tf.FixedLenFeature([], tf.int64),
'image/width' : tf.FixedLenFeature([], tf.int64)
})
image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
image = tf.image.convert_image_dtype(image, tf.uint8)
image.set_shape([None, None, 3])
image = self.PreProcessImage(image)
if self.mode != 'train':
image = tf.image.resize_images(image, [config.image_size, config.image_size])
label = parsed['image/label']
label = tf.cast(label, tf.int32)
return image, label
def parse_image(self, filename, label):
"""
Introduction
------------
        Parse one sample from an image file.
        Parameters
        ----------
        filename: image file path
        label: image label
"""
image_string = tf.read_file(filename)
image = tf.image.decode_jpeg(image_string)
image = self.PreProcessImage(image)
if self.mode != 'train':
image = tf.image.resize_images(image, [config.image_size, config.image_size])
return image, label
def process_record_dataset(self, batch_size, num_epochs):
"""
Introduction
------------
        Build the tf.data.Dataset used for training or evaluation.
        Parameters
        ----------
        batch_size: size of each batch
        num_epochs: number of training epochs
"""
image_files, classes = self._get_filenames_and_classes()
class_id_dict = dict(zip(classes, range(len(classes))))
labels = []
for idx in range(len(image_files)):
classname = os.path.basename(os.path.dirname(image_files[idx]))
class_id = class_id_dict[classname]
labels.append(class_id)
dataset = tf.data.Dataset.from_tensor_slices((image_files, labels))
dataset = dataset.map(self.parse_image)
        #dataset = tf.data.TFRecordDataset(filenames = self.tfRecord_file)
        #dataset = dataset.map(self.parse_tfrecord, num_parallel_calls = 10)
        if self.mode == 'train':
            # Shuffle individual samples before batching, then repeat per epoch
            dataset = dataset.shuffle(buffer_size = 500)
            dataset = dataset.repeat(num_epochs)
        dataset = dataset.batch(batch_size).prefetch(buffer_size = batch_size)
        return dataset
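# A minimal usage sketch (not part of the original module); '/data/nsfw' is a
# hypothetical dataset root and config.image_size etc. must already be set:
if __name__ == '__main__':
    nsfw = NSFWDataset('/data/nsfw', 'train')
    train_data = nsfw.process_record_dataset(batch_size=32, num_epochs=10)
    iterator = train_data.make_one_shot_iterator()
    images, labels = iterator.get_next()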
| [
"[email protected]"
] | |
c1bb69c3c89f7e74c5290bc657be0da088c70345 | 13696a9691b173d75b11b4aee22b79d4ea6b7c0b | /test/test_o_auth_api.py | 760055ebca9f7d8f8ae0f95734aad1999bf0caef | [
"Apache-2.0"
] | permissive | square/connect-python-sdk | 410613bc4b04f0f70176275591a16c9e49e25ede | e00e2889b2dd2c55048219cbe64db79962a68633 | refs/heads/master | 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 | Apache-2.0 | 2020-12-20T18:41:31 | 2016-08-02T16:07:17 | Python | UTF-8 | Python | false | false | 1,346 | py | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.apis.o_auth_api import OAuthApi
class TestOAuthApi(unittest.TestCase):
""" OAuthApi unit test stubs """
def setUp(self):
self.api = squareconnect.apis.o_auth_api.OAuthApi()
def tearDown(self):
pass
def test_obtain_token(self):
print("Start test case for obtain_token")
pass
def test_renew_token(self):
print("Start test case for renew_token")
pass
def test_revoke_token(self):
print("Start test case for revoke_token")
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0ad4839cc902ab89f8ee4c25b4c3fbf598f4798a | 8881a4927d893e1e755c0488f76ba7941b379f26 | /tech_gram_project2/producthunt_project/producthunt_project/urls.py | 01b11efd49f26851698655f127f6afdfa499ab26 | [] | no_license | SatishNitk/Django | 6bb839fcf2bc7d70413e3d56ac98124a7a96a5de | d9260c032322a34410d783c39a8f13e8f63b8be4 | refs/heads/master | 2020-05-24T23:01:35.767388 | 2019-07-06T13:56:50 | 2019-07-06T13:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('product/', include("products.urls")),
path('account/', include("accounts.urls"))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # this is for to open the image fro link inside admin
| [
"[email protected]"
] | |
faf7637b93bf57c9d86f6f84ec0dc2f5c276cca2 | 994ea22f35c635fdf139af9282b0d3a3d86ea34a | /ud617-intro_to_hadoop_mapreduce/lesson6/part1/reducer_q3.py | d3be0e7c6127a7fdf196f92e9b3177b5ef9970aa | [] | no_license | zjyx147/Udacity | ac371fbc5b5b456e88b411657ef5a28c3b071c6c | d86fadd537dbacc6f8142b043e71527b0448bae3 | refs/heads/master | 2022-06-23T14:25:41.242353 | 2019-06-20T20:12:13 | 2019-06-20T20:12:13 | 191,207,247 | 0 | 0 | null | 2022-06-21T22:07:35 | 2019-06-10T16:42:18 | DIGITAL Command Language | UTF-8 | Python | false | false | 593 | py | #!/usr/bin/python
import sys
totalNum = 0
totalVal = 0
# Loop over the mapper output from stdin.
# Each line is in the format key\tval,
# where key is the store name and val is the sale amount.
# This reducer only accumulates an overall record count and total value,
# so the key itself is never inspected.
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
# Something has gone wrong. Skip this line.
continue
thisKey, thisSale = data_mapped
totalNum += 1
totalVal += float(thisSale)
print totalNum, totalVal
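# Hypothetical invocation with Hadoop Streaming (paths are placeholders):
#   hadoop jar hadoop-streaming.jar \
#       -mapper mapper.py -reducer reducer_q3.py \
#       -input /purchases -output /purchases_total
# For "store\t12.50" style input this prints: <record count> <total sales value>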
| [
"[email protected]"
] | |
f3711d296271e67c2ea2358fbca18f624f2a8a00 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/async/task.py | 266d610dafdac10674199f6fb8a6fcccfbf9ca7b | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,331 | py | # Copyright (C) 2010, 2011 Sebastian Thiel ([email protected]) and contributors
#
# This module is part of async and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from .graph import Node
from .util import ReadOnly
from .channel import IteratorReader
import threading
import weakref
__all__ = ('Task', 'ThreadTaskBase', 'IteratorTaskBase',
'IteratorThreadTask', 'ChannelThreadTask')
class Task(Node):
"""
Abstracts a named task, which contains
additional information on how the task should be queued and processed.
Results of the item processing are sent to a writer, which is to be
set by the creator using the ``set_writer`` method.
Items are read using the internal ``_read`` callable, subclasses are meant to
set this to a callable that supports the Reader interface's read function.
* **min_count** assures that not less than min_count items will be processed per call.
* **max_chunksize** assures that multi-threading is happening in smaller chunks. If
someone wants all items to be processed, using read(0), the whole task would go to
one worker, as well as dependent tasks. If you want finer granularity , you can
specify this here, causing chunks to be no larger than max_chunksize
* **apply_single** if True, default True, individual items will be given to the
worker function. If False, a list of possibly multiple items will be passed
instead.
"""
__slots__ = ( '_read', # method to yield items to process
'_out_writer', # output write channel
'_exc', # exception caught
'_done', # True if we are done
'_num_writers', # number of concurrent writers
'_wlock', # lock for the above
'fun', # function to call with items read
'min_count', # minimum amount of items to produce, None means no override
                'max_chunksize',    # maximum amount of items to process per process() call
                'apply_single'      # apply single items even if multiple were read
)
def __init__(self, id, fun, apply_single=True, min_count=None, max_chunksize=0,
writer=None):
Node.__init__(self, id)
self._read = None # to be set by subclasss
self._out_writer = writer
self._exc = None
self._done = False
self._num_writers = 0
self._wlock = threading.Lock()
self.fun = fun
self.min_count = None
self.max_chunksize = 0 # not set
self.apply_single = apply_single
def is_done(self):
""":return: True if we are finished processing"""
return self._done
def set_done(self):
"""Set ourselves to being done, has we have completed the processing"""
self._done = True
def set_writer(self, writer):
"""Set the write channel to the given one"""
self._out_writer = writer
def writer(self):
"""
        :return: a proxy to our write channel or None if none is set
:note: you must not hold a reference to our write channel when the
task is being processed. This would cause the write channel never
to be closed as the task will think there is still another instance
being processed which can close the channel once it is done.
In the worst case, this will block your reads."""
if self._out_writer is None:
return None
return self._out_writer
def close(self):
"""A closed task will close its channel to assure the readers will wake up
:note: its safe to call this method multiple times"""
self._out_writer.close()
def is_closed(self):
""":return: True if the task's write channel is closed"""
return self._out_writer.closed()
def error(self):
""":return: Exception caught during last processing or None"""
return self._exc
def process(self, count=0):
"""Process count items and send the result individually to the output channel"""
# first thing: increment the writer count - other tasks must be able
# to respond properly ( even if it turns out we don't need it later )
self._wlock.acquire()
self._num_writers += 1
self._wlock.release()
items = self._read(count)
try:
try:
if items:
write = self._out_writer.write
if self.apply_single:
for item in items:
rval = self.fun(item)
write(rval)
# END for each item
else:
# shouldn't apply single be the default anyway ?
# The task designers should chunk them up in advance
rvals = self.fun(items)
for rval in rvals:
write(rval)
# END handle single apply
# END if there is anything to do
finally:
self._wlock.acquire()
self._num_writers -= 1
self._wlock.release()
# END handle writer count
except Exception as e:
# be sure our task is not scheduled again
self.set_done()
            # PROBLEM: We have failed to create at least one item, hence it's not
            # guaranteed that enough items will be produced for a possibly blocking
# client on the other end. This is why we have no other choice but
# to close the channel, preventing the possibility of blocking.
# This implies that dependent tasks will go down with us, but that is
# just the right thing to do of course - one loose link in the chain ...
# Other chunks of our kind currently being processed will then
# fail to write to the channel and fail as well
self.close()
# If some other chunk of our Task had an error, the channel will be closed
# This is not an issue, just be sure we don't overwrite the original
# exception with the ReadOnly error that would be emitted in that case.
# We imply that ReadOnly is exclusive to us, as it won't be an error
# if the user emits it
if not isinstance(e, ReadOnly):
self._exc = e
# END set error flag
# END exception handling
# if we didn't get all demanded items, which is also the case if count is 0
# we have depleted the input channel and are done
# We could check our output channel for how many items we have and put that
# into the equation, but whats important is that we were asked to produce
# count items.
if not items or len(items) != count:
self.set_done()
# END handle done state
# If we appear to be the only one left with our output channel, and are
# done ( this could have been set in another thread as well ), make
# sure to close the output channel.
# Waiting with this to be the last one helps to keep the
# write-channel writable longer
# The count is: 1 = wc itself, 2 = first reader channel, + x for every
# thread having its copy on the stack
# + 1 for the instance we provide to refcount
# Soft close, so others can continue writing their results
if self.is_done():
self._wlock.acquire()
try:
if self._num_writers == 0:
self.close()
# END handle writers
finally:
self._wlock.release()
# END assure lock release
# END handle channel closure
#{ Configuration
class ThreadTaskBase(object):
"""Describes tasks which can be used with theaded pools"""
pass
class IteratorTaskBase(Task):
"""Implements a task which processes items from an iterable in a multi-processing
safe manner"""
__slots__ = tuple()
def __init__(self, iterator, *args, **kwargs):
Task.__init__(self, *args, **kwargs)
self._read = IteratorReader(iterator).read
# defaults to returning our items unchanged
if self.fun is None:
self.fun = lambda item: item
class IteratorThreadTask(IteratorTaskBase, ThreadTaskBase):
"""An input iterator for threaded pools"""
lock_type = threading.Lock
class ChannelThreadTask(Task, ThreadTaskBase):
"""Uses an input channel as source for reading items
For instantiation, it takes all arguments of its base, the first one needs
to be the input channel to read from though."""
__slots__ = "_pool_ref"
def __init__(self, in_reader, *args, **kwargs):
Task.__init__(self, *args, **kwargs)
self._read = in_reader.read
self._pool_ref = None
#{ Internal Interface
def reader(self):
""":return: input channel from which we read"""
# the instance is bound in its instance method - lets use this to keep
# the refcount at one ( per consumer )
return self._read.__self__
def set_read(self, read):
"""Adjust the read method to the given one"""
self._read = read
def set_pool(self, pool):
self._pool_ref = weakref.ref(pool)
def pool(self):
""":return: pool we are attached to, or None"""
if self._pool_ref is None:
return None
return self._pool_ref()
    #} END internal interface
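# Hypothetical usage sketch; assumes the ThreadPool from this package's pool
# module, which schedules tasks and hands back a channel reader:
#
#   from async.pool import ThreadPool
#   pool = ThreadPool(size=2)
#   task = IteratorThreadTask(iter(range(10)), "doubler", lambda i: 2 * i)
#   reader = pool.add_task(task)
#   print(reader.read(10))  # up to ten doubled items, order not guaranteed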
| [
"[email protected]"
] | |
cf62c539355e00b0778c2edcea0d321f0c331db4 | f719fb52b2fee32742c62e0267633a68c228d982 | /2017-03-29/gen3.py | 3394a05273741324057417acd390b15bacc994bb | [] | no_license | elsys/python2016-2017 | 76e0fcb97b509a6f87fd010479b44ee702d7b2dd | 290ba35dc1242a9f13a320ada1ec0498acc8fb79 | refs/heads/master | 2021-06-18T08:07:12.025390 | 2017-06-14T15:41:12 | 2017-06-14T15:41:12 | 83,579,817 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 253 | py |
def fun(v):
print("calling fun() with value v=", v)
return 2*v
# print(fun(1))
def gen(maxv):
while maxv > 0:
print("before yield")
yield fun(maxv)
maxv -= 1
g = gen(3)
print(next(g))
# print(next(g))
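# Expected output of the single next(g) call above, illustrating that
# fun() only runs when the generator is advanced:
#   before yield
#   calling fun() with value v= 3
#   6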
"""
for v in g:
print(v)
"""
| [
"[email protected]"
] | |
cb2ad544ec354652fc3ec9b093ddbc618597cd18 | 44badce6303eb8df34707edf27c5f8f2d2bc2697 | /redfoot-1.6/lib/redfootlib/rdf/model/schema.py | 2e583d06866efeaa30576f5f9794e1023a1d9554 | [] | no_license | jtauber/redfoot-orig | d371456f79e8b584f8e58037a5ab33011027484a | a5c26c53ba94c6d8970578bfcbc637aafaad1e11 | refs/heads/master | 2021-01-13T01:13:24.072000 | 2014-06-22T14:58:45 | 2014-06-22T14:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | from __future__ import generators
from redfootlib.rdf.model.core import Core
from redfootlib.rdf.const import LABEL, COMMENT
from redfootlib.rdf.const import TYPE, STATEMENT
from redfootlib.rdf.const import SUBJECT, PREDICATE, OBJECT
from redfootlib.rdf.const import DOMAIN, SUBCLASSOF
class Schema(Core):
def label(self, subject, default=None):
for s, p, o in self.triples(subject, LABEL, None):
return o
return default or subject
def comment(self, subject, default=None):
for s, p, o in self.triples(subject, COMMENT, None):
return o
return default or self.label(subject)
def typeless_resources(self):
for subject in self.subjects():
if not self.exists(subject, TYPE, None):
yield subject
# TODO: should we have a version of this that answers for subclasses too?
def is_of_type(self, subject, type):
return self.exists(subject, TYPE, type)
def subjects_by_type(self, type, predicate, object):
for subject in self.subjects(predicate, object):
if self.is_of_type(subject, type):
yield subject
def get_statement_uri(self, subject, predicate, object):
"""\
Returns the first statement uri for the given subject, predicate, object.
"""
for (s, p, o) in self.triples(None, TYPE, STATEMENT):
if self.exists(s, SUBJECT, subject)\
and self.exists(s, PREDICATE, predicate)\
and self.exists(s, OBJECT, object):
return s
return None
def possible_properties(self, type):
for object in self.transitive_objects(type, SUBCLASSOF):
for subject in self.subjects(DOMAIN, object):
yield subject
def possible_properties_for_subject(self, subject):
for type in self.objects(subject, TYPE):
for property in self.possible_properties(type):
yield property
| [
"[email protected]"
] | |
fe8e9488c5ed18762588b06bc9c15f7ea1d8989a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /NyTjy8nmHj9bmxMTC_17.py | b636158314e9dca4037846b8c88031b88b2ef41e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py |
import math as m
def vol_pizza(radius, height):
return round(m.pi * (radius**2) * height)
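# Example: vol_pizza(4, 8) == round(pi * 4**2 * 8) == 402
# print(vol_pizza(4, 8))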
| [
"[email protected]"
] | |
9cc6d69a4edce9161dbfdc879d96259cff1bacef | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/first_phase/day06/exersice03.py | e8b14b51849de9acde1b36a099be0ce424888398 | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 691 | py | # 在控制台中选取季度,并将相应月份打印出来
# season = input("请输入季度:")
# if season == "春":
# print("该季度有1 2 3 月份")
# elif season == "夏":
# print("该季度有4 5 6 月份")
# elif season == "秋":
# print("该季度有7 8 9 月份")
# elif season == "冬":
# print("该季度有10 11 12 月份")
# else:
# print("您的输入不合法")
season = input("请输入季度:")
season_dict = {"春": (1, 2, 3),
"夏": (4, 5, 6),
"秋": (7, 8, 9),
"冬": (10, 11, 12)
}
if season in season_dict:
print(season_dict[season])
else:
print("输入不正确")
| [
"[email protected]"
] | |
bc96195975a91b5368e14f03c4909420a70a4ac3 | 65bf0113da75390c4cf3960b6a409aca15569a06 | /tests/migrations/0014_apply_report_file.py | e3afd0e4e25db2a852b10394c22262f44c292c82 | [] | no_license | wenpengfan/opsadmin | e7701538265253653adb1c8ce490e0ce71d3b4f6 | 3d997259353dc2734ad153c137a91f3530e0a8ec | refs/heads/master | 2023-03-29T11:50:10.756596 | 2020-11-16T02:41:18 | 2020-11-16T02:41:18 | 313,171,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-06-01 13:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0013_apply_feedback'),
]
operations = [
migrations.AddField(
model_name='apply',
name='report_file',
field=models.CharField(max_length=255, null=True, verbose_name='\u6d4b\u8bd5\u62a5\u544a\u6587\u4ef6'),
),
]
| [
"[email protected]"
] | |
2a916755d6b8e25a39d5161ef7fcb1f6b6730526 | 28e54b74587bb2987234e9bee8e445b762024b18 | /autonetkit/nidb.py | ac97029f1078e8e73467dde3384f5a5509f42b68 | [] | no_license | sk2/ANK-NG | d6b49c864e6e9d5d1b7b6467c5ea2130e9079317 | 7b312fb7346dc2282904f0d9d0bf7441a186a2f5 | refs/heads/master | 2020-06-04T12:44:02.959146 | 2012-08-29T06:18:29 | 2012-08-29T06:18:29 | 4,663,827 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,488 | py | import networkx as nx
import pprint
import collections
import time
class overlay_data_dict(collections.MutableMapping):
"""A dictionary which allows access as dict.key as well as dict['key']
Based on http://stackoverflow.com/questions/3387691
"""
def __repr__(self):
return ", ".join(self.store.keys())
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
def __getattr__(self, key):
return self.store.get(key)
class overlay_data_list_of_dicts(object):
def __init__(self, data):
self.data = data
def __getstate__(self):
return (self.data)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
self.data = state
def __len__(self):
return len(self.data)
def __repr__(self):
return str(self.data)
def __nonzero__(self):
"""Allows for checking if data exists """
if len(self.data):
return True
else:
return False
def __iter__(self):
#TODO: want to introduce some sorting here.... how?
return iter(overlay_data_dict(item) for item in self.data)
class overlay_edge_accessor(object):
#TODO: do we even need this?
"""API to access overlay nodes in ANM"""
#TODO: fix consistency between node_id (id) and edge (overlay edge)
def __init__(self, nidb, edge):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'edge', edge)
def __repr__(self):
#TODO: make this list overlays the node is present in
return "Overlay edge accessor: %s" % self.edge
def __getnewargs__(self):
return ()
def __getattr__(self, overlay_id):
"""Access overlay edge"""
#TODO: check on returning list or single edge if multiple found with same id (eg in G_igp from explode)
edge = self.nidb.edge(self.edge)
return edge
class overlay_edge(object):
"""API to access edge in nidb"""
def __init__(self, nidb, src_id, dst_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'src_id', src_id)
object.__setattr__(self, 'dst_id', dst_id)
def __repr__(self):
return "(%s, %s)" % (self.src, self.dst)
def __getstate__(self):
return (self.nidb, self.src_id, self.dst_id)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
(nidb, src_id, dst_id) = state
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'src_id', src_id)
object.__setattr__(self, 'dst_id', dst_id)
@property
def src(self):
return nidb_node(self.nidb, self.src_id)
@property
def dst(self):
return nidb_node(self.nidb, self.dst_id)
def dump(self):
return str(self._graph[self.src_id][self.dst_id])
def __nonzero__(self):
"""Allows for checking if edge exists
"""
try:
self._graph[self.src_id][self.dst_id]
return True
except KeyError:
return False
@property
def overlay(self):
"""Access node in another overlay graph"""
return overlay_edge_accessor(self.nidb, self)
@property
def _graph(self):
"""Return graph the node belongs to"""
return self.nidb._graph
def get(self, key):
"""For consistency, edge.get(key) is neater than getattr(edge, key)"""
return self.__getattr__(key)
def __getattr__(self, key):
"""Returns edge property"""
return self._graph[self.src_id][self.dst_id].get(key)
def __setattr__(self, key, val):
"""Sets edge property"""
self._graph[self.src_id][self.dst_id][key] = val
class overlay_node_accessor(object):
#TODO: do we even need this?
def __init__(self, nidb, node_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
def __repr__(self):
#TODO: make this list overlays the node is present in
return "Overlay accessor for: %s" % self.nidb
def __getattr__(self, key):
"""Access category"""
return nidb_node_category(self.nidb, self.node_id, key)
class nidb_node_subcategory(object):
def __init__(self, nidb, node_id, category_id, subcategory_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
object.__setattr__(self, 'subcategory_id', subcategory_id)
@property
def _data(self):
return
def __repr__(self):
return self.nidb._graph.node[self.node_id][self.category_id][self.subcategory_id]
class nidb_node_category(object):
#TODO: make this custom dict like above?
def __init__(self, nidb, node_id, category_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
    def __getstate__(self):
        return (self.nidb, self.node_id, self.category_id)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
"""For pickling"""
self._overlays = state
(nidb, node_id, category_id) = state
#TODO: call to self __init__ ???
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
def __repr__(self):
return str(self._node_data.get(self.category_id))
    def __nonzero__(self):
        """Allows truth-testing before attributes are set.
        This simplifies templates and easy checks: sw1.bgp returns False
        if the category is not set, yet r1.bgp.attr = value still works later.
        """
if self.category_id in self._node_data:
return True
return False
@property
def _category_data(self):
return self._node_data[self.category_id]
def __getitem__(self, key):
"""Used to access the data directly. calling node.key returns wrapped data for templates"""
return self._category_data[key]
@property
def _node_data(self):
return self.nidb._graph.node[self.node_id]
def __getattr__(self, key):
"""Returns edge property"""
#TODO: allow appending if non existent: so can do node.bgp.session.append(data)
data = self._category_data.get(key)
try:
[item.keys() for item in data]
#TODO: replace this with an OrderedDict
return overlay_data_list_of_dicts(data)
except AttributeError:
pass # not a dict
except TypeError:
pass # also not a dict
return data
def dump(self):
return str(self._node_data)
def __setattr__(self, key, val):
"""Sets edge property"""
try:
self._node_data[self.category_id][key] = val
except KeyError:
self._node_data[self.category_id] = {} # create dict for this data category
setattr(self, key, val)
#TODO: this should also inherit from collections, so don't break __getnewargs__ etc
class nidb_node(object):
"""API to access overlay graph node in network"""
def __init__(self, nidb, node_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
def __repr__(self):
return self._node_data['label']
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.nidb, self.node_id)
def __setstate__(self, state):
(nidb, node_id) = state
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
@property
def _node_data(self):
return self.nidb._graph.node[self.node_id]
def dump(self):
return str(self._node_data)
@property
def is_router(self):
return self.device_type == "router"
@property
def is_switch(self):
return self.device_type == "switch"
@property
def is_server(self):
return self.device_type == "server"
@property
def is_l3device(self):
"""Layer 3 devices: router, server, cloud, host
ie not switch
"""
return self.is_router or self.is_server
def edges(self, *args, **kwargs):
#TODO: want to add filter for *args and **kwargs here too
return self.nidb.edges(self, *args, **kwargs)
@property
def id(self):
return self.node_id
@property
def label(self):
return self.__repr__()
def get(self, key):
return getattr(self, key)
def __getattr__(self, key):
"""Returns edge property"""
data = self._node_data.get(key)
try:
[item.keys() for item in data]
return overlay_data_list_of_dicts(data)
except TypeError:
pass # Not set yet
except AttributeError:
pass # not a dict
try:
data.keys()
return nidb_node_category(self.nidb, self.node_id, key)
except TypeError:
pass # Not set yet
except AttributeError:
pass # not a dict
if data:
return data
else:
return nidb_node_category(self.nidb, self.node_id, key)
def __setattr__(self, key, val):
"""Sets edge property"""
self._node_data[key] = val
#return nidb_node_category(self.nidb, self.node_id, key)
def __iter__(self):
return iter(self._node_data)
@property
def overlay(self):
return overlay_node_accessor(self.nidb, self.node_id)
class nidb_graph_data(object):
"""API to access overlay graph data in network"""
def __init__(self, nidb):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
def __repr__(self):
return "NIDB data: %s" % self.nidb._graph.graph
def __getattr__(self, key):
"""Returns edge property"""
return self.nidb._graph.graph.get(key)
def __setattr__(self, key, val):
"""Sets edge property"""
self.nidb._graph.graph[key] = val
#TODO: make this inherit same overlay base as overlay_graph for add nodes etc properties
# but not the degree etc
class lab_topology(object):
"""API to access lab topology in network"""
def __init__(self, nidb, topology_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'topology_id', topology_id)
def __repr__(self):
return "Lab Topology: %s" % self.topology_id
@property
def _topology_data(self):
return self.nidb._graph.graph['topologies'][self.topology_id]
def dump(self):
return str(self._topology_data)
def __getattr__(self, key):
"""Returns topology property"""
data = self._topology_data.get(key)
try:
[item.keys() for item in data]
#TODO: replace this with an OrderedDict
return overlay_data_list_of_dicts(data)
except AttributeError:
pass # not a dict
except TypeError:
pass # also not a dict
        return data
def __setattr__(self, key, val):
"""Sets topology property"""
self._topology_data[key] = val
class NIDB_base(object):
#TODO: inherit common methods from same base as overlay
def __init__(self):
pass
def __getstate__(self):
return self._graph
def __setstate__(self, state):
self._graph = state
def __getnewargs__(self):
return ()
def __repr__(self):
return "nidb"
def dump(self):
return "%s %s %s" % (
pprint.pformat(self._graph.graph),
pprint.pformat(self._graph.nodes(data=True)),
pprint.pformat(self._graph.edges(data=True))
)
#TODO: add restore function
def save(self):
import os
pickle_dir = os.path.join("versions", "nidb")
if not os.path.isdir(pickle_dir):
os.makedirs(pickle_dir)
pickle_file = "nidb_%s.pickle.tar.gz" % self.timestamp
pickle_path = os.path.join(pickle_dir, pickle_file)
nx.write_gpickle(self._graph, pickle_path)
@property
def name(self):
return self.__repr__()
def __len__(self):
return len(self._graph)
def edges(self, nbunch = None, *args, **kwargs):
# nbunch may be single node
#TODO: Apply edge filters
if nbunch:
try:
nbunch = nbunch.node_id
except AttributeError:
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
def filter_func(edge):
return (
all(getattr(edge, key) for key in args) and
all(getattr(edge, key) == val for key, val in kwargs.items())
)
#TODO: See if more efficient way to access underlying data structure rather than create overlay to throw away
all_edges = iter(overlay_edge(self, src, dst)
for src, dst in self._graph.edges(nbunch)
)
return (edge for edge in all_edges if filter_func(edge))
def node(self, key):
"""Returns node based on name
This is currently O(N). Could use a lookup table"""
#TODO: check if node.node_id in graph, if so return wrapped node for this...
# returns node based on name
try:
if key.node_id in self._graph:
return nidb_node(self, key.node_id)
except AttributeError:
# doesn't have node_id, likely a label string, search based on this label
for node in self:
if str(node) == key:
return node
print "Unable to find node", key, "in", self
return None
def edge(self, edge_to_find):
"""returns edge in this graph with same src and same edge_id"""
src_id = edge_to_find.src_id
search_id = edge_to_find.edge_id
#TODO: if no edge_id then search for src, dst pair
for src, dst in self._graph.edges_iter(src_id):
try:
if self._graph[src][dst]['edge_id'] == search_id:
return overlay_edge(self, src, dst)
except KeyError:
pass # no edge_id for this edge
@property
def data(self):
return nidb_graph_data(self)
def update(self, nbunch, **kwargs):
for node in nbunch:
for (category, key), value in kwargs.items():
node.category.set(key, value)
def nodes(self, *args, **kwargs):
result = self.__iter__()
if len(args) or len(kwargs):
result = self.filter(result, *args, **kwargs)
return result
def filter(self, nbunch = None, *args, **kwargs):
#TODO: also allow nbunch to be passed in to subfilter on...?
"""TODO: expand this to allow args also, ie to test if value evaluates to True"""
# need to allow filter_func to access these args
if not nbunch:
nbunch = self.nodes()
def filter_func(node):
return (
all(getattr(node, key) for key in args) and
all(getattr(node, key) == val for key, val in kwargs.items())
)
return (n for n in nbunch if filter_func(n))
def add_nodes_from(self, nbunch, retain=[], **kwargs):
try:
retain.lower()
retain = [retain] # was a string, put into list
except AttributeError:
pass # already a list
if len(retain):
add_nodes = []
for n in nbunch:
data = dict( (key, n.get(key)) for key in retain)
add_nodes.append( (n.node_id, data) )
nbunch = add_nodes
else:
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
self._graph.add_nodes_from(nbunch, **kwargs)
def add_edge(self, src, dst, retain=[], **kwargs):
self.add_edges_from([(src, dst)], retain, **kwargs)
def add_edges_from(self, ebunch, retain=[], **kwargs):
try:
retain.lower()
retain = [retain] # was a string, put into list
except AttributeError:
pass # already a list
#TODO: need to test if given a (id, id) or an edge overlay pair... use try/except for speed
try:
if len(retain):
add_edges = []
for e in ebunch:
data = dict( (key, e.get(key)) for key in retain)
add_edges.append( (e.src.node_id, e.dst.node_id, data) )
ebunch = add_edges
else:
ebunch = [(e.src.node_id, e.dst.node_id) for e in ebunch]
except AttributeError:
ebunch = [(src.node_id, dst.node_id) for src, dst in ebunch]
#TODO: decide if want to allow nodes to be created when adding edge if not already in graph
self._graph.add_edges_from(ebunch, **kwargs)
def __iter__(self):
return iter(nidb_node(self, node)
for node in self._graph)
class lab_topology_accessor(object):
"""API to access overlay graphs in ANM"""
def __init__(self, nidb):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
@property
def topologies(self):
        return self.nidb._graph.graph['topologies']
#TODO: add iter similarly to anm overlay accessor
def __iter__(self):
return iter(lab_topology(self.nidb, key) for key in self.topologies.keys())
def __repr__(self):
return "Available lab topologies: %s" % ", ".join(sorted(self.topologies.keys()))
def __getattr__(self, key):
"""Access overlay graph"""
return lab_topology(self.nidb, key)
def __getitem__(self, key):
"""Access overlay graph"""
return lab_topology(self.nidb, key)
def get(self, key):
return getattr(self, key)
def add(self, key):
self.topologies[key] = {}
return lab_topology(self.nidb, key)
class NIDB(NIDB_base):
def __init__(self):
self._graph = nx.Graph() # only for connectivity, any other information stored on node
self._graph.graph['topologies'] = collections.defaultdict(dict)
self.timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
@property
def topology(self):
return lab_topology_accessor(self)
def subgraph(self, nbunch, name = None):
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
return overlay_subgraph(self._graph.subgraph(nbunch), name)
def boundary_nodes(self, nbunch, nbunch2 = None):
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
return iter(nidb_node(self, node)
for node in nx.node_boundary(self._graph, nbunch, nbunch2))
def boundary_edges(self, nbunch, nbunch2 = None):
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
return iter(overlay_edge(self, src, dst)
for (src, dst) in nx.edge_boundary(self._graph, nbunch, nbunch2))
class overlay_subgraph(NIDB_base):
def __init__(self, graph, name = None):
#TODO: need to refer back to the source nidb
self._graph = graph # only for connectivity, any other information stored on node
self._name = name
def __repr__(self):
return "nidb: %s" % self._name
| [
"[email protected]"
] | |
0d9a7d280e51e2933b55ef5fd026a4939f72886c | f38e78214992de722a6ec2012e844bce7b3c59ed | /bin/taskwarrior | 25c78d0f0862d46f2f10288152304e2e7cfef0a4 | [
"MIT"
] | permissive | clckwrkbdgr/dotfiles | 20fb86f54d93ae4936c334898c3d7b1b3820fb06 | a7e880e189bfa4793f30ff928b049e4a182a38cd | refs/heads/master | 2023-08-31T13:13:47.533868 | 2023-08-30T18:32:00 | 2023-08-30T18:32:00 | 20,396,084 | 2 | 2 | MIT | 2022-10-01T16:35:31 | 2014-06-02T07:26:38 | Python | UTF-8 | Python | false | false | 1,716 | #!/usr/bin/env python
import logging
import functools
logger = logging.getLogger('taskwarrior')
from clckwrkbdgr import utils
import clckwrkbdgr.taskwarrior
from clckwrkbdgr.taskwarrior import TaskWarrior, Config
import clckwrkbdgr.logging
import click, click_default_group
import clckwrkbdgr.click
@functools.lru_cache()
def get_taskwarrior():
return TaskWarrior(Config.read_config())
@clckwrkbdgr.click.windows_noexpand_args
@click.group(cls=click_default_group.DefaultGroup, default='current', default_if_no_args=True)
@click.option('--debug', is_flag=True, help='Enables debug output.')
def cli(debug=False):
""" Provides simple interface to manage user's task flow. """
clckwrkbdgr.logging.init(logger, debug=debug)
@cli.command('current')
@utils.exits_with_return_value
def current_task():
""" Displays current task. """
if get_taskwarrior().get_current_task() is None:
return False
print(get_taskwarrior().get_current_task())
return True
@cli.command('start')
@click.argument('task', required=False)
@utils.exits_with_return_value
def start_task(task=None):
""" Starts given task.
If task is not given, resumes previous task.
"""
return get_taskwarrior().start(task)
@cli.command('stop')
@utils.exits_with_return_value
def stop_task():
""" Stops current task. """
return get_taskwarrior().stop()
@cli.command('list')
@utils.exits_with_return_value
def list_history():
""" Prints task execution history. """
for entry in get_taskwarrior().get_history():
print(entry)
return True
@cli.command('fix')
@utils.exits_with_return_value
def fix_history():
""" Provides interface to fix task history manually. """
return get_taskwarrior().fix_history()
if __name__ == '__main__':
cli()
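# Example session (hypothetical task names):
#   taskwarrior start "write report"   # begin tracking a task
#   taskwarrior current                # show what is being tracked
#   taskwarrior stop                   # stop tracking
#   taskwarrior list                   # print the execution history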
| [
"[email protected]"
] | ||
5b2abe106d6315f4695312f7040b4d674324543f | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /eet/Merge_k_Sorted_Lists.py | a79231a7a08f8900b10c642d099fb90026c69498 | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | """
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
def _merge_two(a, b):
fake_head = ListNode(0)
head = fake_head
            while a and b:
if a.val <= b.val:
head.next = a
head = a
a = a.next
else:
head.next = b
head = b
b = b.next
if a:
head.next = a
if b:
head.next = b
return fake_head.next
def _merge_sort(arr):
if len(arr) == 1:
return arr[0]
mid = len(arr) // 2
left = _merge_sort(arr[:mid])
right = _merge_sort(arr[mid:])
return _merge_two(left, right)
return _merge_sort(lists)
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None
import heapq

class Solution(object):
    def mergeKLists(self, lists):
        if not lists:
            return None
        heap = []
        count = 0  # tie-breaker: heapq tuples must never compare ListNode objects
        for node in lists:
            if node:
                heapq.heappush(heap, (node.val, count, node))
                count += 1
        if not heap:
            return None
        (value, _, head) = heapq.heappop(heap)
        operator = head
        if head.next:
            heapq.heappush(heap, (head.next.val, count, head.next))
            count += 1
        while heap:
            (value, _, poped) = heapq.heappop(heap)
            operator.next = poped
            operator = operator.next
            if poped.next:
                heapq.heappush(heap, (poped.next.val, count, poped.next))
                count += 1
        return head
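# Hypothetical local test harness (LeetCode normally supplies ListNode):
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

def _build(values):
    head = cur = ListNode(0)
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

if __name__ == '__main__':
    merged = Solution().mergeKLists(
        [_build([1, 4, 5]), _build([1, 3, 4]), _build([2, 6])])
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4, 5, 6]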
| [
"[email protected]"
] | |
ebf05022393496f5a3c2690de8595fb5f621a652 | fcfb3f5e94f35aa0d7c5632efec1d1c15d66e856 | /day9/flask_day3/inherit_demo/app.py | 826748d83c0c62228ad9455de8a5457081fe0b4b | [
"Apache-2.0"
] | permissive | gaohj/wh1904js | 98a9d1dd63d42766b656f07ce537b5933eaafb78 | a3af38f8311f79eb9f2e08a3de16dd1e02c40714 | refs/heads/master | 2021-07-11T17:16:49.885524 | 2020-01-17T09:48:15 | 2020-01-17T09:48:15 | 232,022,360 | 0 | 0 | Apache-2.0 | 2021-03-20T02:41:32 | 2020-01-06T04:14:22 | JavaScript | UTF-8 | Python | false | false | 322 | py | from flask import Flask,render_template
from flask_script import Manager
app = Flask(__name__)
manager = Manager(app)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/detail/')
def details():
return render_template('detail.html')
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
569e5135fac1555cf0fb518269b99b2c71661cc5 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_9477.py | fa57d72004d0e3842548a0d58e2499f639d33ab5 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | # Django Testing: Using a login decorator for test cases
class SimpleTest(TestCase):
def setUp(self):
self.client.login(username='foo', password='bar')
| [
"[email protected]"
] | |
52b1286ab48d460abebb87719f7d65cef1e7009d | c62a07c8051d6106717863651004c8186a0e3027 | /logic.py | 84c2678f81cea7f404f2c5b6faddd8b4b1335110 | [] | no_license | isakura313/third_22 | bf47bef5914ac5debeb33a36dad39566181ed6fb | c43911d6b73f638894d14f757a0ec5462e9e8005 | refs/heads/master | 2022-04-18T14:27:00.231299 | 2020-04-22T17:01:23 | 2020-04-22T17:01:23 | 257,967,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | role = input("Введите вашу роль в проекте: ")
age = input("Введите ваш возраст: ")
age = int(age)
if role == "admin" and age > 18:
print("У вас есть все права")
elif role == "user" and age> 16:
print("У вас на этом проекте есть некоторые права")
else:
print(" этот сервис закрыт на карантин")
| [
"[email protected]"
] | |
7953194e08d87e2cc8bd5e2a743dc383d4d6458b | fc3c9d2143aecedce191bb91dbd01babe7f6d40b | /tensorpack/callbacks/dump.py | ef62833b31118c6a9f00e80eb5e6c9216d57a65e | [
"Apache-2.0"
] | permissive | rahulbprakash/tensorpack | 0ee10de245f486d17a252354833c98dd713fd6e6 | b2ec42a8d152760498aa911818d50b01e408bb43 | refs/heads/master | 2020-12-30T19:12:08.800662 | 2016-06-09T23:03:37 | 2016-06-09T23:03:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | # -*- coding: UTF-8 -*-
# File: dump.py
# Author: Yuxin Wu <[email protected]>
import os
from scipy.misc import imsave
import numpy as np
from .base import Callback
from ..utils import logger
from ..tfutils import get_op_var_name
__all__ = ['DumpParamAsImage']
class DumpParamAsImage(Callback):
"""
Dump a variable to image(s) after every epoch.
"""
def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False):
"""
:param var_name: the name of the variable.
:param prefix: the filename prefix for saved images. Default is the op name.
:param map_func: map the value of the variable to an image or list of
images of shape [h, w] or [h, w, c]. If None, will use identity
:param scale: a multiplier on pixel values, applied after map_func. default to 255
:param clip: whether to clip the result to [0, 255]
"""
op_name, self.var_name = get_op_var_name(var_name)
self.func = map_func
if prefix is None:
self.prefix = op_name
else:
self.prefix = prefix
self.log_dir = logger.LOG_DIR
self.scale = scale
self.clip = clip
def _before_train(self):
# TODO might not work for multiGPU?
self.var = self.graph.get_tensor_by_name(self.var_name)
def _trigger_epoch(self):
val = self.trainer.sess.run(self.var)
if self.func is not None:
val = self.func(val)
if isinstance(val, list):
for idx, im in enumerate(val):
self._dump_image(im, idx)
else:
self._dump_image(val)
def _dump_image(self, im, idx=None):
assert im.ndim in [2, 3], str(im.ndim)
fname = os.path.join(
self.log_dir,
self.prefix + '-ep{:03d}{}.png'.format(
                self.epoch_num, '-' + str(idx) if idx is not None else ''))
res = im * self.scale
if self.clip:
res = np.clip(res, 0, 255)
imsave(fname, res.astype('uint8'))
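# Hypothetical usage inside a TrainConfig callback list; the tensor name and
# map_func below are illustrative only:
#   DumpParamAsImage('gen/deconv3/output:0',
#                    map_func=lambda x: x[0] * 0.5 + 0.5, scale=255, clip=True)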
| [
"[email protected]"
] | |
9d3ebb55f1314362a215d95a4aadf6a840bf824d | 1b9075ffea7d4b846d42981b41be44238c371202 | /2009/stable/hardware/firmware/flashrom/actions.py | 870e4cb6e12699846f65a6f0b2a8ad85380f45fd | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def build():
autotools.make()
def install():
pisitools.dosbin("flashrom")
pisitools.doman("flashrom.8")
pisitools.dodoc("ChangeLog", "COPYING", "README")
| [
"[email protected]"
] | |
22af9c136349ee70da4d000c5eef00cb1baf0109 | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /ABC007/ABC007_A.py | 679623e243c760e493ba9bd56ca1c2569cd69a61 | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | n = int(input())
#a, b = map(int,input().split())
#l = list(map(int,input().split()))
#l = [list(map(int,input().split())) for i in range(n)]
print(n-1) | [
"[email protected]"
] | |
2859050f5f4926044ceeb1a9937dfdf2a9332f07 | 3ec84a6e34f9bc709cb203f8b3f668f2b6697e2a | /python20200322-master/class_Python기초/py12패키지/mylib/operation/test.py | a8124c496932fed9a4168d433ceb4a82eeb59f3b | [] | no_license | eopr12/pythonclass | 52079bd99358ac73664beed236659b97c8b63d40 | 2526fe255969a799f6c534c9db6bff9e4eccd877 | refs/heads/master | 2022-07-10T11:17:31.692754 | 2020-05-16T08:43:00 | 2020-05-16T08:43:00 | 263,377,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py |
def test_operation():
result = None
try:
result = "test_operation"
except Exception as ex:
print(ex)
return result | [
"[email protected]"
] | |
a227df16f5ae47666110b93b5b1169d8690aa7b7 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractEllionoratranslationsCom.py | 0f48d87eec935ae8a6cf120a0f95327cacb1e09f | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 563 | py |
def extractEllionoratranslationsCom(item):
'''
Parser for 'ellionoratranslations.com'
'''
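	# Helpers such as extractVolChapterFragmentPostfix and
	# buildReleaseMessageWithType appear to be injected by the enclosing
	# WebMirror parser framework; this module is auto-generated.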
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
922ad653e03b85705765df9053e41ed4a995fcc9 | 7e3c7e9bf8e8410b688787bbf41f93e0bce30ef8 | /misc/fix_keras_optimizer.py | 4c1e72705ec6e77de0e31f5dd426bd7ffed1acef | [] | no_license | directorscut82/msthesis-experiments | bb8233d4e54da0b294b3a43f219bc424626e8ad5 | f86e344c972f2b61c3fa16eae523fd20303e8842 | refs/heads/master | 2020-03-23T08:24:19.535200 | 2017-07-27T06:23:18 | 2017-07-27T06:23:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/env python
"""
Make keras 1.x models usable in keras 2.x.
Run this when you get the following error:
ValueError: Optimizer weight shape (512,) not compatible with provided weight shape (32,)
"""
import glob
import h5py
model_files = sorted(glob.glob('*.h5'))
for model_file in model_files:
print("Update '{}'".format(model_file))
with h5py.File(model_file, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
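# Usage: run from the directory containing the models, e.g.
#   python fix_keras_optimizer.py
# Removing 'optimizer_weights' is irreversible, so keep backups if you
# still need the optimizer state.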
| [
"[email protected]"
] | |
a515fc587646476cc8878bb50f72120b4e6aa5ba | bad85cd8d547a071baf4b6590f7e81d13ef1ec0d | /assistant/core/views.py | 2732966b89290e6982d4e90149bce48ffa294e63 | [
"MIT"
] | permissive | kapiak/ware_prod | 92e11671059642e14219d5aa8334e0564403db77 | ae61256890834c434d2e38cc2ccacf00b638665a | refs/heads/master | 2023-01-06T04:36:43.173093 | 2020-09-21T04:06:51 | 2020-09-21T04:06:51 | 310,320,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from typing import List
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from assistant.orders.models import Order
from assistant.products.models import Product
class DashboardViewMixin(LoginRequiredMixin):
title: str = None
breadcrumbs: List = []
def get_title(self):
return self.title
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
context.update({'title': self.get_title()})
return context
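# Example of the mixin in use (hypothetical view; template path assumed):
#
# class OrdersPageView(DashboardViewMixin, TemplateView):
#     template_name = "core/orders.html"
#     title = "Orders"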
class DashboardTemplateView(LoginRequiredMixin, TemplateView):
template_name = "core/dashboard.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'orders': Order.objects.all(),
'products': Product.objects.all()
})
return context
| [
"[email protected]"
] | |
f7526d46e57dacaf54913613ea92feeddb67cffd | e34cbf5fce48f661d08221c095750240dbd88caf | /python/homework/day10_ansibleLike/core/verify.py | 70613c6ced9cebe0e42908774b56c4de14604d30 | [] | no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 790 | py | #!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import getpass
from lib import mysql_helper
from conf.settings import *
def verify():
conn = mysql_helper.MySQLHandler(db_host, db_port, db_user, db_pass, db_name)
result = conn.select('select * from {0}', 'users')
count = 3
while count > 0:
_username = input("请输入用户名:").strip()
_password = getpass.getpass("请输入密码:").strip() # pycharm调试不好用
for user_dic in result:
if _username == user_dic['username'] and _password == user_dic['password']:
print("\033[32;1m验证成功!\033[0m")
return True, user_dic
count -= 1
else:
print("\033[31;1m超过3次!\033[0m")
return False
| [
"[email protected]"
] | |
143e7fb5eb3ea2d1e3afc369d304ee334bde63fc | 7b6c0318585f1639529002e8c69f23f3603775a9 | /detectron2/modeling/box_regression.py | 1488ad748288cefe9b4b2e9d28f00774362e203f | [
"Apache-2.0"
] | permissive | av777x/detectron2 | 41f2dda1198c21ef999da1cd0e28b980f68065ee | c1794881d6d2fac6af0b3206937d32628677469c | refs/heads/master | 2023-03-03T19:40:45.820084 | 2021-02-19T00:02:03 | 2021-02-19T00:02:03 | 340,200,661 | 0 | 0 | Apache-2.0 | 2021-02-19T00:02:04 | 2021-02-18T23:04:22 | null | UTF-8 | Python | false | false | 11,122 | py | # Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Tuple
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.layers import cat
from detectron2.structures import Boxes
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated"]
@torch.jit.script
class Box2BoxTransform(object):
"""
The box-to-box transform defined in R-CNN. The transformation is parameterized
by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
"""
def __init__(
self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
such that the deltas have unit variance; now they are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
self.scale_clamp = scale_clamp
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): source boxes, e.g., object proposals
target_boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
target_widths = target_boxes[:, 2] - target_boxes[:, 0]
target_heights = target_boxes[:, 3] - target_boxes[:, 1]
target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
wx, wy, ww, wh = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
deltas = torch.stack((dx, dy, dw, dh), dim=1)
assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
deltas = deltas.float() # ensure fp32 for decoding precision
boxes = boxes.to(deltas.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
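    # Round-trip sketch (illustrative only, not part of the public API):
    #   transform = Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0))
    #   deltas = transform.get_deltas(src_boxes, gt_boxes)   # (N, 4)
    #   transform.apply_deltas(deltas, src_boxes)            # ~= gt_boxes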
@torch.jit.script
class Box2BoxTransformRotated(object):
"""
The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized
by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height
by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height),
and rotate a box's angle by da (radians).
Note: angles of deltas are in radians while angles of boxes are in degrees.
"""
def __init__(
self,
weights: Tuple[float, float, float, float, float],
scale_clamp: float = _DEFAULT_SCALE_CLAMP,
):
"""
Args:
weights (5-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh, da) deltas. These are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
self.scale_clamp = scale_clamp
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): Nx5 source boxes, e.g., object proposals
target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)
target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(
target_boxes, dim=1
)
wx, wy, ww, wh, wa = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
# Angles of deltas are in radians while angles of boxes are in degrees.
# the conversion to radians serve as a way to normalize the values
da = target_angles - src_angles
da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
da *= wa * math.pi / 180.0
deltas = torch.stack((dx, dy, dw, dh, da), dim=1)
assert (
(src_widths > 0).all().item()
), "Input boxes to Box2BoxTransformRotated are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*5).
deltas[i] represents box transformation for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 5)
"""
assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5
boxes = boxes.to(deltas.dtype).unsqueeze(2)
ctr_x = boxes[:, 0]
ctr_y = boxes[:, 1]
widths = boxes[:, 2]
heights = boxes[:, 3]
angles = boxes[:, 4]
wx, wy, ww, wh, wa = self.weights
dx = deltas[:, 0::5] / wx
dy = deltas[:, 1::5] / wy
dw = deltas[:, 2::5] / ww
dh = deltas[:, 3::5] / wh
da = deltas[:, 4::5] / wa
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr
pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr
pred_boxes[:, 2::5] = torch.exp(dw) * widths # width
pred_boxes[:, 3::5] = torch.exp(dh) * heights # height
# Following original RRPN implementation,
# angles of deltas are in radians while angles of boxes are in degrees.
pred_angle = da * 180.0 / math.pi + angles
pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
pred_boxes[:, 4::5] = pred_angle
return pred_boxes
def _dense_box_regression_loss(
anchors: List[Boxes],
box2box_transform: Box2BoxTransform,
pred_anchor_deltas: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
fg_mask: torch.Tensor,
box_reg_loss_type="smooth_l1",
smooth_l1_beta=0.0,
):
"""
Compute loss for dense multi-level box regression.
Loss is accumulated over ``fg_mask``.
Args:
anchors: #lvl anchor boxes, each is (HixWixA, 4)
pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou".
smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
"""
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
if box_reg_loss_type == "smooth_l1":
gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
loss_box_reg = smooth_l1_loss(
cat(pred_anchor_deltas, dim=1)[fg_mask],
gt_anchor_deltas[fg_mask],
beta=smooth_l1_beta,
reduction="sum",
)
elif box_reg_loss_type == "giou":
pred_boxes = [
box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
]
loss_box_reg = giou_loss(
torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
)
else:
raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
return loss_box_reg
| [
"[email protected]"
] | |
1f274d45c819c75e5909ef811396617f68af6e41 | 32271508e449e8842f38186e5e4528696b41d1f9 | /tabby/tab/migrations/0025_remove_race_win_market.py | 16142941a5e3532c0ac93d4c68b1aef608b2742a | [] | no_license | Tjorriemorrie/tabby | d623ad5be3ae53b9370fd400f362d940e7191ac3 | 09c697bd48fdc4de548c911f1fd81b2a7e4b511b | refs/heads/master | 2022-12-10T10:01:40.317751 | 2019-12-13T04:31:12 | 2019-12-13T04:31:12 | 100,076,546 | 4 | 2 | null | 2022-12-08T06:51:55 | 2017-08-11T23:26:00 | Jupyter Notebook | UTF-8 | Python | false | false | 326 | py | # Generated by Django 2.0.1 on 2018-01-30 01:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tab', '0024_auto_20180128_1356'),
]
operations = [
migrations.RemoveField(
model_name='race',
name='win_market',
),
]
| [
"[email protected]"
] | |
21dae073458e0bac5899c85d1f117f88958119dc | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-myshop/myshop/settings.py | e93a63838401dfb03886299b9b686dadf4dae54b | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 3,011 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ea=e^w3s$qfrb9_+5oq962$u(e7xq&me_b%ez7^c!6&6hm-q0d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'orders.apps.OrdersConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
CART_SESSION_ID = 'cart'
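# Session key under which the cart app keeps its contents; the cart context
# processor registered above exposes the cart to every template.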
| [
"[email protected]"
] | |
e350c375091476a2506c1e698410dc3a6adfbfb8 | 30f8afce1ba484183d8e1e14aae76cabb2d92354 | /pbase/day29/old.py | 741ae415c047f7297fc6ca49c5aab16131a342b0 | [] | no_license | brooot/Python_Base_Codes | d83e8c3b8a37b86672412c812fdb0d47deb67836 | a864685e160b5df4162a6f9fb910627eda702aaf | refs/heads/master | 2023-04-10T20:08:39.161289 | 2021-03-25T12:59:23 | 2021-03-25T12:59:23 | 200,570,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from socket import *
import sys
class FtpClient(object):
def __init__(self,serveraddr):
self.serveraddr=serveraddr
    def do_list(self):
        sockfd=socket()
        sockfd.connect(self.serveraddr)  # was self.ser, an undefined attribute
        sockfd.close()
    def do_get(self):
        pass  # 'get' handling not implemented yet
def main():
    if len(sys.argv)<3:
        print('argv is error')
        sys.exit(1)  # exit before sys.argv[1] raises an IndexError
host=sys.argv[1]
port=int(sys.argv[2])
BUFFERSIZE=1024
addr=(host,port)
# sockfd=socket()
while True:
print('**command **')
print('**list **')
print('**get **')
print('**put filename**')
print('**quit **')
        data=input('enter command: ')
ftp=FtpClient(addr)
if data[:4]=='list':
ftp.do_list()
elif data[:3]=='get':
ftp.do_get()
        elif data[:3]=='put':
pass
else:
            sys.exit(0)  # sys.quit does not exist
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
a296b492f907bd7c9b0172fb9bddb55b6caf2e3b | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_15314.py | d42d7ec70ac3476ead6abba82a8293d71aa4e166 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | # Different results when using sklearn RandomizedPCA with sparse and dense matrices
RandomizedPCA
| [
"[email protected]"
] | |
c51c27f98dfdd33ed8055495236836b200efc808 | 93652e0f73558ffa24059647324f79ba043ba241 | /topi/tests/python/test_topi_bitserial_conv2d.py | 6df18483a45f9263f685e4ade3c425b75b29eb76 | [
"Apache-2.0"
] | permissive | souptc/tvm | 830b1444435b6bda267df305538a783eb687d473 | a8574e7bb814997cb3920a72035071899635b753 | refs/heads/master | 2020-03-25T12:42:20.686770 | 2018-08-06T21:07:38 | 2018-08-06T21:07:38 | 143,789,191 | 1 | 0 | Apache-2.0 | 2018-08-06T22:18:20 | 2018-08-06T22:18:19 | null | UTF-8 | Python | false | false | 4,821 | py | import os
import numpy as np
import tvm
import topi
import topi.testing
from tvm.contrib.pickle_memoize import memoize
from topi.util import get_const_tuple
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
def verify_bitserial_conv2d_nchw(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type='uint32'
out_dtype='int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_channel, in_height, in_width), dtype=input_type, name='A')
W = tvm.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits,
out_dtype=out_dtype, layout="NCHW", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nchw([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
else:
b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], "llvm")
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def verify_bitserial_conv2d_nhwc(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type='uint32'
out_dtype='int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name='A')
W = tvm.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits, out_dtype=out_dtype,
layout="NHWC", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nhwc([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], 'llvm')
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
in_size = 56
ic, oc = 64, 64
k = 3
stride = 1
pad = 1
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
if __name__ == "__main__":
test_bitserial_conv2d() | [
"[email protected]"
] | |
3d1f1756528afaa87544ba2b6c62e67f3b6572f7 | b2c24abff86b28ca8a495b3a3c3227f070737aa2 | /parlai/agents/AdaND/utils.py | 2467ef5a277ddf9fdc24d14669efbf63843aff07 | [
"MIT"
] | permissive | hengyicai/AdaND | d5dda1b2fcd2abd17be6603de632f0515382b37b | 5e3fefb1cf40c42215a37246efc64958ae6db005 | refs/heads/master | 2023-09-01T07:38:49.076947 | 2020-10-19T04:58:00 | 2020-10-19T04:58:00 | 204,633,631 | 10 | 2 | MIT | 2023-08-11T19:52:23 | 2019-08-27T06:20:39 | Python | UTF-8 | Python | false | false | 947 | py | import torch.nn as nn
def reverse(lst):
return lst[::-1]
class FeedForward(nn.Module):
def __init__(self, input_dim, out_dim, hidden_sizes=(512,),
activation="Tanh", bias=True, dropout=0.1):
super(FeedForward, self).__init__()
self.activation = getattr(nn, activation)()
n_inputs = [input_dim] + list(hidden_sizes)
n_outputs = list(hidden_sizes) + [out_dim]
self.linears = nn.ModuleList([nn.Linear(n_in, n_out, bias=bias)
for n_in, n_out in zip(n_inputs, n_outputs)])
self.num_layer = len(self.linears)
self.dropout_layer = nn.Dropout(dropout)
def forward(self, input_):
x = input_
i = 0
for linear in self.linears:
x = linear(x)
if i < self.num_layer - 1:
x = self.dropout_layer(x)
x = self.activation(x)
i += 1
return x
| [
"[email protected]"
] | |
fdef0e55fea15ec9925ee84443a708abafdfecc5 | 4a8775eac5a5f39400848b4c81476c49ddfbd871 | /apps/api-test/urls.py | a999262fcf5944dbfda9b7441dd035df4a93df14 | [] | no_license | wdudek82/quotarium-backend | ec6d73c13ed06a201066442f108cdbcc4777da5e | b37cbbe1a136f89fe10ed6d6418a69d585bec8ff | refs/heads/master | 2022-12-10T18:32:37.564838 | 2018-07-08T20:40:28 | 2018-07-08T20:40:28 | 140,035,629 | 0 | 0 | null | 2022-12-08T02:17:15 | 2018-07-06T22:39:13 | Python | UTF-8 | Python | false | false | 142 | py | from django.conf.urls import url
from .views import UserViewSet
urlpatterns = [
url(r'^users/$', UserViewSet.as_view(), name='users'),
] | [
"[email protected]"
] | |
e6a2fb17e898a3dedb7ffb531fb3c9dcd46ee0a7 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractRandnovelstlsamatchateaWordpressCom.py | d0cebc880f2171d48c773fdaf78dc3e6e389d55b | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 586 | py |
def extractRandnovelstlsamatchateaWordpressCom(item):
'''
Parser for 'randnovelstlsamatchatea.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
4aa8272025f036b52ea729420003ccaed04615fc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02955/s157641213.py | 4c49cb084d398b88a82c7768e464c3400e1d3697 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | from collections import deque
def isok(x):
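    # Feasibility cost for a fixed divisor x: pair the smallest residues
    # (to be decreased) with the largest (to be increased); since each
    # operation moves one unit between two elements, the total moved is
    # the minimum number of operations to make every a_i divisible by x.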
que=deque(sorted(z%x for z in a))
res=0
while que:
l=que[0]
if l==0:
que.popleft()
continue
r=que[-1]
if r==0:
que.pop()
continue
d=min(l,x-r)
que[0]-=d
que[-1]=(que[-1]+d)%x
res+=d
return res
n,k=map(int,input().split())
a=list(map(int,input().split()))
sum_=sum(a)
fac=set()
for i in range(1,sum_+1):
if i*i>sum_:
break
if sum_%i==0:
fac.add(i)
fac.add(sum_//i)
fac=sorted(fac,reverse=True)
ans=1
for x in fac:
c=isok(x)
if c<=k:
ans=x
break
print(ans) | [
"[email protected]"
] | |
25c76f936b1e618ae4f59f11a453aeb716d710ca | 4c0062f3b45afe6a087f0e8b0b9292448ce8680e | /inwike/wsgi.py | b62adb52993addcae9133236c57a9f24c5e90cd2 | [] | no_license | mitshel/inwike | 89846286824d4dd322edb4d51836af8d86da00d2 | e89bd4ccb9c3a71d17692d14def6e1041596d0f9 | refs/heads/master | 2020-06-20T21:04:00.623930 | 2019-07-19T20:14:03 | 2019-07-19T20:14:03 | 197,248,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for inwike project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'inwike.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
daf4ddaea769085c50cf8f4f15f0287de9a5ab16 | 7256596fc6437c7f3cd1947f9f88bc556df6ba56 | /programs_in_python/programming_excercise/1.py | 7aa1303ea03548fa583c4aa0857c6f32292d692b | [] | no_license | panu2306/Python-Articles | fd02cf70635e4a63eae8b691597b6858c40832b8 | 7585dbdca92264a8f52cfb3c1b918b29814d3bd1 | refs/heads/master | 2020-12-27T17:33:28.576776 | 2020-05-06T14:55:10 | 2020-05-06T14:55:10 | 237,990,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | '''
Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5,
between 2000 and 3200 (both included).
The numbers obtained should be printed in a comma-separated sequence on a single line.
'''
# Using List in Python:
def multiple_of_seven(start, end):
l = []
for i in range(start, end+1):
if((i%7 == 0) and (i%5 != 0)):
l.append(str(i))
return l
print(','.join(multiple_of_seven(2000, 3200)))
# Using yield in Python:
def multiple_seven(start, end):
for i in range(start, end+1):
if((i%7==0) and (i%5!=0)):
yield(str(i))
for i in multiple_seven(2000, 3200):
print(i, end=',')
| [
"[email protected]"
] | |
c7e086c6ea45c41cf28e897e3b175a4f462aca19 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_app_info_item.py | deaf0a9f6d02f3a631c0a04ff600f2afd04a818c | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 637 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.appprocess.model.app_info_item import AppInfoItem
class TestAppInfoItem(unittest.TestCase):
"""AppInfoItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAppInfoItem(self):
"""Test AppInfoItem"""
# FIXME: construct object with mandatory attributes with example values
# model = AppInfoItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5bd0e53ba54a4a57cf01aa9a0f830f42c969bd2f | e08e7bb643b81899d261bbdada63754eb32da2e8 | /demos/helloworld/jinja2-support/main.py | fff720215c30da226ea87bdea8861ee34f58e750 | [
"Apache-2.0"
] | permissive | tao12345666333/app-turbo | 95baa0e0d7f7172183591c2bc177efc9ae0e1b37 | 8717ba5631e47c476e277c3a897d85b5a93f9082 | refs/heads/master | 2020-12-25T04:45:26.575354 | 2016-12-11T15:35:12 | 2016-12-11T15:35:12 | 31,700,837 | 0 | 0 | null | 2015-03-05T07:14:08 | 2015-03-05T07:14:08 | null | UTF-8 | Python | false | false | 478 | py | #-*- coding:utf-8 -*-
from tornado.options import define, options
import tornado.options
import setting
import turbo.register
import turbo.app
#uncomment this to init state manager: store
#import store
turbo.register.register_app(setting.SERVER_NAME, setting.TURBO_APP_SETTING, setting.WEB_APPLICATION_SETTING, __file__, globals())
define("port", default=8888, type=int)
if __name__ == '__main__':
tornado.options.parse_command_line()
turbo.app.start(options.port) | [
"[email protected]"
] | |
22731541dd107b93c3c9efbc5cf7a570dc5ca82e | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/pointcloud/hoverlabel/font/_color.py | ab79906bd35243f454c553b46a4a5012a256ba50 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 473 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="pointcloud.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| [
"[email protected]"
] | |
b1e8dff10bd7f06b4f82282a4a65779bd9215537 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/hamming/ccca6c36f7464721986b3e6214962018.py | 73c19fda905d9bdfc0b57e3963e8c92516bc8d4d | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 239 | py | def distance(strand1, strand2):
if len(strand1) != len(strand2):
raise Exception('length mismatch', len(strand1), len(strand2))
hd = 0
for i in xrange(len(strand1)):
if strand1[i] != strand2[i]:
hd += 1
return hd
| [
"[email protected]"
] | |
0ddbf86f3bedb7bcc299a23d41f190b92dc242af | bc2c2d63ac18dfa6b5171ff97ad6b88f647dc282 | /mininet/wifi/util.py | 00b9511e070c1f3f235e10d02d277a5b7d286e1e | [
"LicenseRef-scancode-x11-stanford"
] | permissive | MatthiasEckhart/mininet-wifi | ca8cadccb62db7ce6221ab0dcf4af7a79a93e74e | 95392e59f82e1380730b0b3f4e375a04839316ce | refs/heads/master | 2020-03-16T00:52:39.895729 | 2018-05-11T09:17:26 | 2018-05-11T09:17:26 | 132,427,457 | 1 | 2 | null | 2018-05-07T08:03:36 | 2018-05-07T08:03:36 | null | UTF-8 | Python | false | false | 1,014 | py | "Utility functions for Mininet-WiFi"
from mininet.util import retry
def moveIntfNoRetry(intf, dstNode, printError=False):
"""Move interface to node, without retrying.
intf: string, interface
dstNode: destination Node
printError: if true, print error"""
from mininet.wifi.node import Station, Car, AP
if (isinstance(dstNode, Station) or isinstance(dstNode, Car)
or isinstance(dstNode, AP) and 'eth' not in str(intf)):
if isinstance(dstNode, Station) or isinstance(dstNode, Car):
return True
else:
return True
def moveIntf(intf, dstNode, printError=True,
retries=3, delaySecs=0.001):
"""Move interface to node, retrying on failure.
intf: string, interface
dstNode: destination Node
printError: if true, print error"""
from mininet.wifi.node import AP
if not isinstance(dstNode, AP):
retry(retries, delaySecs, moveIntfNoRetry, intf, dstNode,
printError=printError)
| [
"[email protected]"
] | |
094fb1998b4fb1a04b1860e17d4d7bcda5a15b28 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0848+249/sdB_PG_0848+249_lc.py | 3419530c1b53949ad19342e48dd7c6716eb727b3 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[132.907667,24.697419], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0848+249 /sdB_PG_0848+249_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4880c6673cf71e0c7ee5ecb34afce54a4736b043 | 48156b85839d832ecfe8fdf0a0e17b5ebf8b9460 | /75.findPeak.py | 1672766c60cae18647eadcca3b6c3ce2ede0c597 | [] | no_license | Aissen-Li/lintcode | 7dc2564fcec20667f073d9219fe049808c5de625 | 4d2a717956a75197ce1dfa1094cdd5ab3a1d2004 | refs/heads/master | 2020-11-28T16:43:21.760691 | 2020-01-14T15:08:45 | 2020-01-14T15:08:45 | 229,871,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class Solution:
"""
@param A: An integers array.
@return: return any of peek positions.
"""
def findPeak(self, A):
if A[1] > A[2]:
return 1
if A[len(A) - 2] > A[len(A) - 3]:
return len(A) - 2
start, end = 0, len(A) - 1
while start + 1 < end:
mid = (start + end) // 2
if A[mid] > A[mid + 1] and A[mid] > A[mid - 1]:
return mid
if A[mid] < A[mid + 1]:
start = mid + 1
else:
end = mid - 1
return start if A[start] >= A[end] else end
| [
"[email protected]"
] | |
81e8042a40de433fce29be36bc546150bd69ec66 | 87e60b0504be11c6997f1b20b72e9428cc128342 | /python/cowbells/data/tqplot.py | 5ca6659a29428f017baacabacc79523cfcbe6ff4 | [] | no_license | brettviren/cowbells | 70a85856fdfc54526c847f115d5dc01ec85ec215 | 1ceca86383f4f774d56c3f159658518242875bc6 | refs/heads/master | 2021-01-10T18:44:41.531525 | 2014-04-09T15:17:29 | 2014-04-09T15:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,644 | py | #!/usr/bin/env python
'''
Make some plots from the TQ tree.
'''
import ROOT
import math
def liberate(tobj):
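    # Hand ownership to ROOT so Python's GC will not delete histograms
    # and fit functions while they are still drawn on a canvas.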
ROOT.SetOwnership(tobj,0)
return tobj
def draw_stats(pad, h, fitnum =111):
h.Draw()
pad.Modified()
pad.Update()
stats = h.FindObject("stats")
if stats:
stats.SetOptStat(1110)
stats.SetOptFit(fitnum)
return
class Plots(object):
expected_led_time = "abs(tmin[%(chn)d]-1530) < 30"
def __init__(self, tree, canvas = None, pdffile = 'tqplot.pdf'):
self.tree = tree
self.pdffile = pdffile
if not canvas:
canvas = ROOT.TCanvas("tqtree","tqtree debug", 0,0, 1000, 700)
self.canvas = canvas
def cprint(self,extra=''):
self.canvas.Print('%s%s'%(self.pdffile,extra), 'pdf')
def do_twoXtwo(self, what, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
for count, what in enumerate(what):
pad = self.canvas.cd(count+1)
pad.SetLogy(True)
self.tree.Draw("%s[%d]"%(what,chn))
return
def do_minmax(self, chn=0):
self.do_twoXtwo(['qmin','qmax','tmin','tmax'], chn)
def do_stats(self, chn=0):
self.do_twoXtwo(['avg','mean','rms','sigma'], chn)
def do_sumn(self, chn=0):
self.do_twoXtwo(['n3','n4','sum3','sum4'], chn)
def do_34(self, chn=0, maxq=400, opt="", logy=True, fit=(25,100)):
self.canvas.Clear()
self.canvas.Divide(2,2)
todraw = "n%(nsig)d[%(chn)d]*mean[%(chn)d] -sum%(nsig)d[%(chn)d]"
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+1)
pad.SetLogy(logy)
self.tree.Draw(todraw%locals(),"",opt)
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+3)
pad.SetLogy(logy)
h = liberate(ROOT.TH1F("spe%d"%nsig,'sum(ADC) >%d sigma above ped'%nsig,maxq,0,maxq))
self.tree.Draw(todraw%locals()+">>spe%d"%nsig,"",opt)
if fit:
h.Fit("gaus","","", *fit)
h.Draw()
pad.Modified()
pad.Update()
stats = h.FindObject("stats")
if stats:
stats.SetOptStat(1110)
stats.SetOptFit(111)
continue
return
def do_34_50(self, chn=0, opt="", logy=True):
self.do_34(chn=chn, maxq=50, opt=opt, logy=logy,fit=None)
def do_34vEntry(self, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
measure = "n%(nsig)d[%(chn)d]*mean[%(chn)d]-sum%(nsig)d[%(chn)d]"
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+1)
m = measure % locals()
m += ':Entry$'
c = ""
print m
self.tree.Draw(m,c,'colz')
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+3)
m = measure % locals()
c = "%s > 0 && %s < 400" % (m,m)
m += ':Entry$'
print m
print c
self.tree.Draw(m,c,'colz')
return
def do_fit(self, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
toplot = "mean[%(chn)d] sigma[%(chn)d] mean[%(chn)d]:Entry$ sigma[%(chn)d]:Entry$"
toplot = toplot % locals()
for count,what in enumerate(toplot.split()):
pad = self.canvas.cd(count+1)
opt = ""
if 'Entry$' in what:
opt = "COLZ"
self.tree.Draw(what,"",opt)
continue
return
def _fit_pe(self, chn=0, cuts=None,
spe=(60,110), dpe=(115,220), tpe=(225,350), qmeas = 'qpeak'):
'''
Fit single/double PE peak of qpeak.
'''
if cuts is None:
cuts = self.expected_led_time
nbins, minq, maxq = 500, 0, 500
cuts = cuts%locals()
what = "%(qmeas)s[%(chn)d]"%locals()
h = liberate(ROOT.TH1F('hqpeak', "%s {%s}" % (qmeas, cuts,), nbins, minq, maxq))
self.tree.Draw('%s >> hqpeak'%what, cuts)
pe1 = liberate(h.Clone())
pe1.Fit("gaus","L","",*spe)
fit1 = pe1.GetFunction("gaus")
fit1.SetRange(minq,maxq)
fit1.SetLineColor(2)
pe2 = liberate(h.Clone())
pe2.Add(fit1, -1)
pe2.Fit("gaus","L","",*dpe)
fit2 = pe2.GetFunction("gaus")
fit2.SetRange(spe[0],maxq)
fit2.SetLineColor(4)
pe3 = liberate(h.Clone())
pe3.Add(fit2, -1)
pe3.Fit("gaus","L","",*tpe)
fit3 = pe3.GetFunction("gaus")
#fit3.SetRange(dpe[0],maxq)
fit3.SetLineColor(6)
pe123 = liberate(h.Clone())
dfit = liberate(ROOT.TF1("dfit","gaus(0)+gaus(3)+gaus(6)",10,tpe[1]))
for ind in range(3):
dfit.SetParameter(ind, fit1.GetParameter(ind))
dfit.SetParameter(ind+3,fit2.GetParameter(ind))
dfit.SetParameter(ind+6,fit3.GetParameter(ind))
pe123.Fit(dfit,"L","",10,maxq)
dfit = pe123.GetFunction("dfit")
dfit.SetRange(10,maxq)
dfit.SetLineColor(7)
self.canvas.Clear()
self.canvas.Divide(2,2)
pad = self.canvas.cd(1)
draw_stats(pad, pe1)
pad = self.canvas.cd(2)
draw_stats(pad, pe2)
pad = self.canvas.cd(3)
draw_stats(pad, pe3)
pad = self.canvas.cd(4)
draw_stats(pad, pe123, 111111111)
a1 = fit1.Integral(minq,maxq)
if not a1:
print 'No fit 1'
return
a2 = fit2.Integral(minq,maxq)
c1 = fit1.GetParameter(0)
c2 = fit2.GetParameter(0)
mu1 = fit1.GetParameter(1)
mu2 = fit2.GetParameter(1)
mupe = 2.0*a2/a1
print 'Mean <PE> of source = 2*%.1f/%.1f = %.3f' %(a2,a1,mupe)
mu2mu1_frac = 0
if mu1: mu2mu1_frac = mu2/mu1
        print 'Ratio of PE2/PE1: %.1f/%.1f = %.3f (~2?)' % (mu2,mu1,mu2mu1_frac)
if mupe > 0:
print 'Prob 0PE: %.3f' % (math.exp(-1*mupe),)
return
def do_pe_fits(self, chn=0, cuts = None):
for qmeas in ['qpeak','qpeaks3','qpeaks4','qpeaks5','qwin']:
self._fit_pe(chn=chn,qmeas=qmeas,cuts=cuts)
self.cprint()
continue
return
def do_interspersed_led_cuts(self):
# Cuts to select LEDs interspersed with cosmic muon triggers
self.canvas.Clear()
self.canvas.Divide(2,2)
pad = self.canvas.cd(1)
pad.SetLogy(True)
self.tree.Draw("mean[2]-qmin[2]","mean[2]-qmin[2]<1000")
pad = self.canvas.cd(2)
pad.SetLogy(True)
self.tree.Draw("qnpeaks[0]","mean[2]-qmin[2]<100")
pad = self.canvas.cd(3)
pad.SetLogy(True)
self.tree.Draw("tmin[0]","mean[2]-qmin[2]<100 && qnpeaks[0] == 1")
pad = self.canvas.cd(4)
pad.SetLogy(False)
self.tree.Draw("qpeak[0]")
def all(self, chn = 0):
self.cprint('[')
for what in [
'minmax','stats','fit','sumn',
'34','34_50', '34vEntry',
]:
meth = getattr(self, 'do_%s' % what)
meth(chn)
self.cprint()
self.do_interspersed_led_cuts()
self.cprint()
self.do_pe_fits(chn)
self.cprint(']')
if __name__ == '__main__':
import sys
fp = ROOT.TFile.Open(sys.argv[1])
tree = fp.Get("tq")
try:
pdf = sys.argv[2]
except IndexError:
pdf = None
p = Plots(tree, pdffile=pdf)
p.all()
| [
"[email protected]"
] | |
cd7a6e39bddcd867989015fc0c40cc09c18bc796 | d86e9d59784097a7262fa9337585a36bd58a6d29 | /cvxbenchmarks/lib/data/epsilon/epopt/problems/hinge_l2.py | 41b8b42fd08be15cf32527c0526e7dc334f6548e | [] | no_license | nishi951/cvxbenchmarks | 2ae36e75c42c8bd35fafac98bad5d9d88168bd68 | 932141d8e4e929860011bf25c41e941e2f8fbd76 | refs/heads/master | 2021-01-11T07:23:32.260811 | 2018-09-15T22:23:14 | 2018-09-15T22:23:14 | 62,177,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """Standard SVM, i.e.. hinge loss w/ l2 regularization."""
from epopt.problems import problem_util
import cvxpy as cp
import epopt as ep
import numpy as np
import scipy.sparse as sp
def create(**kwargs):
A, b = problem_util.create_classification(**kwargs)
lam = 1
x = cp.Variable(A.shape[1])
f = ep.hinge_loss(x, A, b) + lam*cp.sum_squares(x)
return cp.Problem(cp.Minimize(f))
| [
"[email protected]"
] | |
87a1365fd6a9f6ccca02348485d3a70abebd022f | 34a26b713021f15d94d416b9728bac50d283ed5f | /interno_pymedigital-9.0/sale_order_invoice_amount/models/sale_order.py | 7dc71e3de0680f62a283f98ad86d6675e8ad4e35 | [] | no_license | Brahim820/odoo-1 | 7641b2a0ef411fb3b82f806a11e88b9880875a46 | d8ee18a7dc467ff250113a0a3df3fcf1e876b321 | refs/heads/master | 2020-04-07T10:58:09.517097 | 2018-11-19T16:56:45 | 2018-11-19T16:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,690 | py | # -*- encoding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.tools import float_is_zero
from openerp.exceptions import UserError
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def action_invoice_create_from_amount(self, grouped=False, amount=0):
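        # Two passes: first invoice each line's outstanding qty_to_invoice up
        # to `amount`; then, if money remains, bill not-yet-invoiceable
        # quantities as advance-payment ("Pago anticipado") lines.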
if amount == 0:
            raise UserError(_('The amount to invoice should be greater than zero.'))
for order in self:
group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
inv_obj = self.env['account.invoice']
invoices = {}
for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice):
if group_key not in invoices:
inv_data = order._prepare_invoice()
invoice = inv_obj.create(inv_data)
invoices[group_key] = invoice
elif group_key in invoices:
vals = {}
if order.name not in invoices[group_key].origin.split(', '):
vals['origin'] = invoices[group_key].origin + ', ' + order.name
if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
invoices[group_key].write(vals)
discount = 1 - (line.discount or 0.0 / 100.0)
if line.price_unit > 0 and discount < 100:
paid_qty = amount / (line.price_unit * discount)
else:
paid_qty = line.product_uom_qty
to_invoice = 0
if line.qty_to_invoice > 0:
if paid_qty >= line.qty_to_invoice:
to_invoice = line.qty_to_invoice
else:
to_invoice = paid_qty
name = line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
amount -= to_invoice * line.price_unit
if amount > 0:
discount = 1 - (line.discount or 0.0 / 100.0)
lines = order.order_line.filtered(lambda l: l.product_uom_qty - l.qty_invoiced > 0)
for line in lines.sorted(
key=lambda l: (l.product_uom_qty - l.qty_invoiced) * l.price_unit):
if line.price_unit > 0 and discount < 100:
paid_qty = amount / (line.price_unit * discount)
else:
paid_qty = line.product_uom_qty
residual_qty = line.product_uom_qty - line.qty_invoiced
to_invoice = 0
if residual_qty > 0:
if round(paid_qty, 5) > round(residual_qty, 5):
to_invoice = residual_qty
else:
to_invoice = paid_qty
name = ' Pago anticipado: ' + line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
amount -= to_invoice * line.price_unit
if not invoices:
raise UserError(_('There is no invoicable line.'))
for invoice in invoices.values():
if not invoice.invoice_line_ids:
raise UserError(_('There is no invoicable line.'))
# If invoice is negative, do a refund invoice instead
if invoice.amount_untaxed < 0:
invoice.type = 'out_refund'
for line in invoice.invoice_line_ids:
line.quantity = -line.quantity
# Use additional field helper function (for account extensions)
for line in invoice.invoice_line_ids:
line._set_additional_fields(invoice)
# Necessary to force computation of taxes. In account_invoice, they are triggered
# by onchanges, which are not triggered when doing a create.
invoice.compute_taxes()
        # TODO: move this computation into the main function
        # to avoid problems with functions that call super,
        # as in the l10n_ec_sri_sale module.
resx = [inv.id for inv in invoices.values()]
invx = self.env['account.invoice'].browse(resx)
for i in invx:
i.compute_sri_invoice_amounts()
return [inv.id for inv in invoices.values()]
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.multi
def invoice_line_create_from_amount(self, invoice_id, qty, name):
"""
Create an invoice line. The quantity to invoice can be positive (invoice) or negative
(refund).
:param name: char
:param invoice_id: integer
:param qty: float quantity to invoice
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'name': name, 'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
self.env['account.invoice.line'].create(vals)
| [
"[email protected]"
] | |
f412ca0a2c5181ff18128b3529882d3a0380882b | 8fe1c908807f53fa489a9fae0afcec79b24e9aeb | /run.py | a3397d3e7b3c7c4b9dd45a0a4c21ce0a196e280e | [] | no_license | satwik77/Simple_LoginApp | 2fe8973e86b128b3a4b71149606149389416c6b4 | af8c4a08ef909985127fb6267a8701bbeb7ae288 | refs/heads/master | 2021-01-10T04:04:01.857690 | 2016-04-14T14:51:13 | 2016-04-14T14:51:13 | 55,737,211 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | #!flask/bin/python
from login_app import login_app
login_app.run(debug=True)
| [
"[email protected]"
] | |
24673abaadfc0876b37e11c910ea20939b0461a1 | e7823c85962f7b7b08339cbcf7aa05de422c0fe2 | /td/credentials.py | 05a309651f83069bd35e17910b624d0130bdcbe5 | [
"MIT"
] | permissive | Aftermath213/td-ameritrade-api | 1a8a4a63b98b2fef1543ef24b069de90f1ef9612 | e5132f13c883d9bd6d15f282662f548467b6ef55 | refs/heads/master | 2023-09-02T23:44:40.935626 | 2021-10-31T18:59:17 | 2021-10-31T18:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,506 | py | import json
import urllib
import pathlib
import webbrowser
from typing import Union
from datetime import datetime
from urllib.parse import parse_qs
from urllib.parse import urlparse
import requests
class TdCredentials():
"""
### Overview
----
TD Ameritrade uses an oAuth protocol
to authenticate it's users. The `TdCredential`
object helps the user manage the credentials to ensure
the are properly authenticated.
"""
def __init__(
self,
client_id: str,
redirect_uri: str,
credential_dict: dict = None,
credential_file: Union[str, pathlib.Path] = None
) -> None:
"""Initializes the `TdCredential` object."""
self._access_token = ''
self._refresh_token = ''
self._scope = []
self._token_type = ''
self._expires_in = 0
self._refresh_token_expires_in = 0
self._is_expired = True
self._client_id = client_id
self._redirect_uri = redirect_uri
self._refresh_token_expiration_time = 0
self._access_token_expiration_time = 0
self.resource_url = 'https://api.tdameritrade.com/'
self.version = 'v1/'
self.token_endpoint = 'oauth2/token'
self.authorization_url = 'https://auth.tdameritrade.com/auth?'
self.authorization_code = ""
self._loaded_from_file = False
self._file_path = ""
if credential_file:
if isinstance(credential_file, pathlib.Path):
credential_file = credential_file.resolve()
self._loaded_from_file = True
self._file_path = credential_file
self.from_credential_file(file_path=credential_file)
elif credential_dict:
self.from_credential_dict(token_dict=credential_dict)
else:
self.from_workflow()
@property
def redirect_uri(self) -> str:
"""Returns the user's redirect URI.
### Returns
----
str
The User's redirect URI.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.redirect_uri
"""
return self._redirect_uri
@property
def client_id(self) -> str:
"""Returns the Client ID.
### Returns
----
str
The users Client Id.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.client_id
"""
return self._client_id
@property
def access_token(self) -> str:
"""Returns the Access token.
### Returns
----
str
A valid Access Token.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.access_token
"""
return self._access_token
@property
def refresh_token(self) -> str:
"""Returns the Refresh token.
### Returns
----
str
A valid Refresh Token.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.refresh_token
"""
return self._refresh_token
@property
def refresh_token_expiration_time(self) -> datetime:
"""Returns when the Refresh Token will expire.
### Returns
----
datetime
The date and time of the refresh token
expiration.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.refresh_token_expiration_time
"""
return self._refresh_token_expiration_time
@property
def is_refresh_token_expired(self) -> bool:
"""Specifies whether the current Refresh Token is expired
or not.
### Returns
----
bool
`True` if the Refresh Token is expired,
`False` otherwise.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.is_refresh_token_expired
"""
exp_time = self.refresh_token_expiration_time.timestamp() - 20
now = datetime.now().timestamp()
return bool(exp_time < now)
def from_token_dict(self, token_dict: dict) -> None:
"""Converts a token dicitonary to a `TdCredential`
object.
### Parameters
----
token_dict : dict
A dictionary containing all the
original token details.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.from_dict(
token_dict={
'access_token': '',
'refresh_token': ',
'scope': '',
'expires_in': 0,
'refresh_token_expires_in': 0,
'token_type': ''
}
)
"""
self._access_token = token_dict.get('access_token', '')
self._refresh_token = token_dict.get('refresh_token', '')
self._scope = token_dict.get('scope', [])
self._token_type = token_dict.get('token_type', '')
self._expires_in = token_dict.get('expires_in', 0)
self._refresh_token_expires_in = token_dict.get(
'refresh_token_expires_in',
0
)
self._refresh_token_expiration_time = token_dict.get(
'refresh_token_expiration_time', 0
)
self._access_token_expiration_time = token_dict.get(
'access_token_expiration_time', 0
)
# Calculate the Refresh Token expiration time.
if isinstance(self._refresh_token_expiration_time, str):
self._refresh_token_expiration_time = datetime.fromisoformat(
self._refresh_token_expiration_time
)
elif isinstance(self._refresh_token_expiration_time, float):
self._refresh_token_expiration_time = datetime.fromtimestamp(
self._refresh_token_expiration_time
)
else:
self._calculate_refresh_token_expiration(
expiration_secs=self._refresh_token_expires_in
)
# Calculate the Access Token Expiration Time.
if isinstance(self._access_token_expiration_time, str):
self._access_token_expiration_time = datetime.fromisoformat(
self._access_token_expiration_time
)
elif isinstance(self._access_token_expiration_time, float):
self._access_token_expiration_time = datetime.fromtimestamp(
self._access_token_expiration_time
)
else:
self._calculate_access_token_expiration(
expiration_secs=self._expires_in,
)
self.validate_token()
def to_token_dict(self) -> dict:
"""Converts the TdCredential object
to a dictionary object.
### Returns
----
dict
A dictionary containing all the
original token details.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.to_dict()
"""
token_dict = {
'access_token': self._access_token,
'refresh_token': self._refresh_token,
'scope': self._scope,
'expires_in': self._expires_in,
'refresh_token_expires_in': self._refresh_token_expires_in,
'token_type': self._token_type,
'refresh_token_expiration_time': self.refresh_token_expiration_time.isoformat(),
'access_token_expiration_time': self.access_token_expiration_time.isoformat(),
}
return token_dict
def _calculate_refresh_token_expiration(self, expiration_secs: int) -> None:
"""Calculates the number of seconds until the refresh token
expires.
### Parameters
----
expiration_secs : int
The number of seconds until expiration.
"""
expiration_time = datetime.now().timestamp() + expiration_secs
self._refresh_token_expiration_time = datetime.fromtimestamp(
expiration_time
)
def _calculate_access_token_expiration(self, expiration_secs: int) -> None:
"""Calculates the number of seconds until the access token
expires.
### Parameters
----
expiration_secs : int
The number of seconds until expiration.
"""
expiration_time = datetime.now().timestamp() + expiration_secs
self._access_token_expiration_time = datetime.fromtimestamp(
expiration_time
)
@property
def access_token_expiration_time(self) -> datetime:
"""Returns when the Access Token will expire.
### Returns
----
datetime
The date and time of the access token
expiration.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.access_token_expiration_time
"""
return self._access_token_expiration_time
@property
def is_access_token_expired(self) -> bool:
"""Specifies whether the current Access Token is expired
or not.
### Returns
----
bool
`True` if the Access Token is expired,
`False` otherwise.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.is_access_token_expired
"""
exp_time = self.access_token_expiration_time.timestamp() - 20
now = datetime.now().timestamp()
return bool(exp_time < now)
def from_workflow(self) -> None:
"""Grabs an Access toke and refresh token using
the oAuth workflow.
### Usage
----
>>> td_credentials = TdCredentials(
client_id=client_id,
redirect_uri=redirect_uri,
credential_file='config/td_credentials.jsonc'
)
>>> td_credentials.from_workflow()
"""
self.grab_authorization_code()
token_dict = self.exchange_code_for_token(return_refresh_token=True)
self.from_token_dict(token_dict=token_dict)
def from_credential_file(self, file_path: str) -> None:
"""Loads the credentials for a JSON file that is formatted
in the correct fashion.
### Parameters
file_path : str
The location of the credentials file.
"""
with open(file=file_path, mode='r', encoding='utf-8') as token_file:
token_dict = json.load(fp=token_file)
self.from_token_dict(token_dict=token_dict)
def to_credential_file(self, file_path: Union[str, pathlib.Path]) -> None:
"""Takes the token dictionary and saves it to a JSON file.
### Parameters
----
file_path : Union[str, pathlib.Path]
The file path to the credentials file.
### Usage
----
        >>> td_credentials.to_credential_file(
file_path='config/td_credentials.json'
)
"""
if isinstance(file_path, pathlib.Path):
file_path = file_path.resolve()
with open(file=file_path, mode='w+', encoding='utf-8') as token_file:
json.dump(obj=self.to_token_dict(), fp=token_file, indent=2)
def from_credential_dict(self, token_dict: dict) -> None:
"""Loads the credentials from a token dictionary.
### Parameters
----
token_dict : dict
The token dictionary with the required
authentication tokens.
### Usage
----
### Example 1
----
        You don't necessarily need the `refresh_token_expiration_time` or the
        `access_token_expiration_time` keys; they can be recalculated from the
        `expires_in` and `refresh_token_expires_in` keys.
>>> td_credentials.from_credential_dict(
token_dict={
"access_token": "YOUR_ACCESS_TOKEN",
"refresh_token": "YOUR_REFRESH_TOKEN"
"scope": "PlaceTrades AccountAccess MoveMoney",
"expires_in": 1800,
"refresh_token_expires_in": 7776000,
"token_type": "Bearer",
"refresh_token_expiration_time": "2021-07-08T17:38:07.973982",
"access_token_expiration_time": "2021-04-09T18:08:07.973982"
}
)
### Example 2
----
        Here the expiration times are omitted entirely; they will be
        recalculated from the `expires_in` and `refresh_token_expires_in`
        keys.
        >>> # This is just another way of sending it through.
>>> td_credentials.from_credential_dict(
token_dict={
"access_token": "YOUR_ACCESS_TOKEN",
"refresh_token": "YOUR_REFRESH_TOKEN"
"scope": "PlaceTrades AccountAccess MoveMoney",
"expires_in": 1800,
"refresh_token_expires_in": 7776000,
"token_type": "Bearer"
}
)
"""
self.from_token_dict(token_dict=token_dict)
self.validate_token()
def grab_authorization_code(self) -> None:
"""Generates the URL to grab the authorization code."""
data = {
"response_type": "code",
"redirect_uri": self.redirect_uri,
"client_id": self.client_id + "@AMER.OAUTHAP"
}
# url encode the data.
params = urllib.parse.urlencode(data)
# build the full URL for the authentication endpoint.
url = self.authorization_url + params
webbrowser.open(url=url)
code_url = input("Please Paste the Authorization Code Here: ")
query = urlparse(url=code_url)
parse_code = parse_qs(qs=query.query)
self.authorization_code = parse_code['code'][0]
def exchange_code_for_token(self, return_refresh_token: bool) -> dict:
"""Access token handler for AuthCode Workflow.
### Overview
----
This takes the authorization code parsed from
the auth endpoint to call the token endpoint
and obtain an access token.
### Parameters
----
return_refresh_token: bool
If set to `True`, will request a refresh token in
the request. Otherwise, will only request an access
            token.
### Returns
----
dict :
The token dictionary with the content.
"""
# Define the parameters of our access token post.
data = {
'grant_type': 'authorization_code',
'client_id': self.client_id + '@AMER.OAUTHAP',
'code': self.authorization_code,
'redirect_uri': self.redirect_uri
}
if return_refresh_token:
data['access_type'] = 'offline'
# Make the request.
response = requests.post(
url="https://api.tdameritrade.com/v1/oauth2/token",
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
data=data
)
if response.ok:
return response.json()
raise requests.HTTPError()
def grab_access_token(self) -> dict:
"""Refreshes the current access token.
This takes a valid refresh token and refreshes
an expired access token. This is different from
exchanging a code for an access token.
### Returns
----
dict:
The dictionary contain all the token
info.
"""
# build the parameters of our request
data = {
'client_id': self.client_id,
'grant_type': 'refresh_token',
'access_type': 'offline',
'refresh_token': self.refresh_token
}
# Make the request.
response = requests.post(
url="https://api.tdameritrade.com/v1/oauth2/token",
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
data=data
)
if response.ok:
return response.json()
raise requests.HTTPError()
def validate_token(self) -> None:
"""Validates the access token and refresh token.
### Overview
----
A TD Ameritrade Access token is only valid for 30 minutes,
and a TD Ameritrade Refresh token is only valid for 90 days.
When an access token expires, a new one is retrieved using the
refresh token. If the refresh token is expired the oAuth workflow
starts again.
"""
if self.is_refresh_token_expired:
print("Refresh Token Expired, initiating oAuth workflow...")
self.from_workflow()
if self.is_access_token_expired:
print("Access Token Expired, refreshing access token...")
token_dict = self.grab_access_token()
self.from_token_dict(token_dict=token_dict)
if self._loaded_from_file:
self.to_credential_file(file_path=self._file_path)
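# Minimal usage sketch (not part of the original module). The client id,
# redirect URI, and file path below are placeholder values:
#
#     td_credentials = TdCredentials(
#         client_id='YOUR_CLIENT_ID',
#         redirect_uri='https://localhost/callback'
#     )
#     td_credentials.from_credential_file(file_path='config/td_credentials.json')
#     td_credentials.validate_token()  # refreshes or re-runs oAuth as needed
#     td_credentials.to_credential_file(file_path='config/td_credentials.json')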
| [
"[email protected]"
] | |
6436176ee36f61be4e18fceb8292042e2a8cd3bd | ccbb7fb8fda4d936e765263f05a435058b397bd9 | /src/guiltytargets/ppi_network_annotation/model/__init__.py | ab512ca4a78d001836dbb692256bd93d16deee04 | [
"MIT"
] | permissive | GuiltyTargets/guiltytargets | 5a5d3ba9e45867a64c81a91529ae6689f8be447f | c20a5cae6c9cc71c2ca73080a862abe986bc34c0 | refs/heads/master | 2022-02-13T03:30:49.705239 | 2021-12-22T12:51:20 | 2021-12-22T12:51:20 | 154,318,881 | 10 | 5 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # -*- coding: utf-8 -*-
"""Package that includes classes for data models."""
from .attribute_network import AttributeNetwork # noqa: F401
from .filtered_network import FilteredNetwork # noqa: F401
from .gene import Gene # noqa: F401
from .labeled_network import LabeledNetwork # noqa: F401
from .network import Network # noqa: F401
| [
"[email protected]"
] | |
34ea6aa1267f976a48d3711fbeae24feee76691c | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q1769_Minimum_Number_of_Operations_to_Move_All_Balls_to_Each_Box.py | f0c1cbca670d01a831baba6505718f6aabeab348 | [] | no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,932 | py | from typing import List
from collections import Counter,defaultdict
from math import *
from functools import reduce
import numpy as np
from heapq import *
class Solution:
def minOperations(self, boxes: str) -> List[int]:
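        # O(n) sweep: res[0] is the total distance from box 0 to every
        # ball; moving the target box one step right adds one move per
        # ball to its left and removes one per ball at or to its right.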
num1sAhead = 0
num1sBehind = 0
initialDiff = 0
res = []
for num in boxes:
if num == "1":
num1sAhead += 1
for i, num in enumerate(boxes):
if num == "1":
initialDiff += i
for i, num in enumerate(boxes):
if i == 0:
res.append(initialDiff)
if num == "1":
num1sAhead -= 1
num1sBehind += 1
else:
res.append(res[i-1] - num1sAhead + num1sBehind)
if num == "1":
num1sAhead -= 1
num1sBehind += 1
return res
def minOperations_2(self, boxes: str) -> List[int]:
n = len(boxes)
list_1 = np.array(list(map(lambda x:int(x),boxes)))[:,None]
list_2 = np.array(range(n))
result = np.abs((list_2[:,None]-list_2[None,:])*list_1).sum(axis = 0)
return list(result)
def minOperations_1(self, boxes: str) -> List[int]:
n = len(boxes)
return_list = [0]*n
for i in range(n):
count_move = 0
for j,e in enumerate(boxes):
count_move+=abs((j-i)*int(e))
return_list[i] = count_move
return return_list
sol = Solution()
# input
boxes = "110"
# output
output = sol.minOperations(boxes)
# answer
answer = [1,1,3]
print(output, answer, answer == output)
# input
boxes = "001011"
# output
output = sol.minOperations(boxes)
# answer
answer = [11,8,5,4,3,4]
print(output, answer, answer == output)
# input
boxes = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
# output
output = sol.minOperations(boxes)
# answer
answer = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print(output, answer, answer == output) | [
"[email protected]"
] | |
3f7145a11f4c1d019d782a5fae6848a3d4d3f507 | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/api/openstack/compute/contrib/rescue.py | 7bf815a37979d0e68811a4baac694cc8f191f500 | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 3,310 | py | # Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = exts.extension_authorizer('compute', 'rescue')
class RescueController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(RescueController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_id):
try:
return self.compute_api.get(context, instance_id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(msg)
@wsgi.action('rescue')
@exts.wrap_errors
def _rescue(self, req, id, body):
"""Rescue an instance."""
context = req.environ["nova.context"]
authorize(context)
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
password = utils.generate_password(FLAGS.password_length)
instance = self._get_instance(context, id)
try:
self.compute_api.rescue(context, instance,
rescue_password=password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rescue')
return {'adminPass': password}
@wsgi.action('unrescue')
@exts.wrap_errors
def _unrescue(self, req, id, body):
"""Unrescue an instance."""
context = req.environ["nova.context"]
authorize(context)
instance = self._get_instance(context, id)
try:
self.compute_api.unrescue(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
return webob.Response(status_int=202)
class Rescue(exts.ExtensionDescriptor):
"""Instance rescue mode"""
name = "Rescue"
alias = "os-rescue"
namespace = "http://docs.openstack.org/compute/ext/rescue/api/v1.1"
updated = "2011-08-18T00:00:00+00:00"
def get_controller_extensions(self):
controller = RescueController()
extension = exts.ControllerExtension(self, 'servers', controller)
return [extension]
| [
"[email protected]"
] | |
c7a1755f0e7fbc0d4edee7b813130bfb252193cf | 2acf64fca88200f4a4ada46f5da4f96702bafa06 | /stubs/facebook_business/adobjects/hotelroom.pyi | 0c74f95680b203626a30e5473345113f0c61cb3b | [] | no_license | vlab-research/adopt | bf6cdbfb751f7d85674e3925b207639e7d9d92c4 | 66347b00996e26910290e4fdb883e4231cc614af | refs/heads/master | 2023-04-12T12:16:23.061861 | 2021-05-18T14:17:01 | 2021-05-18T14:17:01 | 278,025,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | pyi | from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject as AbstractCrudObject
from facebook_business.adobjects.abstractobject import AbstractObject as AbstractObject
from facebook_business.adobjects.objectparser import ObjectParser as ObjectParser
from facebook_business.api import FacebookRequest as FacebookRequest
from facebook_business.typechecker import TypeChecker as TypeChecker
from typing import Any, Optional
class HotelRoom(AbstractCrudObject):
def __init__(self, fbid: Optional[Any] = ..., parent_id: Optional[Any] = ..., api: Optional[Any] = ...) -> None: ...
class Field(AbstractObject.Field):
applinks: str = ...
base_price: str = ...
currency: str = ...
description: str = ...
id: str = ...
images: str = ...
margin_level: str = ...
name: str = ...
room_id: str = ...
sale_price: str = ...
url: str = ...
def api_delete(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_get(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_update(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def get_pricing_variables(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
| [
"[email protected]"
] | |
1bda6fd6e7271cebb0d5a3ec0f810bf5ba116d12 | 386d1b6557f4cbaf20794cd222f3b7b8598ef6a6 | /data/clean_data/A1/18.py | 165e341e4a20acdb7adcf06e11bc7e769b947482 | [] | no_license | woowei0102/code2pro | 3baf86985f911264362963c503f12d20bdc1f89f | 0b16c62a1cb9053ab59edd7a52e1b3b39fdf66dc | refs/heads/main | 2023-06-28T23:09:23.998798 | 2021-07-13T11:49:27 | 2021-07-13T11:49:27 | 385,585,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | class Account:
def __init__(self, name):
self.name = name
self._balance = 0
    def deposit(self, amount):
        # Add to the balance and report the deposited amount.
        self._balance = self._balance + amount
        print('{}存了NT${:,.0f}元.'.format(self.name, amount))
    def withdraw(self, amount):
        # Only withdraw when the balance covers the full amount.
        if amount <= self._balance:
            self._balance = self._balance - amount
            print('{}提了NT${:,.0f}元.'.format(self.name, amount))
        else:
            print('{}的存款不足.'.format(self.name))
def show(self):
print('{}餘額NT${:,.0f}元.'.format(self.name,self._balance))
userA = Account("Jack")
userA.withdraw(1000)
userA.deposit(5000)
userA.withdraw(1000)
userA.show()
| [
"[email protected]"
] | |
d6de7da64fe8278c4dcc7e25bc1fdf741e82efa8 | d7e9bf5d59343f9ea1670fc529e1afa8fdcbf337 | /Section-04/create_tables.py | 2cd9ed6868b3e5aafebbbf768358599456b3f6fa | [] | no_license | tyday/solid-guacamole | 2610985f3156d44144cf40dd65b040898fb8c159 | f1a1544ae831c18c2acf558afdf8a1d4c9991152 | refs/heads/master | 2020-05-05T09:01:56.946260 | 2019-04-14T17:49:13 | 2019-04-14T17:50:21 | 179,888,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
cursor.execute(create_table)
create_table = "CREATE TABLE IF NOT EXISTS items (name text, price real)"
cursor.execute(create_table)
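# Note: this INSERT runs on every execution, so repeated runs will add
# duplicate 'test' rows.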
cursor.execute("INSERT INTO items VALUES ('test', 10.99)")
connection.commit()
connection.close() | [
"[email protected]"
] | |
6a377949935a75af9eaadc89fad29c3b315a1549 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/managedidentities/v1beta1/get_domain_iam_policy.py | fb18cbda25e88bff30ce2de666aa2916274a7222 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,609 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDomainIamPolicyResult',
'AwaitableGetDomainIamPolicyResult',
'get_domain_iam_policy',
'get_domain_iam_policy_output',
]
@pulumi.output_type
class GetDomainIamPolicyResult:
def __init__(__self__, bindings=None, etag=None, version=None):
if bindings and not isinstance(bindings, list):
raise TypeError("Expected argument 'bindings' to be a list")
pulumi.set(__self__, "bindings", bindings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bindings(self) -> Sequence['outputs.BindingResponse']:
"""
Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> str:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> int:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
class AwaitableGetDomainIamPolicyResult(GetDomainIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainIamPolicyResult(
bindings=self.bindings,
etag=self.etag,
version=self.version)
def get_domain_iam_policy(domain_id: Optional[str] = None,
options_requested_policy_version: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainIamPolicyResult:
"""
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
"""
__args__ = dict()
__args__['domainId'] = domain_id
__args__['optionsRequestedPolicyVersion'] = options_requested_policy_version
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:managedidentities/v1beta1:getDomainIamPolicy', __args__, opts=opts, typ=GetDomainIamPolicyResult).value
return AwaitableGetDomainIamPolicyResult(
bindings=__ret__.bindings,
etag=__ret__.etag,
version=__ret__.version)
@_utilities.lift_output_func(get_domain_iam_policy)
def get_domain_iam_policy_output(domain_id: Optional[pulumi.Input[str]] = None,
options_requested_policy_version: Optional[pulumi.Input[Optional[str]]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDomainIamPolicyResult]:
"""
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
"""
...
| [
"[email protected]"
] | |
caa8266f63e9454a80ff08be34a5a07f072d0f01 | 98a359465e6e0620accede5b87b819aed663179d | /schol_library/migrations/0059_auto_20190922_1729.py | f4818e0050c290afbb640c3b9136a5ea6ce4a2ed | [] | no_license | mustavfaa/back-end | 88f8674bd6c2f8d0c4984a2a3d34f2aece3ec8d1 | 6635e8f504c7a7ba9709121b4dd8d5ccecdf05ca | refs/heads/main | 2023-08-15T10:48:03.461138 | 2021-09-27T15:26:03 | 2021-09-27T15:26:03 | 410,938,832 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # Generated by Django 2.2 on 2019-09-22 11:29
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schol_library', '0058_auto_20190922_1144'),
]
operations = [
migrations.AddField(
model_name='requestedition',
name='checkid',
field=models.BooleanField(blank=True, default=False, verbose_name='статус'),
),
migrations.AlterField(
model_name='checkidrequestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 729564), null=True, verbose_name='время просмотра'),
),
migrations.AlterField(
model_name='requestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 728450), verbose_name='время заявки'),
),
]
| [
"[email protected]"
] | |
eb129243a035487b54c5721c6288ed1cc40cdb22 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/models-library/src/models_library/services_access.py | 9e121fad95a0a38e9c550ccca50cfff86227dfc2 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 666 | py | """Service access rights models
"""
from pydantic import BaseModel, Field
from pydantic.types import PositiveInt
GroupId = PositiveInt
class ServiceGroupAccessRights(BaseModel):
execute_access: bool = Field(
default=False,
description="defines whether the group can execute the service",
)
write_access: bool = Field(
default=False, description="defines whether the group can modify the service"
)
class ServiceAccessRights(BaseModel):
access_rights: dict[GroupId, ServiceGroupAccessRights] | None = Field(
None,
alias="accessRights",
description="service access rights per group id",
)
| [
"[email protected]"
] | |
5431703d1c4fa12874ad6fb9cb4a6c792be79bb7 | 0809ea2739d901b095d896e01baa9672f3138825 | /ORMproject1/testApp/migrations/0002_proxyemployee_proxyemployee2.py | 947459ac9771a7ef22b74ac0159c4d06da01f56a | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 3.0.5 on 2020-04-30 05:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('testApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProxyEmployee',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('testApp.employee',),
),
migrations.CreateModel(
name='ProxyEmployee2',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('testApp.employee',),
),
]
| [
"[email protected]"
] | |
f3f75af6d875f307aaf0c5dd59ebde978c2efb5d | 19101bf9478c585f73540f1962494a0315ccd0a6 | /ax/models/tests/test_alebo_initializer.py | 8c37f09dce95a0e81f510189ed5551873bcd1268 | [
"MIT"
] | permissive | liusulin/Ax | 4ca1dcaa34f129d25faa2f52a8094b5f6e399eba | 850b6975b7c7f9960ad5461e71d0304b2670232a | refs/heads/main | 2023-07-14T01:02:38.044397 | 2021-08-18T15:34:06 | 2021-08-18T15:35:11 | 397,664,102 | 1 | 0 | MIT | 2021-08-18T16:16:10 | 2021-08-18T16:16:09 | null | UTF-8 | Python | false | false | 1,047 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.utils.common.testutils import TestCase
class ALEBOSobolTest(TestCase):
def testALEBOSobolModel(self):
B = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
Q = np.linalg.pinv(B) @ B
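        # Q = pinv(B) @ B is the orthogonal projector onto the row space
        # of B; points generated by the model should satisfy Q @ z == z.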
# Test setting attributes
m = ALEBOInitializer(B=B)
self.assertTrue(np.allclose(Q, m.Q))
# Test gen
Z, w = m.gen(5, bounds=[(-1.0, 1.0)] * 3)
self.assertEqual(Z.shape, (5, 3))
self.assertTrue(Z.min() >= -1.0)
self.assertTrue(Z.max() <= 1.0)
# Verify that it is in the subspace
self.assertTrue(np.allclose(Q @ Z.transpose(), Z.transpose()))
m = ALEBOInitializer(B=B, nsamp=1)
with self.assertRaises(ValueError):
m.gen(2, bounds=[(-1.0, 1.0)] * 3)
| [
"[email protected]"
] | |
66f2df3cf8c49c743f988bcbdddae4207bad389c | c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71 | /Algorithms/Medium/105. Construct Binary Tree from Preorder and Inorder Traversal/answer.py | bec4709a6dc30054d5688961993bb42736c611cf | [
"Apache-2.0"
] | permissive | kenwoov/PlayLeetCode | b2fdc43d799c37683a9efdc31c4df159cf553bf5 | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | refs/heads/master | 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
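        # preorder[0] is always the root; its index in `inorder` splits
        # the remaining nodes into left and right subtrees, and the next
        # `mid` preorder values belong to the left subtree.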
if not preorder:
return None
root = TreeNode(preorder[0])
mid = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:mid+1], inorder[:mid])
root.right = self.buildTree(preorder[mid+1:], inorder[mid+1:])
return root
if __name__ == "__main__":
s = Solution()
result = s.buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
print(result)
| [
"[email protected]"
] | |
4fd186ecb8de7d13fb1a560a5b7063dd55cf34c3 | b0856a2d66cc4c71705b8c16c169848070294cf6 | /graphValidTree.py | c267910cc1708fcf239eee741ff3637ac2e1b0d5 | [] | no_license | jfriend08/LeetCode | 9e378ff015edc3102a4785b0832cf0eeb09f5fc2 | f76d3cf2e7fd91767f80bd60eed080a7bad06e62 | refs/heads/master | 2021-01-21T19:28:25.354537 | 2016-01-15T04:53:11 | 2016-01-15T04:53:11 | 28,518,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | '''
Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a
pair of nodes), write a function to check whether these edges make up a valid tree.
For example:
Given n = 5 and edges = [[0, 1], [0, 2], [0, 3], [1, 4]], return true.
Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]], return false.
Hint:
Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], what should you return? Is this case a valid tree?
According to the definition of tree on Wikipedia: "a tree is an undirected graph in which any two
vertices are connected by exactly one path. In other words, any connected graph without simple cycles
is a tree."
Note: you can assume that no duplicate edges will appear in edges. Since all edges are undirected,
[0, 1] is the same as [1, 0] and thus will not appear together in edges.
'''
class Solution(object):
def makeMap(self, n, edges, linkMap):
for i in xrange(n):
linkMap[i] = []
for n1, n2 in edges:
linkMap[n1] += [n2]
linkMap[n2] += [n1]
def isValidTravel(self, parent, node, linkMap, visited):
visited[node] = True
for nei in linkMap[node]:
if nei == parent:
continue
elif not nei in visited:
res = self.isValidTravel(node, nei, linkMap, visited)
if not res:
return res
else:
return False
return True
def validTree(self, n, edges):
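        # A valid tree is connected (all n nodes reachable from node 0)
        # and acyclic (the DFS never revisits a node other than its
        # immediate parent).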
linkMap, visited = {}, {}
self.makeMap(n, edges, linkMap)
res = self.isValidTravel(None, 0, linkMap, visited)
return len(visited.keys()) == n and res
# for node in xrange(n):
# if not node in visited:
# res = self.isValidTravel(None, node, linkMap, visited)
# if res == False:
# return res
# return True
sol = Solution()
n, edges = 5, [[0, 1], [0, 2], [0, 3], [1, 4]]
print sol.validTree(n, edges)
n, edges = 5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
print sol.validTree(n, edges)
n, edges = 5, [[0, 1], [1, 2], [3, 4]]
print sol.validTree(n, edges) | [
"[email protected]"
] | |
d1676107aebbd8f6a8e915250251ab1170737d58 | 66a9c0e23af1fab7f3c0b2f0cd6b8c6ac060b1d7 | /models/image_segmentation/tensorflow/maskrcnn/inference/fp32/coco.py | b9813e3e51326ca1273d56c770bca83f18cab6b1 | [
"Apache-2.0"
] | permissive | hekaplex/resnet_dl | ea289864b330bfa74996444d0325f1a062feae59 | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | refs/heads/master | 2023-04-15T06:03:18.696578 | 2021-05-05T14:18:13 | 2021-05-05T14:18:13 | 364,602,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,454 | py | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
    # Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import time
import numpy as np
import subprocess
from pdb import set_trace as bp
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
from config import Config
import utils
import model as modellib
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(os.environ["MOUNT_EXTERNAL_MODELS_SOURCE"], "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(os.environ["MOUNT_BENCHMARK"], "common/tensorflow/logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
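    # Effective batch size is GPU_COUNT * IMAGES_PER_GPU (2 with the
    # defaults above).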
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
class_map=None, return_coco=False, auto_download=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
"""
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
image_ids.extend(list(coco.getImgIds(catIds=[id])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def auto_download(self, dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
print("Downloading zipped annotations to " + annZipFile + " ...")
with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
print("Will use annotations in " + annFile)
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CocoDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
                # smaller than the given dimensions. If so, replace it
                # with a full-size placeholder mask.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CocoDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
if info["source"] == "coco":
return "http://cocodataset.org/#explore?id={}".format(info["id"])
else:
super(CocoDataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
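            # COCO expects boxes as [x, y, width, height]; `rois` come in
            # as [y1, x1, y2, x2], hence the reordering below.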
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, warmup=0, image_ids=None):
"""Runs official COCO evaluation.
    dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
    limit = int(limit / config.BATCH_SIZE) * config.BATCH_SIZE
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
    for i, image_id in enumerate(image_ids):
        # Only start work at batch boundaries; each pass below loads a full
        # batch of images (this assumes consecutive ids in `image_ids`).
        if i % config.BATCH_SIZE != 0:
            continue
        image_list = []
        for j in range(0, config.BATCH_SIZE):
            print("i image_id", i + j, image_id + j)
            image = dataset.load_image(image_id + j)
            image_list.append(image)
# Run detection
t = time.time()
r = model.detect(image_list, verbose=0)[0]
        t1 = time.time() - t
        # Only count prediction time once the warmup batches are done.
        if i / config.BATCH_SIZE >= warmup:
            t_prediction += t1
        print("pred time:", i, t1)
# Convert results to COCO format
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"], r["masks"])
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Batch size: %d" % (config.BATCH_SIZE))
print("Time spent per BATCH: %.4f ms" % (t_prediction / (len(image_ids)/config.BATCH_SIZE-warmup) * 1000))
print("Total samples/sec: %.4f samples/s" % ((len(image_ids)/config.BATCH_SIZE-warmup) * config.BATCH_SIZE / t_prediction))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument("--trainbs", required=False,
default=2,
metavar="<train batchsize>",
help="Batchsize to train (default=2)")
parser.add_argument("--infbs", required=False,
default=1,
metavar="<inference batchsize>",
help="Batchsize to inference (default=1)")
parser.add_argument("--num_intra_threads", required=False,
default=56,
metavar="<num intra threads>",
help="Num intra threads (default=56)")
parser.add_argument("--num_inter_threads", required=False,
default=1,
metavar="<num inter threads>",
help="Num inter threads (default=1)")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--cp', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--nb', required=False,
default=50,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--nw', required=False,
default=5,
metavar="<image count>",
help='Images to use for evaluation warmup (default=10)')
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip MS-COCO files (default=False)',
type=bool)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.cp)
print("Auto Download: ", args.download)
# For pycocotools updates
ppath = subprocess.Popen(["python3", "-m", "site", "--user-site"],
stdout=subprocess.PIPE).communicate()[0].decode("utf-8")
ppath = ppath[:-1] + "/pycocotools/coco.py"
ret = subprocess.Popen(["sed", "-i", "s/unicode/bytes/", ppath],
stdout=subprocess.PIPE).communicate()[0]
# Configurations
if args.command == "train":
class TrainConfig(CocoConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = int(args.trainbs)
STEPS_PER_EPOCH = int(args.nb)
config = TrainConfig()
else:
class InferenceConfig(CocoConfig):
# Inference batch size = GPU_COUNT * IMAGES_PER_GPU; IMAGES_PER_GPU is
# taken from the --infbs command line flag
GPU_COUNT = 1
IMAGES_PER_GPU = int(args.infbs)
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.NUM_INTRA = int(args.num_intra_threads)
config.NUM_INTER = int(args.num_inter_threads)
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.cp)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.cp)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()[1]
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
model_path = args.model
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
# validation set, as in the Mask R-CNN paper.
dataset_train = CocoDataset()
dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
dataset_train.prepare()
# Validation dataset
dataset_val = CocoDataset()
dataset_val.load_coco(args.dataset, "minival", year=args.year, auto_download=args.download)
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1, #40,
layers='heads', warmup=int(args.nw))
# Training - Stage 2
# Fine-tune layers from ResNet stage 4 and up
print("Fine-tune ResNet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=2, #120,
layers='4+', warmup=int(args.nw))
# Training - Stage 3
# Fine-tune all layers
print("Fine-tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=3, #160,
layers='all', warmup=int(args.nw))
elif args.command == "evaluate":
# Validation dataset
dataset_val = CocoDataset()
coco = dataset_val.load_coco(args.dataset, "minival", year=args.year, return_coco=True, auto_download=args.download)
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.nb))
evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.nb), warmup=int(args.nw))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command))
| [
"[email protected]"
] | |
88231ca16773294f42e2cf6d51ba1b8dc86895a1 | 25b81256057c9a2de014ab511e04703dc617f050 | /etl/census/census_features.py | 19b6e85b6775b9ed6933def9b874cd9390f5bb66 | [
"MIT"
] | permissive | conorhenley/cincinnati | 7b9b2fc6d13e49ad5e95a557cd79b28bd17f0565 | 5ca86a8a31099365188969493e0dd369b4faefc0 | refs/heads/master | 2021-01-13T06:50:18.403686 | 2016-05-26T20:21:12 | 2016-05-26T20:21:12 | 64,249,902 | 1 | 0 | null | 2016-07-26T19:51:03 | 2016-07-26T19:51:03 | null | UTF-8 | Python | false | false | 7,805 | py |
# coding: utf-8
# In[1]:
from sqlalchemy import create_engine
import pandas as pd
from lib_cinci.config import main as config
user = config['db']['user']
password = config['db']['password']
host = config['db']['host']
database = config['db']['database']
engine = create_engine('postgresql://{user}:{password}@{host}:5432/{database}'.format(user=user, password=password, host=host, database=database))
# In[3]:
pop_housing_sql = """SELECT census.*, groups.area FROM shape_files.census_pop_housing as census
JOIN shape_files.census_blocks_groups as groups
on census.tract = groups.tract
and census.block_group = groups.blkgrp;"""
# In[4]:
pop_housing_raw = pd.read_sql_query(pop_housing_sql, con=engine)
# # Raw census data
# In[5]:
pop_housing_raw.head()
# # Calculating census features
# A list of feature descriptions and their calculations can be found in the folder docs/data_dictionaries
#
# Features are calculated for each pair of census tract and block group
# In[5]:
features = pd.DataFrame({ 'tract' : pop_housing_raw.tract,
'block_group' : pop_housing_raw.block_group,
'housing_density': pop_housing_raw.H0030001/pop_housing_raw.area,
'rate_occupied_units': pop_housing_raw.H0030002/pop_housing_raw.H0030001,
'rate_vacant_units': pop_housing_raw.H0030003/pop_housing_raw.H0030001,
'rate_mortgage_or_loan' : pop_housing_raw.H0040002/pop_housing_raw.H0030001,
'rate_renter_occupied' : pop_housing_raw.H0040004/pop_housing_raw.H0030001,
'rate_for_rent' : pop_housing_raw.H0050002/pop_housing_raw.H0030001,
'rate_white_householder' : pop_housing_raw.H0060002/pop_housing_raw.P0180001,
'rate_black_householder' : pop_housing_raw.H0060003/pop_housing_raw.P0180001,
'rate_native_householder' : (pop_housing_raw.H0060004+pop_housing_raw.H0060006)/pop_housing_raw.P0180001,
'rate_asian_householder' : pop_housing_raw.H0060005/pop_housing_raw.P0180001,
'rate_other_race_householder' : pop_housing_raw.H0060007/pop_housing_raw.P0180001,
'rate_pop_occupied_units' : pop_housing_raw.H0100001/pop_housing_raw.P0010001,
'rate_1_per_household' : pop_housing_raw.H0130002/pop_housing_raw.P0180001,
'rate_2_per_household' : pop_housing_raw.H0130003/pop_housing_raw.P0180001,
'rate_3_per_household' : pop_housing_raw.H0130004/pop_housing_raw.P0180001,
'rate_4_per_household' : pop_housing_raw.H0130005/pop_housing_raw.P0180001,
'rate_5_per_household' : pop_housing_raw.H0130006/pop_housing_raw.P0180001,
'rate_6_per_household' : pop_housing_raw.H0130007/pop_housing_raw.P0180001,
'rate_7_plus_per_household' : pop_housing_raw.H0130008/pop_housing_raw.P0180001,
'rate_owner_occupied' : pop_housing_raw.H0140002/pop_housing_raw.H0030001,
'rate_owner_occupied_white' : pop_housing_raw.H0140003/pop_housing_raw.H0140002,
'rate_owner_occupied_black' : pop_housing_raw.H0140004/pop_housing_raw.H0140002,
'rate_owner_occupied_native' : (pop_housing_raw.H0140005+pop_housing_raw.H0140007)/pop_housing_raw.H0140002,
'rate_owner_occupied_asian' : pop_housing_raw.H0140006/pop_housing_raw.H0140002,
'rate_owner_occupied_other_race' : pop_housing_raw.H0140008/pop_housing_raw.H0140002,
'rate_renter_occupied_white' : pop_housing_raw.H0140011/pop_housing_raw.H0040004,
'rate_renter_occupied_black' : pop_housing_raw.H0140012/pop_housing_raw.H0040004,
'rate_renter_occupied_native' : (pop_housing_raw.H0140013+pop_housing_raw.H0140015)/pop_housing_raw.H0040004,
'rate_renter_occupied_asian' : pop_housing_raw.H0140014/pop_housing_raw.H0040004,
'rate_renter_occupied_other' : pop_housing_raw.H0140016/pop_housing_raw.H0040004,
'rate_owner_occupied_hispanic' : pop_housing_raw.H0150004/pop_housing_raw.H0140002,
#'rate_renter_occupied_hispanic' : pop_housing_raw.H0150005/pop_housing_raw.H0040004,
'rate_owner_occupied_w_children' : pop_housing_raw.H0190003/pop_housing_raw.H0140002,
'rate_owner_occupied_no_children' : pop_housing_raw.H0190004/pop_housing_raw.H0140002,
'rate_renter_occupied_no_children' : 1-(pop_housing_raw.H0190006/pop_housing_raw.H0040004),
'rate_renter_occupied_w_children' : pop_housing_raw.H0190006/pop_housing_raw.H0040004,
'population_density' : pop_housing_raw.P0010001/pop_housing_raw.area,
'rate_white_pop' : pop_housing_raw.P0030002/pop_housing_raw.P0010001,
'rate_black_pop' : pop_housing_raw.P0030003/pop_housing_raw.P0010001,
'rate_native_pop' : (pop_housing_raw.P0030006+pop_housing_raw.P0030004)/pop_housing_raw.P0010001,
'rate_asian_pop' : pop_housing_raw.P0030005/pop_housing_raw.P0010001,
'rate_other_race_pop' : pop_housing_raw.P0030007/pop_housing_raw.P0010001,
'rate_pop_over_18' : pop_housing_raw.P0110001/pop_housing_raw.P0010001,
'rate_male_under_18' : (pop_housing_raw.P0120003+pop_housing_raw.P0120004+pop_housing_raw.P0120005+pop_housing_raw.P0120006)/pop_housing_raw.P0010001,
'rate_male_18_35' : pop_housing_raw[['P0120007','P0120008','P0120009','P0120010','P0120011','P0120012']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_male_35_50' : pop_housing_raw[['P0120013','P0120014','P0120015']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_male_50_75' : pop_housing_raw[['P0120016', 'P0120017', 'P0120018', 'P0120019', 'P0120020', 'P0120021', 'P0120022']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_male_over_75' : pop_housing_raw[['P0120023','P0120024','P0120025']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_female_under_18' : pop_housing_raw[['P0120027','P0120028','P0120029','P0120030']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_female_18_35' : pop_housing_raw[['P0120031', 'P0120032', 'P0120033', 'P0120034', 'P0120035', 'P0120036']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_female_35_50' : pop_housing_raw[['P0120037', 'P0120038', 'P0120039']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_female_50_75' : pop_housing_raw[['P0120040', 'P0120041', 'P0120042', 'P0120043', 'P0120044', 'P0120045', 'P0120046']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_female_over_75' : pop_housing_raw[['P0120047','P0120048','P0120049']].sum(axis=1)/pop_housing_raw.P0010001,
'rate_households' : pop_housing_raw.P0180001/pop_housing_raw.H0030001})
# In[7]:
features
# In[10]:
features.to_sql('census_features', engine, schema='shape_files', if_exists='replace', index=False)
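# Note: if_exists='replace' drops and recreates shape_files.census_features on
# every run, so any indexes or grants on that table must be reapplied afterwards.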
| [
"[email protected]"
] | |
b561022b7fd0c683ba9c07ba5381c7a55b8b49cd | bc599c9a404940fae21ed6b57edb7bb9dc04e71c | /test/base_test/graphics/baseScatter.py | 2bd2cb944a44d6477901a3cff545da2ae9d41870 | [] | no_license | jcarlosglx/SparkReport | c9b37a1419f113ea13341e6641ceb17056aeb7d0 | 9d6b044f037e8dfe583bcf76c51dd792ac1cc34a | refs/heads/master | 2023-08-11T16:04:28.393856 | 2021-09-21T23:06:08 | 2021-09-21T23:06:08 | 409,001,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from test.base_test.base_http.baseGetTest import BaseGetGeneralTest
from test.base_test.base_dimension.baseGraphicTwoDimensionTest import \
BaseGraphicTwoDimensionTest
from typing import List, Type
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
class BaseGetScatterTest(BaseGetGeneralTest, BaseGraphicTwoDimensionTest):
Graphics: List[str] = ["Scatter"]
def test_get_scatter(self, get_app: Flask, get_db: Type[SQLAlchemy]):
self.reload_json()
response = get_app.test_client().get(
f"{self.url_get}{self.endpoint_get}", json=self.JSON
)
self.save_response_file(response)
code_response = str(response.status_code)
assert code_response == self.expect_status_get, self.print_error(code_response)
| [
"[email protected]"
] | |
70be044b2afc007606fdccb195846bb31d41a92a | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/SeqUtils/IsoelectricPoint.py | 817626b257eee1231c8ea29aca828e2ba2de2cf3 | [
"MIT"
] | permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /usr/share/pyshared/Bio/SeqUtils/IsoelectricPoint.py | [
"[email protected]"
] | |
c85cb0f32d51c4871c1b38ca50593c5a5e7ecd75 | b95f80c0c2e7700ed24248bb84f4ef02723e367c | /tests/k8s/test_discovery.py | 3bec8820ae816dfa8b80dda2036bf7231f9dce29 | [
"MIT"
] | permissive | tinyzimmer/kopf | b97faab3f396dc169ebe053c6b41d57d20756738 | 74c42a2acdf2a72446d290fa1f27b53ec5d43218 | refs/heads/master | 2022-12-04T17:51:30.648646 | 2020-08-30T00:23:18 | 2020-08-30T00:23:18 | 291,496,989 | 0 | 0 | MIT | 2020-08-30T15:26:12 | 2020-08-30T15:26:11 | null | UTF-8 | Python | false | false | 4,573 | py | import aiohttp.web
import pytest
from kopf.clients.discovery import discover, is_namespaced, is_status_subresource
from kopf.structs.resources import Resource
async def test_discovery_of_existing_resource(
resp_mocker, aresponses, hostname):
res1info = {'name': 'someresources', 'namespaced': True}
result = {'resources': [res1info]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
info = await discover(resource=resource)
assert info == res1info
async def test_discovery_of_unexisting_resource(
resp_mocker, aresponses, hostname):
result = {'resources': []}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
info = await discover(resource=resource)
assert info is None
@pytest.mark.parametrize('status', [403, 404])
async def test_discovery_of_unexisting_group_or_version(
resp_mocker, aresponses, hostname, status):
list_mock = resp_mocker(return_value=aresponses.Response(status=status, reason="boo!"))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
info = await discover(resource=resource)
assert info is None
async def test_discovery_is_cached_per_session(
resp_mocker, aresponses, hostname):
res1info = {'name': 'someresources1', 'namespaced': True}
res2info = {'name': 'someresources2', 'namespaced': True}
result = {'resources': [res1info]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
result = {'resources': [res2info]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources1')
info = await discover(resource=resource)
assert info == res1info
resource = Resource('some-group.org', 'someversion', 'someresources2')
info = await discover(resource=resource)
assert info is None # cached as absent on the 1st call.
resource = Resource('some-group.org', 'someversion', 'someresources1')
info = await discover(resource=resource)
assert info == res1info
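# still the cached value from the first call; the second mocked response for
# this URL is never fetched within the same session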
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_namespaced(
resp_mocker, aresponses, hostname, namespaced):
res1info = {'name': 'someresources', 'namespaced': namespaced}
result = {'resources': [res1info]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
result = await is_namespaced(resource=resource)
assert result == namespaced
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_not_a_subresource(
resp_mocker, aresponses, hostname, namespaced):
res1info = {'name': 'someresources', 'namespaced': namespaced}
result = {'resources': [res1info]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
result = await is_status_subresource(resource=resource)
assert result is False # an extra type-check
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_is_a_subresource(
resp_mocker, aresponses, hostname, namespaced):
res1info = {'name': 'someresources', 'namespaced': namespaced}
res1status = {'name': 'someresources/status', 'namespaced': namespaced}
result = {'resources': [res1info, res1status]}
list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
resource = Resource('some-group.org', 'someversion', 'someresources')
result = await is_status_subresource(resource=resource)
assert result is True # an extra type-check
| [
"[email protected]"
] | |
63eb117df50510a881cad1cd17e8650e4c931d87 | 84a5c4c2e0977d42425771098f5f881c750da7f0 | /neomodel_constraints/fetcher/constraints/util.py | 3a08ca6379d4f6cab307f35957905429b97c95ac | [] | no_license | SSripilaipong/neomodel-constraints | 6c3023ba156275e48f5f7ebcbdd283ce8d41f9a1 | 4b91185ba9eec993c58e9ae770fd3d0e90f915ae | refs/heads/main | 2023-07-15T09:58:41.451631 | 2021-08-29T13:19:38 | 2021-08-29T13:19:38 | 390,312,509 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | from typing import List
from neomodel_constraints.constraint import ConstraintSet, TypeMapperAbstract
from .data import Neo4jConstraintQueryRecord
def convert_constraints_with_type_mapper(
raw: List[Neo4jConstraintQueryRecord],
type_mapper: TypeMapperAbstract
) -> ConstraintSet:
constraints = set()
for record in raw:
constraint_type = type_mapper.map(record.type_)
constraint = constraint_type.from_raw(record.dict())
constraints.add(constraint)
return ConstraintSet(constraints)
| [
"[email protected]"
] | |
74173ef5d6c8e8f1b2f1282a3ba50014aaf181af | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/experimental/terraform/lint/tfsec/register.py | da368f57c9b4d2296cb0466cd1219a8d9616b2f6 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 245 | py | # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.terraform.lint.tfsec.rules import rules as tfsec_rules
def rules():
return tfsec_rules()
| [
"[email protected]"
] | |
e0d0bff373d69b9455fd52b2ddecf9431c15390d | e95eb3b5332ba010669f921fe6ac22f85837da2a | /examples/analysis/parse_demo.py | 1470b8acd579aa33a07b0bd3b49fffc8f89cffa2 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | LABSN/expyfun | a5998722f09bfb08e3167d6309ce0d5d534b8b18 | f324eb8c65afa9530698f15ca058700518355a8f | refs/heads/main | 2023-08-05T13:06:15.026909 | 2023-07-25T19:07:03 | 2023-07-25T19:07:03 | 11,614,571 | 13 | 19 | BSD-3-Clause | 2023-07-25T19:07:05 | 2013-07-23T17:28:02 | Python | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
"""
============
Parsing demo
============
This example shows some of the functionality of ``read_tab``.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import ast
from expyfun.io import read_tab
print(__doc__)
data = read_tab('sample.tab') # from simple_experiment
print('Number of trials: %s' % len(data))
keys = list(data[0].keys())
print('Data keys: %s\n' % keys)
for di, d in enumerate(data):
if d['trial_id'][0][0] == 'multi-tone':
print('Trial %s multi-tone' % (di + 1))
targs = ast.literal_eval(d['multi-tone trial'][0][0])
presses = [int(k[0]) for k in d['keypress']]
print(' Targs: %s\n Press: %s' % (targs, presses))
| [
"[email protected]"
] | |
5bbb358a632d9bba20e2078a0a95695607f33fff | 1a87d286396a2c6f6b6ac7c53495f80690836c7b | /LC/LC_testJustification.py | e1b9c5fe604fe74fbcb2713c10b062f9b244c481 | [] | no_license | kickbean/LeetCode | 14d33eea9dd70821114ca6d7e1a32111d4d64bf0 | 92e4de152e2aae297ef0e93c9eea61d7ad718f4e | refs/heads/master | 2016-09-10T14:38:33.692759 | 2014-04-08T00:26:51 | 2014-04-08T00:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | '''
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
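For example, with words = ["hello"] and L = 8, that line becomes "hello   "
(left-justified, padded to length L).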
Created on Feb 3, 2014
@author: Songfan
'''
def solution(words, L):
n = len(words)
if n == 0: return words
res = []
currWords = []
availableSpace = L
for wi in range(n):
w = words[wi]
wLen = len(w)
if wLen <= availableSpace: # the last word on a line needs no trailing space, so an exact fit is allowed
currWords.append(w)
availableSpace -= wLen + 1
else:
res.append(combineWords(currWords, L))
currWords = [w]
availableSpace = L - wLen - 1
if len(currWords):
last_line = ' '.join(currWords)
res.append(last_line + ' ' * (L - len(last_line)))
return res
def combineWords(words, L):
wordNum = len(words)
wordLen = 0
for w in words:
wordLen += len(w)
spaceNumTotal = L - wordLen
if wordNum == 1:
return words[0] + ' ' * spaceNumTotal
spaceNum = spaceNumTotal // (wordNum - 1)
additionalSpace = spaceNumTotal % (wordNum - 1)
res = ''
for wi in range(wordNum):
if wi == wordNum - 1:
res += words[wi]
elif additionalSpace > 0:
res += words[wi] + ' ' * (spaceNum + 1)
additionalSpace -= 1
else:
res += words[wi] + ' ' * spaceNum
return res
words = ["This", "is", "an", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
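# expected: ['This    is    an', 'example  of text', 'justification.  ']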
words = ["This", "is", "an", "vervverycrazy", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
| [
"[email protected]"
] | |
015f23d3858690ee7470909983c15dd848b5709a | 46f91363f5cc43b1644a7da93938aef3c0de29c5 | /leonardo/module/media/__init__.py | 233a0f5b0e426c65d5e8688c40baf9bf33e3e777 | [
"BSD-2-Clause"
] | permissive | shinichi81/django-leonardo | 55e1f7492813b8a877dac92aadb114785ea2eb83 | 152ad02ba23b8bc94f676a7221c15338181c67b7 | refs/heads/master | 2021-01-14T12:45:14.400206 | 2015-11-01T09:38:55 | 2015-11-01T09:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from .widget import *
default_app_config = 'leonardo.module.media.MediaConfig'
class Default(object):
optgroup = 'Media'
@property
def apps(self):
return [
'leonardo.module',
'leonardo.module.media',
]
@property
def widgets(self):
return [
DownloadListWidget,
DownloadItemWidget,
InternetVideoWidget,
MediaGalleryWidget,
SimpleImageWidget,
VectorGraphicsWidget,
PdfDocumentWidget,
FlashObjectWidget,
]
plugins = [
('leonardo.module.media.apps.category_nested', 'List of directories'),
('leonardo.module.media.apps.category_simple', 'Simple list of directories'),
]
config = {
'MEDIA_PAGINATE_BY': (25, _('Pagination count for media files')),
'MEDIA_PUBLIC_UPLOAD_TO': ('public', _('Prefix for public files from MEDIA_ROOT')),
'MEDIA_PRIVATE_UPLOAD_TO': ('private', _('Prefix for private files from MEDIA_ROOT')),
'MEDIA_IS_PUBLIC_DEFAULT': (True, _('Set uploaded files to public automatically')),
'MEDIA_ENABLE_PERMISSIONS': (True, _(
'Permissions for downloadable items. Experimental feature.')),
'MEDIA_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS': (False, _('ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS')),
'MEDIA_THUMB_SMALL_GEOM': ('64x64', _('MEDIA_THUMB_SMALL_GEOM')),
'MEDIA_THUMB_SMALL_OPT': ('', _('Another options for small thumnails')),
'MEDIA_THUMB_MEDIUM_GEOM': ('256x256', _('MEDIA_THUMB_MEDIUM_GEOM')),
'MEDIA_THUMB_MEDIUM_OPT': ('', _('Another options for medium thumnails')),
'MEDIA_THUMB_LARGE_GEOM': ('768x768', _('MEDIA_THUMB_LARGE_GEOM')),
'MEDIA_THUMB_LARGE_OPT': ('', _('Another options for large thumnails')),
'MEDIA_LOGICAL_STRUCTURE': (False, _('If is True all folders and files will has same path in the OS')),
}
page_actions = ['media/_actions.html']
class MediaConfig(AppConfig, Default):
name = 'leonardo.module.media'
verbose_name = "Media"
default = Default()
| [
"[email protected]"
] | |
0ea35b60098989cbad8bece1f505638fa7a685d2 | 01ed217a3c3c028e6cf4e3675cb86f4eef992e13 | /SimG4Core/PrintGeomInfo/test/python/runPrintSolid_cfg.py | bb9e7a06455f3f00c6cc1a434b1f718f2240c745 | [
"Apache-2.0"
] | permissive | dtp2-tpg-am/cmssw | ae318d154779c311e2e93cdffe0c7bc24d6d2593 | 7a32f48e079f78b501deee6cc9d19caba269e7fb | refs/heads/AM_12_0_2_dev | 2022-11-04T12:05:05.822865 | 2021-10-28T07:25:28 | 2021-10-28T07:25:28 | 185,209,257 | 2 | 1 | Apache-2.0 | 2022-04-26T07:18:06 | 2019-05-06T14:07:10 | C++ | UTF-8 | Python | false | false | 1,897 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_cff import Run3
process = cms.Process('G4PrintGeometry',Run3)
process.load('Configuration.Geometry.GeometryExtended2021Reco_cff')
#from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
#process = cms.Process('G4PrintGeometry',Run3_dd4hep)
#process.load('Configuration.Geometry.GeometryDD4hepExtended2021Reco_cff')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('SimG4Core.PrintGeomInfo.printGeomSolids_cff')
if hasattr(process,'MessageLogger'):
process.MessageLogger.G4cout=dict()
process.MessageLogger.G4cerr=dict()
process.MessageLogger.PrintGeom=dict()
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Physics.DefaultCutValue = 10.
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.printGeomSolids)
| [
"[email protected]"
] | |
b829831b94ca8a1f3262021ef1aab5dcd77a1e7a | e57d7785276053332c633b57f6925c90ad660580 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_08_01/aio/operations/_managed_clusters_operations.py | 56d3e44113621eb06dcba8abc584742b0bad79cf | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 62,898 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get_upgrade_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
async def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get_access_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
async def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_08_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.list_cluster_admin_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
async def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_08_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.list_cluster_user_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedCluster')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Creates or updates a managed cluster.
Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
:type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Updates tags on a managed cluster.
Updates a managed cluster with the specified tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
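# Illustrative usage sketch, not part of the generated client: a caller that
# already holds an authenticated async ContainerServiceClient (the client and
# resource names below are assumptions for the example) could drive this LRO
# as follows.
#
#   async def set_tags(client):
#       poller = await client.managed_clusters.begin_update_tags(
#           resource_group_name="my-rg",
#           resource_name="my-aks-cluster",
#           parameters=_models.TagsObject(tags={"env": "dev"}),
#       )
#       cluster = await poller.result()   # waits for the LRO to finish
#       print(cluster.tags)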
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a managed cluster.
Deletes the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
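# Sketch of the continuation-token pattern documented above: the poller's
# state can be saved and the same delete resumed later, even from a fresh
# client. Resource names are assumed example values.
#
#   poller = await client.managed_clusters.begin_delete("my-rg", "my-aks-cluster")
#   token = poller.continuation_token()   # persist this string somewhere
#   resumed = await client.managed_clusters.begin_delete(
#       "my-rg", "my-aks-cluster", continuation_token=token)
#   await resumed.result()                # returns None once deletion completes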
async def _reset_service_principal_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._reset_service_principal_profile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
async def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset Service Principal Profile of a managed cluster.
        Update the service principal profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
Managed Cluster.
:type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterServicePrincipalProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
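# Sketch of the `polling` keyword documented above: polling=False returns an
# AsyncNoPolling-backed poller that does not wait on the service. The profile
# values below are hypothetical.
#
#   profile = _models.ManagedClusterServicePrincipalProfile(
#       client_id="00000000-0000-0000-0000-000000000000",
#       secret="<new-secret>",
#   )
#   poller = await client.managed_clusters.begin_reset_service_principal_profile(
#       "my-rg", "my-aks-cluster", profile, polling=False)
#   await poller.result()   # with no polling, returns after the initial response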
async def _reset_aad_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._reset_aad_profile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
async def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset AAD Profile of a managed cluster.
        Update the AAD profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
Cluster.
:type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterAADProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
async def _rotate_cluster_certificates_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self._rotate_cluster_certificates_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
async def begin_rotate_cluster_certificates(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Rotate certificates of a managed cluster.
        Rotates the certificates of the managed cluster with the specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._rotate_cluster_certificates_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
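# Sketch: a None-returning LRO like this one can still be observed without
# blocking; status()/done() on the poller reflect the operation state.
# Resource names are assumed example values.
#
#   poller = await client.managed_clusters.begin_rotate_cluster_certificates(
#       "my-rg", "my-aks-cluster")
#   if not poller.done():
#       print(poller.status())            # e.g. "InProgress"
#   await poller.result()                 # None once rotation completes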
# --- File: /week6/homework_w6_q_c1.py (repo: sheikhusmanshakeel/statistical-mechanics-ens, no license) ---
import math, random, pylab
def rho_free(x, y, beta):
return math.exp(-(x - y) ** 2 / (2.0 * beta))
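# levy_free_path draws a free-particle path from xstart to xend in a single
# sweep (Levy construction): conditioned on x[k - 1] and the endpoint, each
# intermediate slice is Gaussian, so the whole bridge is sampled rejection-free.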
def levy_free_path(xstart, xend, dtau, N):
x = [xstart]
for k in range(1, N):
dtau_prime = (N - k) * dtau
x_mean = (dtau_prime * x[k - 1] + dtau * xend) / (dtau + dtau_prime)
sigma = math.sqrt(1.0 / (1.0 / dtau + 1.0 / dtau_prime))
x.append(random.gauss(x_mean, sigma))
return x
beta = 20.0
N = 80
dtau = beta / N
n_steps = 100000
x = [0.0] * N
data = []
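# Trotter weight for the harmonic potential V(x) = x**2 / 2. The kinetic part
# is handled exactly by the Levy bridge, so the Metropolis step only needs the
# ratio of exp(-sum_k V(x_k) * dtau) between the proposed and current paths.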
Weight_trott = lambda y: math.exp(sum(-a ** 2 / 2.0 * dtau for a in y))
for step in range(n_steps):
Ncut = random.randint(0, N-1)
# x_new = levy_free_path(x[0], x[0], dtau, N)
x_new = levy_free_path(x[0], x[Ncut], dtau, Ncut) + x[Ncut:]
if random.uniform(0, 1) < min(1, Weight_trott(x_new) / Weight_trott(x)):
x = x_new[:]
k = random.randint(0, N - 1)
data.append(x[k])
print(len(data))
pylab.hist(data, bins=50, normed=True, label='QMC')
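# Analytic check: the quantum harmonic oscillator at inverse temperature beta
# has position density pi(x) = sqrt(tanh(beta/2) / pi) * exp(-x**2 * tanh(beta/2)).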
x_values = [0.1 * a for a in range(-30, 30)]
y_values = [math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) *
            math.exp(-xx ** 2 * math.tanh(beta / 2.0)) for xx in x_values]
pylab.plot(x_values, y_values, label='exact')
pylab.xlabel('$x$')
pylab.ylabel('$\\pi(x)$ (normalized)')
pylab.axis([-3.0, 3.0, 0.0, 0.8])
pylab.legend()
ProgType = 'Levy_free_path'
pylab.title(ProgType + ' beta = ' + str(beta) + ', dtau = ' + str(dtau) +
', Nsteps = '+ str(n_steps))
pylab.savefig(ProgType + str(beta) + '.png')
pylab.show()
# --- File: /xnote/app/migrations/0002_auto_20210704_1851.py (repo: sebastianczech/Xnote, license: Apache-2.0) ---
# Generated by Django 3.2.4 on 2021-07-04 18:51
from django.db import migrations, models
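# Auto-generated schema change: each wallet model keeps the same 1-12 month
# choices, but the field default is reset to 7 (presumably the month in which
# the migration was generated).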
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='walletaccount',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='walletcar',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='walletcredit',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='walletdeposit',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='walletexpense',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='wallethouse',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
migrations.AlterField(
model_name='walletincome',
name='month',
field=models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=7),
),
]
# --- File: /znake/systest/data/fails/systest/tests/test_systest.py (repo: per-bohlin/opensourcelib, license: Apache-2.0) ---
def test_systest():
    assert False
# --- File: /test/test_appliance_upgrade_ref.py (repo: CiscoUcs/intersight-python, license: Apache-2.0) ---
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.appliance_upgrade_ref import ApplianceUpgradeRef # noqa: E501
from intersight.rest import ApiException
class TestApplianceUpgradeRef(unittest.TestCase):
"""ApplianceUpgradeRef unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApplianceUpgradeRef(self):
"""Test ApplianceUpgradeRef"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.appliance_upgrade_ref.ApplianceUpgradeRef() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
# --- File: /fakturo/billingstack/auth.py (repo: billingstack/python-fakturo-billingstack, license: Apache-2.0) ---
import logging
import simplejson as json
from requests.auth import AuthBase
from fakturo.core import client
LOG = logging.getLogger(__name__)
class AuthHelper(AuthBase, client.BaseClient):
def __init__(self, url, username=None, password=None,
account_name=None):
super(AuthHelper, self).__init__(url)
self.auth_info = {}
if not account_name:
raise ValueError('No account given.')
cred_info = {
'username': username,
'password': password,
'merchant': account_name
}
self.cred_info = cred_info
if self.cred_valid:
self.refresh_auth()
@property
def cred_valid(self):
c = self.cred_info
        return bool(c.get('username') and c.get('password'))
def get_token_key(self, key):
"""
Return something from the token info, None if no key or no info is
there.
:param key: What to get
"""
token_info = self.auth_info.get('token')
return token_info.get('id') if token_info else token_info
@property
def token(self):
return self.get_token_key('id')
@property
def endpoint(self):
return self.auth_info.get('endpoint')
@property
def account(self):
return self.auth_info.get('merchant')
def __call__(self, request):
if not self.token and self.cred_valid:
self.refresh_auth()
request.headers['X-Auth-Token'] = self.token
return request
def refresh_auth(self):
        auth_data = {k: v for k, v in self.cred_info.items() if v}
LOG.debug('Authenticating on URL %s CREDENTIALS %s' %
(self.url, auth_data))
response = self.post('/authenticate', data=json.dumps(auth_data))
self.auth_info.update(response.json)
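# Illustrative usage sketch (URL and credentials are assumptions): because
# AuthHelper subclasses requests' AuthBase, an instance can be passed directly
# to requests via the `auth` keyword.
#
#   import requests
#
#   auth = AuthHelper('http://localhost:9090/v1',
#                     username='demo', password='secret',
#                     account_name='demo-merchant')
#   resp = requests.get(auth.endpoint + '/invoices', auth=auth)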
# --- File: /xai/brain/wordbase/nouns/_rhetoricians.py (repo: cash2one/xai, license: MIT) ---
from xai.brain.wordbase.nouns._rhetorician import _RHETORICIAN
# class header
class _RHETORICIANS(_RHETORICIAN):
    def __init__(self):
_RHETORICIAN.__init__(self)
self.name = "RHETORICIANS"
self.specie = 'nouns'
self.basic = "rhetorician"
self.jsondata = {}