blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24d5d233b19405e72bb7342861b126dfc737fb56 | 393a387cdb286cde75b4b7d760625d5851b6b080 | /range.py | 6a0fd7aece9c532af35818008db4fa6baffa1ce3 | [] | no_license | nami-h/Python | b57f12ae48d5bc17a3de72ec7c5abb5622ba0cd2 | 7b067950da29df705237405742564d2f127f1446 | refs/heads/master | 2021-06-27T16:00:10.113762 | 2020-09-22T19:59:05 | 2020-09-22T19:59:05 | 136,550,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | digits=range(10)
print(list(digits),"\n")
| [
"[email protected]"
] | |
54de617615d548ce9e728d250f3a9fc61f6bfc3f | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/kusto/azext_kusto/vendored_sdks/kusto/aio/operations_async/_database_operations_async.py | f8d3664d49bb035be6717ec0313a6e916c6735bd | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 41,803 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
# Generic result type for the custom response callback below.
T = TypeVar('T')
# Type of the optional ``cls`` keyword accepted by every operation: a callable
# receiving the raw pipeline response, the deserialized body, and the response
# headers; its return value replaces the operation's default result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest,
                             AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DatabaseOperations:
    """DatabaseOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~kusto_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the model classes via ``operations.models``.
    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client             # pipeline client that sends the HTTP requests
        self._serialize = serializer      # serializer for URLs, headers and request bodies
        self._deserialize = deserializer  # deserializer for response bodies
        self._config = config             # client configuration (subscription id, polling interval, ...)
async def check_name_availability(
    self,
    resource_group_name: str,
    cluster_name: str,
    name: str,
    type: Union[str, "models.Type"],
    **kwargs
) -> "models.CheckNameResult":
    """Checks that the database name is valid and is not already in use.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param name: Resource name.
    :type name: str
    :param type: The type of resource, for instance Microsoft.Kusto/clusters/databases.
    :type type: str or ~kusto_management_client.models.Type
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CheckNameResult, or the result of cls(response)
    :rtype: ~kusto_management_client.models.CheckNameResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CheckNameResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    content_type = kwargs.pop("content_type", "application/json")
    # The service expects the name/type pair wrapped in a CheckNameRequest body.
    check_request = models.CheckNameRequest(name=name, type=type)
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self.check_name_availability.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': 'application/json',
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(check_request, 'CheckNameRequest')
    request = self._client.post(
        url, query_parameters, header_parameters, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Only 200 is a success for this action; anything else maps to an ARM error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    result = self._deserialize('CheckNameResult', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
check_name_availability.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/checkNameAvailability'}  # type: ignore
def list_by_cluster(
    self,
    resource_group_name: str,
    cluster_name: str,
    **kwargs
) -> AsyncIterable["models.DatabaseListResult"]:
    """Returns the list of databases of the given Kusto cluster.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DatabaseListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~kusto_management_client.models.DatabaseListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop(
        'cls', None)  # type: ClsType["models.DatabaseListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"

    def prepare_request(next_link=None):
        # Build a GET request: the first page is built from the URL template;
        # subsequent pages reuse the service-provided next_link verbatim
        # (it already carries its own query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        if not next_link:
            # Construct URL
            url = self.list_by_cluster.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query(
                "api_version", api_version, 'str')
            request = self._client.get(
                url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(
                url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and pull out its items.
        deserialized = self._deserialize(
            'DatabaseListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        # First element of the tuple is the continuation token; ``None`` here
        # means the pager stops after this page (the list result carries no
        # next link).
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, failing fast on any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code,
                      response=response, error_map=error_map)
            raise HttpResponseError(
                response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_cluster.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    **kwargs
) -> "models.Database":
    """Returns a database.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Database, or the result of cls(response)
    :rtype: ~kusto_management_client.models.Database
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.Database"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        databaseName=self._serialize.url("database_name", database_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {'Accept': 'application/json'}  # type: Dict[str, Any]
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    database = self._deserialize('Database', pipeline_response)
    if cls:
        return cls(pipeline_response, database, {})
    return database
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    parameters: "models.Database",
    **kwargs
) -> "models.Database":
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the Database payload from the 200/201/202 response; the poller in
    ``begin_create_or_update`` drives the operation to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.Database"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    content_type = kwargs.pop("content_type", "application/json")
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self._create_or_update_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        databaseName=self._serialize.url("database_name", database_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': 'application/json',
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'Database')
    request = self._client.put(
        url, query_parameters, header_parameters, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201, 202]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200, 201 and 202 all carry a Database payload, so one deserialize suffices.
    deserialized = self._deserialize('Database', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    parameters: "models.Database",
    **kwargs
) -> AsyncLROPoller["models.Database"]:
    """Creates or updates a database.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :param parameters: The database parameters supplied to the CreateOrUpdate operation.
    :type parameters: ~kusto_management_client.models.Database
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Database or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~kusto_management_client.models.Database]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop(
        'polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.Database"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token',
                            None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PUT. ``cls`` is overridden so
        # the raw pipeline response is returned for the poller to inspect.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            database_name=database_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; drop them before the
    # remaining kwargs are forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response of the long-running operation.
        deserialized = self._deserialize('Database', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state instead of starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    parameters: "models.Database",
    **kwargs
) -> "models.Database":
    """Issue the initial PATCH of the update long-running operation.

    Returns the Database payload from the 200/201/202 response; the poller in
    ``begin_update`` drives the operation to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.Database"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    content_type = kwargs.pop("content_type", "application/json")
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self._update_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        databaseName=self._serialize.url("database_name", database_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': 'application/json',
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'Database')
    request = self._client.patch(
        url, query_parameters, header_parameters, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201, 202]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200, 201 and 202 all carry a Database payload, so one deserialize suffices.
    deserialized = self._deserialize('Database', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_initial.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def begin_update(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    parameters: "models.Database",
    **kwargs
) -> AsyncLROPoller["models.Database"]:
    """Updates a database.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :param parameters: The database parameters supplied to the Update operation.
    :type parameters: ~kusto_management_client.models.Database
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Database or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~kusto_management_client.models.Database]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop(
        'polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.Database"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token',
                            None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PATCH. ``cls`` is overridden
        # so the raw pipeline response is returned for the poller to inspect.
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            database_name=database_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; drop them before the
    # remaining kwargs are forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response of the long-running operation.
        deserialized = self._deserialize('Database', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state instead of starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    **kwargs
) -> None:
    """Issue the initial DELETE of the delete long-running operation.

    Returns ``None`` (delete responses carry no body); the poller in
    ``begin_delete`` drives the operation to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self._delete_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        databaseName=self._serialize.url("database_name", database_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the database with the given name.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop(
        'polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token',
                            None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial DELETE. ``cls`` is overridden
        # so the raw pipeline response is handed to the poller.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            database_name=database_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; drop them before the
    # remaining kwargs are forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no response body; only the optional ``cls`` callback
        # produces a value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state instead of starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}'}  # type: ignore
def list_principal(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    **kwargs
) -> AsyncIterable["models.DatabasePrincipalListResult"]:
    """Returns a list of database principals of the given Kusto cluster and database.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DatabasePrincipalListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~kusto_management_client.models.DatabasePrincipalListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop(
        'cls', None)  # type: ClsType["models.DatabasePrincipalListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"

    def prepare_request(next_link=None):
        # Build the page request. Note the asymmetry: the first page is a POST
        # to the listPrincipals action URL, while continuation pages GET the
        # service-provided next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        if not next_link:
            # Construct URL
            url = self.list_principal.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
                'databaseName': self._serialize.url("database_name", database_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query(
                "api_version", api_version, 'str')
            request = self._client.post(
                url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(
                url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and pull out its items.
        deserialized = self._deserialize(
            'DatabasePrincipalListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        # First element of the tuple is the continuation token; ``None`` here
        # means the pager stops after this page (the result carries no next
        # link).
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, failing fast on any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code,
                      response=response, error_map=error_map)
            raise HttpResponseError(
                response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_principal.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/listPrincipals'}  # type: ignore
async def add_principal(
    self,
    resource_group_name: str,
    cluster_name: str,
    database_name: str,
    value: Optional[List["models.DatabasePrincipal"]] = None,
    **kwargs
) -> "models.DatabasePrincipalListResult":
    """Add Database principals permissions.

    :param resource_group_name: The name of the resource group containing the Kusto cluster.
    :type resource_group_name: str
    :param cluster_name: The name of the Kusto cluster.
    :type cluster_name: str
    :param database_name: The name of the database in the Kusto cluster.
    :type database_name: str
    :param value: The list of Kusto database principals.
    :type value: list[~kusto_management_client.models.DatabasePrincipal]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DatabasePrincipalListResult, or the result of cls(response)
    :rtype: ~kusto_management_client.models.DatabasePrincipalListResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop(
        'cls', None)  # type: ClsType["models.DatabasePrincipalListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-18"
    content_type = kwargs.pop("content_type", "application/json")
    # The service expects the principal list wrapped in a request model.
    principals_request = models.DatabasePrincipalListRequest(value=value)
    # Expand the URL template with the serialized path parameters.
    url = self._client.format_url(
        self.add_principal.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        clusterName=self._serialize.url("cluster_name", cluster_name, 'str'),
        databaseName=self._serialize.url("database_name", database_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': 'application/json',
    }  # type: Dict[str, Any]
    body_content = self._serialize.body(
        principals_request, 'DatabasePrincipalListRequest')
    request = self._client.post(
        url, query_parameters, header_parameters, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code,
                  response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    result = self._deserialize(
        'DatabasePrincipalListResult', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
add_principal.metadata = {
    'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/addPrincipals'}  # type: ignore
async def remove_principal(
self,
resource_group_name: str,
cluster_name: str,
database_name: str,
value: Optional[List["models.DatabasePrincipal"]] = None,
**kwargs
) -> "models.DatabasePrincipalListResult":
"""Remove Database principals permissions.
:param resource_group_name: The name of the resource group containing the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param value: The list of Kusto database principals.
:type value: list[~kusto_management_client.models.DatabasePrincipal]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabasePrincipalListResult, or the result of cls(response)
:rtype: ~kusto_management_client.models.DatabasePrincipalListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop(
'cls', None) # type: ClsType["models.DatabasePrincipalListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
database_principals_to_remove = models.DatabasePrincipalListRequest(
value=value)
api_version = "2020-09-18"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.remove_principal.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query(
"api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header(
"content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(
database_principals_to_remove, 'DatabasePrincipalListRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(
url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code,
response=response, error_map=error_map)
raise HttpResponseError(
response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize(
'DatabasePrincipalListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
remove_principal.metadata = {
'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/removePrincipals'} # type: ignore
| [
"[email protected]"
] | |
08442036c4b07c98ed97b71bbee402abbcfc2004 | be9a56d49a308b5d70c57989d11c7e6207d9d349 | /pynext/stats.py | 45a94ac1e650e4253f9a9df750d0fddbc56f5793 | [] | no_license | jjgomezcadenas/pynextsw | ab7e9823f8eb12424084c849c7c099ac6a64351b | 84db6ce3eb2cac3567dce9950a35fbbe4027f0fd | refs/heads/master | 2020-12-14T14:38:48.940016 | 2020-02-10T17:39:51 | 2020-02-10T17:39:51 | 234,772,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | import numpy as np
from typing import Tuple, List
from numpy import sqrt
NN = np.nan
from . pynext_types import Number, Array, Str, Range
def in_range(data, minval=-np.inf, maxval=np.inf):
"""
Find values in range [minval, maxval).
Parameters
---------
data : np.ndarray
Data set of arbitrary dimension.
minval : int or float, optional
Range minimum. Defaults to -inf.
maxval : int or float, optional
Range maximum. Defaults to +inf.
Returns
-------
selection : np.ndarray
Boolean array with the same dimension as the input. Contains True
for those values of data in the input range and False for the others.
"""
return (minval <= data) & (data < maxval)
def relative_error_ratio(a : float, sigma_a: float, b :float, sigma_b : float) ->float:
return sqrt((sigma_a / a)**2 + (sigma_b / b)**2)
def mean_and_std(x : np.array, range_ : Tuple[Number, Number])->Tuple[Number, Number]:
"""Computes mean and std for an array within a range: takes into account nans"""
mu = NN
std = NN
if np.count_nonzero(np.isnan(x)) == len(x): # all elements are nan
mu = NN
std = NN
elif np.count_nonzero(np.isnan(x)) > 0:
mu = np.nanmean(x)
std = np.nanstd(x)
else:
x = np.array(x)
if len(x) > 0:
y = x[in_range(x, *range_)]
if len(y) == 0:
print(f'warning, empty slice of x = {x} in range = {range_}')
print(f'returning mean and std of x = {x}')
y = x
mu = np.mean(y)
std = np.std(y)
return mu, std
def gaussian_experiment(nevt : Number = 1e+3,
mean : float = 100,
std : float = 10)->np.array:
Nevt = int(nevt)
e = np.random.normal(mean, std, Nevt)
return e
def gaussian_experiments(mexperiments : Number = 1000,
nsample : Number = 1000,
mean : float = 1e+4,
std : float = 100)->List[np.array]:
return [gaussian_experiment(nsample, mean, std) for i in range(mexperiments)]
def gaussian_experiments_variable_mean_and_std(mexperiments : Number = 1000,
nsample : Number = 100,
mean_range : Range =(100, 1000),
std_range : Range =(1, 50))->List[np.array]:
Nevt = int(mexperiments)
sample = int(nsample)
stds = np.random.uniform(low=std_range[0], high=std_range[1], size=sample)
means = np.random.uniform(low=mean_range[0], high=mean_range[1], size=sample)
exps = [gaussian_experiment(Nevt, mean, std) for mean in means for std in stds]
return means, stds, exps
def smear_e(e : np.array, std : float)->np.array:
return np.array([np.random.normal(x, std) for x in e])
| [
"[email protected]"
] | |
0ad83e8f3d57405b7257baea33455b48fb6456a6 | efb9647a0c0f8f80e5c25abacbb097e9d74dc042 | /hooks/push-git-commit-ectomy-gh-pages | 0a8f6f141f5c5c8113aa28c73a0de44df1effa49 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] | permissive | charlesreid1/b-captain-hook | 3bf38f24b9cd017e36f90e3481dd2780e553c8bc | 361f59c21a733a484f48e9bd60bce2d94dbf7b1b | refs/heads/master | 2020-03-17T06:00:49.865927 | 2019-07-13T06:06:30 | 2019-07-13T06:06:30 | 133,337,907 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | #!/usr/bin/env python
from datetime import datetime
repo = "git-commit-ectomy"
org = "charlesreid1"
branch = "gh-pages"
action = 'push'
name = '%s'%(repo)
git_url = 'https://git.charlesreid1.com/%s/%s.git'%(org,repo)
logfile = '/tmp/{action}-{name}-{branch}.log'.format(action=action,
name=name,
branch=branch)
with open(logfile,'w') as f:
f.write("\n")
f.write("-"*40)
f.write(datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
f.write("\n")
import subprocess
import os
# ------------------
# pages
#
# for a hypothetical repo "project":
#
# .git dir: /www/pages.charlesreid1.com/git.project
# htdocs dir: /www/pages.charlesreid1.com/htdocs/project
root = '/www'
pages = 'pages.charlesreid1.com'
basedir = os.path.join(root,pages)
workdir = os.path.join(basedir,"htdocs",name)
gitdir = os.path.join(basedir,"git.%s"%(name))
if( os.path.isdir( gitdir )
and os.path.isdir( os.path.join(basedir,"htdocs")) ):
# pull
pullcmd = ["git","--git-dir=%s"%(gitdir),"--work-tree=%s"%(workdir),"pull","origin","gh-pages"]
f.write("About to run the command:\n")
f.write(" $ " + " ".join(pullcmd))
f.write("\n")
#subprocess.call(pullcmd)
p = subprocess.Popen(pullcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
f.write(p.stdout.readline())
f.write(p.stderr.readline())
else:
# clone
mkdircmd = ["mkdir","-p",basedir]
clonecmd = ["git","clone","--separate-git-dir=%s"%(gitdir),"-b","gh-pages",git_url,workdir]
f.write("About to run the command:\n")
f.write(" $ " + " ".join(clonecmd))
f.write("\n")
p = subprocess.Popen(clonecmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
f.write(p.stdout.readline())
f.write(p.stderr.readline())
| [
"[email protected]"
] | ||
e85ab084b65f72e9f5ae1f4ec9668097345df8dd | 13724823af94e5e5351ffa42ca896397f12f1f05 | /LaMachine-master/lamachine/lib/python3.5/site-packages/pynlpl/tests/FoLiA/foliatools/folia2columns.py | 6216095f2be4a7ef5cf5bb37d0e68be00b20ffe5 | [] | no_license | AymanYac/Neonec-Deep-Classsifier | 21e00cb0c5561f4ac22968f748ada0aa299e0a94 | a7978f434cc09d9e00a7df5d391bae77daf17637 | refs/heads/master | 2022-06-08T12:44:10.203386 | 2018-07-06T15:28:00 | 2018-07-06T15:28:00 | 139,996,406 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,807 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
import getopt
import io
import sys
import os
import glob
try:
from pynlpl.formats import folia
except:
print("ERROR: pynlpl not found, please obtain PyNLPL from the Python Package Manager ($ sudo pip install pynlpl) or directly from github: $ git clone git://github.com/proycon/pynlpl.git", file=sys.stderr)
sys.exit(2)
def usage():
print("folia2columns", file=sys.stderr)
print(" by Maarten van Gompel (proycon)", file=sys.stderr)
print(" Centre for Language and Speech Technology, Radboud University Nijmegen",file=sys.stderr)
print(" 2016 - Licensed under GPLv3", file=sys.stderr)
print("", file=sys.stderr)
print("This conversion script reads a FoLiA XML document and produces a", file=sys.stderr)
print("simple columned output format in which each token appears on one", file=sys.stderr)
print("line. Note that only simple token annotations are supported and a lot", file=sys.stderr)
print("of FoLiA data can not be intuitively expressed in a simple columned format!", file=sys.stderr)
print("", file=sys.stderr)
print("Usage: folia2columns [options] -C [columns] file-or-dir1 file-or-dir2 ..etc..", file=sys.stderr)
print("Parameters:", file=sys.stderr)
print(" -c [columns] Comma separated list of desired column layout (mandatory), choose from:", file=sys.stderr)
print(" id - output word ID", file=sys.stderr)
print(" text - output the text of the word (the word itself)", file=sys.stderr)
print(" pos - output PoS annotation class", file=sys.stderr)
print(" poshead - output PoS annotation head feature", file=sys.stderr)
print(" lemma - output lemma annotation class", file=sys.stderr)
print(" sense - output sense annotation class", file=sys.stderr)
print(" phon - output phonetic annotation class", file=sys.stderr)
print(" senid - output sentence ID", file=sys.stderr)
print(" parid - output paragraph ID", file=sys.stderr)
print(" N - word/token number (absolute)", file=sys.stderr)
print(" n - word/token number (relative to sentence)", file=sys.stderr)
print("Options:", file=sys.stderr)
print(" --csv Output in CSV format", file=sys.stderr)
print(" -o [filename] Output to a single output file instead of stdout", file=sys.stderr)
print(" -O Output each file to similarly named file (.columns or .csv)", file=sys.stderr)
print(" -e [encoding] Output encoding (default: utf-8)", file=sys.stderr)
print(" -H Suppress header output", file=sys.stderr)
print(" -S Suppress sentence spacing (no whitespace between sentences)", file=sys.stderr)
print(" -x [sizeinchars] Space columns for human readability (instead of plain tab-separated columns)", file=sys.stderr)
print("Parameters for processing directories:", file=sys.stderr)
print(" -r Process recursively", file=sys.stderr)
print(" -E [extension] Set extension (default: xml)", file=sys.stderr)
print(" -O Output each file to similarly named .txt file", file=sys.stderr)
print(" -P Like -O, but outputs to current working directory", file=sys.stderr)
print(" -q Ignore errors", file=sys.stderr)
class settings:
output_header = True
csv = False
outputfile = None
sentencespacing = True
ignoreerrors = False
nicespacing = 0
autooutput = False
extension = 'xml'
recurse = False
encoding = 'utf-8'
columnconf = []
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "o:OPhHSc:x:E:rq", ["help", "csv"])
except getopt.GetoptError as err:
print(str(err), file=sys.stderr)
usage()
sys.exit(2)
outputfile = None
for o, a in opts:
if o == '-c':
for a in a.split(','):
settings.columnconf.append(a)
elif o == '-h':
usage()
sys.exit(0)
elif o == '-H':
settings.output_header = False
elif o == '-S':
settings.sentencespacing = False
elif o == '-e':
settings.encoding = a
elif o == '-o':
outputfile = a
elif o == '-O':
settings.autooutput = True
elif o == '-P':
settings.autooutput = True
settings.autooutput_cwd = True
elif o == '-x':
settings.nicespacing = int(a)
elif o == '-E':
settings.extension = a
elif o == '-r':
settings.recurse = True
elif o == '-q':
settings.ignoreerrors = True
elif o == '--csv':
settings.csv = True
else:
raise Exception("No such option: " + o)
if not settings.columnconf:
print("ERROR: No column configuration specified (use -c)", file=sys.stderr)
usage()
sys.exit(2)
if args:
if outputfile: outputfile = io.open(outputfile,'w',encoding=settings.encoding)
for x in args:
if os.path.isdir(x):
processdir(x,outputfile)
elif os.path.isfile(x):
process(x, outputfile)
else:
print("ERROR: File or directory not found: " + x, file=sys.stderr)
sys.exit(3)
if outputfile: outputfile.close()
else:
print ("ERROR: Nothing to do, specify one or more files or directories", file=sys.stderr)
def resize(s, i, spacing):
if len(s) >= spacing[i]:
s = s[0:spacing[i] - 1] + ' '
elif len(s) < spacing[i]:
s = s + (' ' * (spacing[i] - len(s)))
#print '[' + s + ']', len(s), spacing[i]
return s
def processdir(d, outputfile = None):
print("Searching in " + d, file=sys.stderr)
for f in glob.glob(os.path.join(d, '*')):
if f[-len(settings.extension) - 1:] == '.' + settings.extension:
process(f, outputfile)
elif settings.recurse and os.path.isdir(f):
processdir(f, outputfile)
def process(filename, outputfile=None):
try:
print("Processing " + filename, file=sys.stderr)
doc = folia.Document(file=filename)
prevsen = None
if settings.autooutput:
if settings.csv:
ext = '.csv'
else:
ext = '.columns'
if filename[-len(settings.extension) - 1:].lower() == '.' +settings.extension:
outfilename = filename[:-len(settings.extension) - 1] + ext
else:
outfilename += ext
if settings.autooutput_cwd:
outfilename = os.path.basename(outfilename)
print(" Saving as " + outfilename, file=sys.stderr)
outputfile = io.open(outfilename,'w',encoding=settings.encoding)
if settings.nicespacing:
spacing = []
for c in settings.columnconf:
if c == 'n':
spacing.append(3)
elif c == 'N':
spacing.append(7)
elif c == 'poshead':
spacing.append(5)
else:
spacing.append(settings.nicespacing)
if settings.output_header:
if settings.csv:
columns = [ '"' + x.upper() + '"' for x in settings.columnconf ]
else:
columns = [ x.upper() for x in settings.columnconf ]
if settings.nicespacing and not settings.csv:
columns = [ resize(x, i, spacing) for i, x in enumerate(settings.columnconf) ]
if settings.csv:
line = ','.join(columns)
else:
line = '\t'.join(columns)
if outputfile:
outputfile.write(line)
outputfile.write('\n')
else:
if sys.version < '3':
print(line.encode(settings.encoding))
else:
print(line)
wordnum = 0
for i, w in enumerate(doc.words()):
if w.sentence() != prevsen and i > 0:
if settings.sentencespacing:
if outputfile:
outputfile.write('\n')
else:
print()
wordnum = 0
prevsen = w.sentence()
wordnum += 1
columns = []
for c in settings.columnconf:
if c == 'id':
columns.append(w.id)
elif c == 'text':
columns.append(w.text())
elif c == 'n':
columns.append(str(wordnum))
elif c == 'N':
columns.append(str(i+1))
elif c == 'pos':
try:
columns.append(w.annotation(folia.PosAnnotation).cls)
except:
columns.append('-')
elif c == 'poshead':
try:
columns.append(w.annotation(folia.PosAnnotation).feat('head'))
except:
columns.append('-')
elif c == 'lemma':
try:
columns.append(w.annotation(folia.LemmaAnnotation).cls)
except:
columns.append('-')
elif c == 'sense':
try:
columns.append(w.annotation(folia.SenseAnnotation).cls)
except:
columns.append('-')
elif c == 'phon':
try:
columns.append(w.annotation(folia.PhonAnnotation).cls)
except:
columns.append('-')
elif c == 'senid':
columns.append(w.sentence().id)
elif c == 'parid':
try:
columns.append(w.paragraph().id)
except:
columns.append('-')
elif c:
print("ERROR: Unsupported configuration: " + c, file=sys.stderr)
sys.exit(1)
if settings.nicespacing and not settings.csv:
columns = [ resize(x,j, spacing) for j,x in enumerate(columns) ]
if settings.csv:
line = ",".join([ '"' + x + '"' for x in columns ])
else:
line = "\t".join(columns)
if outputfile:
outputfile.write(line)
outputfile.write('\n')
else:
if sys.version < '3':
print(line.encode(settings.encoding))
else:
print(line)
if settings.autooutput:
outputfile.close()
elif outputfile:
outputfile.flush()
except Exception as e:
if settings.ignoreerrors:
print("ERROR: An exception was raised whilst processing " + filename, e, file=sys.stderr)
else:
raise
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
32c3a1099738887f52f94cc6ce3f142833c8b14a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=68/params.py | 3d804381e49cd62d377929a2bbcf69c13ca14dea | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.537238',
'max_util': '2.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 68,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
2661e70feec5cebeefae3f67c6b64bbe35929ef3 | f700710ad4f7b776a2715c3bded94f6e763703b3 | /BucketConfig.py | 426a618961853f48fcbef5b45de3590a09d85638 | [
"Apache-2.0"
] | permissive | hasithadkr7/udp_150 | 445496d7d1eb316dd787a1fadafc70627cad9abb | b88e27cd254e12c97a4120e311d7269b1f7cf724 | refs/heads/master | 2020-03-09T11:05:13.655372 | 2018-04-26T06:51:25 | 2018-04-26T06:51:25 | 128,752,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | #!/bin/python
#Retrieving rain cell and men-ref files from google buckets.
# Common.
FILE_GEN_TIME = '18:00'
BUCKET_NAME = 'curwsl_nfs_1'
WRF_NODE = 'wrf0'
INITIAL_PATH_PREFIX = 'results/'
KEY_FILE_PATH = '/hec-hms/uwcc-admin.json'
# For Rain cell
RAIN_CELL_DIR = '/hec-hms/Raincell/'
WRF_RAINCELL_FILE_ZIP = 'RAINCELL_150m.zip'
WRF_RAIN_CELL_FILE = 'RAINCELL_150m.DAT'
RAIN_CELL_FILE = 'RAINCELL.DAT'
DEFAULT_DATE_SHIFT = 1
# For Mean-Rf
MEAN_REF_DIR = '/hec-hms/Meanref/'
MEAN_REF_FILE = 'kub_mean_rf.txt'
# For RF data
RF_DIR = '/hec-hms/Rainfall/'
RF_FILE = 'Norwood_stations_rf.txt'
# For RF data
SUB_REF_DIR = '/hec-hms/Subref/'
SUB_REF_FILE = 'klb_mean_rf.txt'
DEFAULT_DATE_SHIFT = 1
RF_FILE_SUFFIX = 'stations_rf.txt' | [
"[email protected]"
] | |
2d2d33b1f96726237fe2033b2cfd6180cb799052 | 74768f285874ee5d7606cde6efc21e291782996b | /web/dispatch/resource/dispatch.py | 7b2cc6650ae1e75a541e322212cc6912ee919c08 | [
"MIT"
] | permissive | marrow/web.dispatch.resource | 7354ec6b124b7c17744a810f5823c7856a2b6992 | 5f4e0a8ddbedba2390d9aaa0b8bf26292e8605f9 | refs/heads/master | 2023-01-24T11:26:03.140864 | 2016-09-26T15:02:39 | 2016-09-26T15:02:39 | 32,566,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,822 | py | # encoding: utf-8
import warnings
if __debug__:
from collections import deque
from functools import partial
from inspect import isclass, ismethod
from .exc import InvalidMethod
log = __import__('logging').getLogger(__name__)
def invalid_method(*args, **kw):
raise InvalidMethod()
class ResourceDispatch(object):
__slots__ = ()
def __repr__(self):
return "ResourceDispatch(0x{id})".format(id=id(self), self=self)
def __call__(self, context, obj, path):
verb = getattr(context, 'environ', context)['REQUEST_METHOD'].lower()
if __debug__:
if not isinstance(path, deque): # pragma: no cover
warnings.warn(
"Your code is not providing the path as a deque; this will be cast in development but"
"will explode gloriously if run in a production environment.",
RuntimeWarning, stacklevel=1
)
if isinstance(path, str):
path = deque(path.split('/')[1 if not path or path.startswith('/') else 0:])
else:
path = deque(path)
log.debug("Preparing resource dispatch. " + repr(obj), extra=dict(
dispatcher = repr(self),
context = repr(context),
obj = repr(obj),
path = list(path),
verb = verb,
))
if isclass(obj):
obj = obj(context, None, None)
yield None, obj, False # Announce class instantiation.
context.resource = obj
consumed = None
Resource = getattr(obj, '__resource__', None)
safe = {i for i in dir(obj) if i[0] != '_'} | {'options'}
if 'get' in safe: safe.add('head')
if 'collection' not in context:
context.collection = None
if 'response' in context:
context.response.allow = {i.upper() for i in safe if ismethod(getattr(obj, i, None)) or i in {'head', 'options'}}
if path and path[0] in safe:
consumed = attr = path.popleft()
attr = getattr(obj, attr, None)
if not attr and consumed in {'head', 'options'}:
attr = partial(getattr(self, consumed), obj)
if isclass(attr):
yield consumed, attr(context, obj, None), False
return
yield consumed, attr, True
return
if path and Resource:
context.collection = obj
try:
obj = Resource(context, obj, obj[path[0]])
except KeyError:
pass
else:
yield path.popleft(), obj, False
return
if verb and verb in safe:
obj = getattr(obj, verb, None)
if not obj and verb in {'head', 'options'}:
obj = partial(getattr(self, verb), obj)
yield None, obj, True
return
yield None, invalid_method, True
def head(self, obj, *args, **kw):
"""Allow the get method to set headers, but return no content.
This performs an internal GET and strips the body from the response.
"""
obj.get(*args, **kw)
return
def options(self, obj, *args, **kw):
"""The allowed methods are present in the returned headers."""
return None
| [
"[email protected]"
] | |
4e7c5cb3a9bddaad548a8537a1f053a9bb28304a | 3416464630bc3322dd677001811de1a6884c7dd0 | /dynamic_program/q121_bestTimeToBuyAndSellStock/dp_solution.py | e9bd3a005e1a0987481b0a013628cfb1511719b8 | [] | no_license | ttomchy/LeetCodeInAction | f10403189faa9fb21e6a952972d291dc04a01ff8 | 14a56b5eca8d292c823a028b196fe0c780a57e10 | refs/heads/master | 2023-03-29T22:10:04.324056 | 2021-03-25T13:37:01 | 2021-03-25T13:37:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: dp_solution.py
Description:
Author: Barry Chow
Date: 2020/10/19 10:52 PM
Version: 0.1
"""
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) == 0:
return 0
max_profit = 0
min_price = prices[0]
for i in range(1, len(prices)):
if prices[i] > min_price:
if (prices[i] - min_price) > max_profit:
max_profit = prices[i] - min_price
else:
min_price = prices[i]
return max_profit
| [
"[email protected]"
] | |
5052bed389896a4f70a830f17f2280b6968dce56 | 38238f576b302835a285954711c62c69e65009c0 | /about_page/migrations/0007_auto_20201124_1313.py | 02fac6e66687346980f0c787624df864fb9ac062 | [] | no_license | iamdarshan7/New | ca107680c247fa94340bfc3937edc6dff7b8060e | 9075f50438e3f9911dd0b27d7c5e2806f25f4d3c | refs/heads/master | 2023-01-21T00:01:31.489631 | 2020-11-27T05:00:48 | 2020-11-27T05:00:48 | 316,404,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # Generated by Django 2.2.17 on 2020-11-24 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('about_page', '0006_auto_20201124_0926'),
]
operations = [
migrations.CreateModel(
name='Teamsec1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sec1_title', models.CharField(blank=True, max_length=100, null=True)),
('sec1_image', models.ImageField(upload_to='Images/')),
],
),
migrations.CreateModel(
name='Teamsec2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sec2_title', models.CharField(blank=True, max_length=100, null=True)),
('sec2_name', models.CharField(blank=True, max_length=100, null=True)),
('sec2_image', models.ImageField(upload_to='Images/')),
],
),
migrations.AlterField(
model_name='aboutsec6',
name='sec6_title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
7c81a30415a63c9cc197660d8bea9891378f1cb6 | de712ec0d239fc36f7d7b4b11e9c0e6d6a45458b | /src/aptus/gui/help.py | d48d15c5dc912347cfb79a493963db3f7e5c2509 | [] | no_license | nedbat/aptus | b76f241df5aedc2dc92ffe1f6b6bfe222aca0810 | b58a914efa868ce85151ba8f0361912d77c3a2cb | refs/heads/master | 2023-08-20T03:40:57.405977 | 2023-08-08T14:13:26 | 2023-08-08T14:13:26 | 276,628,031 | 21 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,265 | py | """ Help dialog for Aptus.
"""
import webbrowser
import sys
import numpy
import wx
import wx.html2
import wx.lib.layoutf
from PIL import Image
from aptus import data_file, __version__
from aptus.options import AptusOptions
class HtmlDialog(wx.Dialog):
""" A simple dialog for displaying HTML, with clickable links that launch
a web browser, or change the page displayed in the dialog.
"""
def __init__(self, parent, caption, pages, subs=None,
pos=wx.DefaultPosition, size=(500,530),
style=wx.DEFAULT_DIALOG_STYLE):
wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
if pos == (-1, -1):
self.CenterOnScreen(wx.BOTH)
self.pages = pages
self.subs = subs or {}
self.html = wx.html2.WebView.New(self)
self.html.Bind(wx.html2.EVT_WEBVIEW_NAVIGATING, self.on_navigating)
ok = wx.Button(self, wx.ID_OK, "OK")
ok.SetDefault()
lc = wx.lib.layoutf.Layoutf('t=t#1;b=t5#2;l=l#1;r=r#1', (self,ok))
self.html.SetConstraints(lc)
self.set_page('interactive')
lc = wx.lib.layoutf.Layoutf('b=b5#1;r=r5#1;w!80;h*', (self,))
ok.SetConstraints(lc)
self.SetAutoLayout(1)
self.Layout()
def on_navigating(self, event):
url = event.GetURL()
if url == "":
event.Veto()
elif url.startswith(("http:", "https:")):
webbrowser.open(url)
event.Veto()
elif url.startswith('internal:'):
self.set_page(url.split(':')[1])
def set_page(self, pagename):
html = self.pages['head'] + self.pages[pagename]
html = html % self.subs
self.html.SetPage(html, "")
# The help text
is_mac = ('wxMac' in wx.PlatformInfo)
TERMS = {
'ctrl': 'cmd' if is_mac else 'ctrl',
'iconsrc': data_file('icon48.png'),
'version': __version__,
'python_version': sys.version,
'wx_version': wx.__version__,
'numpy_version': numpy.__version__,
'pil_version': Image.__version__,
}
HELP_PAGES = {
'head': """\
<style>
kbd {
display: inline-block;
background: #f0f0f0;
border: 2px solid #888;
border-color: #888 #333 #333 #888;
border-radius: .25em;
padding: .1em .25em;
margin: .1em;
}
</style>
<table width='100%%'>
<tr>
<td width='50' valign='top'><img src='%(iconsrc)s'/></td>
<td valign='top'>
<b>Aptus %(version)s</b>, Mandelbrot set explorer.<br>
Copyright 2007-2020, Ned Batchelder.<br>
<a href='https://nedbatchelder.com/code/aptus'>http://nedbatchelder.com/code/aptus</a>
</td>
</tr>
</table>
<p>
<a href='internal:interactive'>Interactive</a> |
<a href='internal:command'>Command line</a> |
<a href='internal:about'>About</a></p>
<hr>
""",
'interactive': """
<p><b>Interactive controls:</b></p>
<blockquote>
<kbd>a</kbd>: set the angle of rotation.<br>
<kbd>c</kbd>: toggle continuous coloring.<br>
<kbd>f</kbd>: toggle full-screen display.<br>
<kbd>h</kbd> or <kbd>?</kbd>: show this help.<br>
<kbd>i</kbd>: set the limit on iterations.<br>
<kbd>j</kbd>: jump among a few pre-determined locations.<br>
<kbd>n</kbd>: create a new window.<br>
<kbd>o</kbd>: open a saved settings or image file.<br>
<kbd>r</kbd>: redraw the current image.<br>
<kbd>s</kbd>: save the current image or settings.<br>
<kbd>w</kbd>: set the window size.<br>
<kbd><</kbd> or <kbd>></kbd>: switch to the next palette.<br>
<kbd>,</kbd> or <kbd>.</kbd>: cycle the current palette one color.<br>
<kbd>;</kbd> or <kbd>'</kbd>: stretch the palette colors (+%(ctrl)s: just a little), if continuous.<br>
<kbd>[</kbd> or <kbd>]</kbd>: adjust the hue of the palette (+%(ctrl)s: just a little).<br>
<kbd>{</kbd> or <kbd>}</kbd>: adjust the saturation of the palette (+%(ctrl)s: just a little).<br>
<kbd>0</kbd> (zero): reset all palette adjustments.<br>
<kbd>space</kbd>: drag mode: click to drag the image to a new position.<br>
<kbd>shift</kbd>: indicate a point of interest for Julia set and point info.<br>
<b>left-click</b>: zoom in (+%(ctrl)s: just a little).<br>
<b>right-click</b>: zoom out (+%(ctrl)s: just a little).<br>
<b>left-drag</b>: select a new rectangle to display.<br>
<b>middle-drag</b>: drag the image to a new position.<br>
</blockquote>
<p><b>Tool windows: press a key to toggle on and off:</b></p>
<blockquote>
<kbd>J</kbd> (shift-j): Show a Julia set for the current (shift-hovered) point.<br>
<kbd>l</kbd> (ell): Show zoom snapshots indicating the current position.<br>
<kbd>p</kbd>: Show a list of palettes that can be applied to the current view.<br>
<kbd>q</kbd>: Show point info for the current (shift-hovered) point.<br>
<kbd>v</kbd>: Show statistics for the latest calculation.
</blockquote>
""",
'command': """
<p>On the command line, use <tt><b>--help</b></tt> to see options:</p>
<pre>""" + AptusOptions(None).options_help() + "</pre>",
'about': """
<p>Built with
<a href='http://python.org'>Python</a>, <a href='http://wxpython.org'>wxPython</a>,
<a href='http://numpy.scipy.org/'>numpy</a>, and
<a href='http://www.pythonware.com/library/pil/handbook/index.htm'>PIL</a>.</p>
<p>Thanks to Rob McMullen and Paul Ollis for help with the drawing code.</p>
<hr>
<p>Installed versions:</p>
<p>
Aptus: %(version)s<br>
Python: %(python_version)s<br>
wx: %(wx_version)s<br>
numpy: %(numpy_version)s<br>
PIL: %(pil_version)s
</p>
""",
}
class HelpDlg(HtmlDialog):
""" The help dialog for Aptus.
"""
def __init__(self, parent):
HtmlDialog.__init__(self, parent, "Aptus", HELP_PAGES, subs=TERMS, size=(650,530))
| [
"[email protected]"
] | |
ed02247eb0a5e87b05a7dea03227101bca64ab60 | 71ed291b47017982a38524b4ff8fe94aa947cc55 | /String/LC389. Find the difference.py | 46b2982b623687adc315be5bab3e80fd64c6bc44 | [] | no_license | pingting420/LeetCode_Algorithms | da83b77e8f37bd4f461b0a7e59c804871b6151e5 | f8786864796027cf4a7a8b0ad76e0b516cd99b54 | refs/heads/main | 2023-07-17T22:46:08.803128 | 2021-09-02T22:06:38 | 2021-09-02T22:06:38 | 375,401,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def findTheDifference(s,t):
s = Counter(s)
t = COunter(t)
for i in (t-s):
return i
def findTheDifference(s,t):
return list(Counter(t) - Counter(s))[0]
def findTheDifference(s,t):
for i in set(t):
if s.count(i) != t.count(i):
return i
| [
"[email protected]"
] | |
dd408d7bb7c75f2b873fb135914f22ae124a2df8 | 5d622c4b5df54f880f9476931ffb697afc63a9e2 | /src/runtime/workflows/guard_engine/guard_engine_wf.py | e1df46e5ac3932440dee2b15b1ee176242671edd | [] | no_license | anirudh458/final-lab-test | 96d27219387c91f7f7fd346899324dd672eb21cb | e9aab1cd5c82993941d605cfa4a045a8db01036f | refs/heads/master | 2021-06-16T19:06:41.569044 | 2017-05-31T19:22:59 | 2017-05-31T19:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py |
from runtime.components.guard.guard import Guard
from runtime.components.engine.engine import Guard
from runtime.emgrs.svem.svem import EntityMgr
from runtime.components import guard
from runtime.components import engine
class GuardSysWf():
def __init__(self):
em = EntityMgr()
guard = Guard(em)
engine = Engine(em)
# set up routes in the guard
guard.add_command_handler(Cmd.add_user, guard.add_user.add_user.AddUser.do)
guard.add_command_handler(Cmd.del_user, guard.del_user.del_user.DelUser.do)
guard.add_command_handler(Cmd.show_users, guard.show_users.show_users.ShowUsers.do)
# set up routes in the engine
engine.add_command_handler(Cmd.add_user, engine.add_user.add_user.AddUser.do)
engine.add_command_handler(Cmd.del_user, engine.del_user.del_user.DelUser.do)
engine.add_command_handler(Cmd.show_users, engine.show_users.show_users.ShowUsers.do)
self.em = em
self.guard = guard
self.engine = engine
def run(instr):
result = None
try:
# action same as instr
action = self.guard.do(instr)
result = self.sys.do(action)
except Exception as e:
result = e
finally:
return result
| [
"[email protected]"
] | |
44f7af07f42fb608cedc3c5e1f67676fcd65524f | ebc00ddf4c8c5f5076471e8b8d56c2b634c51230 | /test/functional/mempool_persist.py | 9e50e3a3ba0e5b22e31c19a362dc431b32c0d05b | [
"MIT"
] | permissive | BlockMechanic/rain | 584a9e245cfb7ab5fb1add97b699b86833bfbc5b | e8818b75240ff9277b0d14d38769378f05d0b525 | refs/heads/master | 2021-07-03T03:48:53.977665 | 2021-03-04T01:28:20 | 2021-03-04T01:28:20 | 228,412,343 | 0 | 0 | MIT | 2019-12-16T15:03:28 | 2019-12-16T15:03:27 | null | UTF-8 | Python | false | false | 6,556 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Rain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, raind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
from test_framework.test_framework import RainTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class MempoolPersistTest(RainTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prioritize a transaction on node0")
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'], fees['modified'])
self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
# Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[2].getrawmempool()), 5)
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug('Verify prioritization is loaded correctly')
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 5)
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prevent raind from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are creating a tmp folder called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
| [
"[email protected]"
] | |
1362efbb6d53f3383cea29321ab304f0e370154a | 8dc6423cca2eb626b1f9ce76d576e95ac17181f8 | /news/migrations/0003_auto_20180919_1420.py | 0e3a984792ef8d8547e078b029e9116b1aab56dd | [] | no_license | codeSapience/django_news_app | 07d67c44105ee30626e740ec6c534d7d6cc07ee4 | 2a55d0503d128d88d4c7b03b766d68c9c99516a0 | refs/heads/master | 2020-03-29T01:49:12.729978 | 2018-10-05T00:00:15 | 2018-10-05T00:00:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-19 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0002_auto_20180919_1304'),
]
operations = [
migrations.AlterModelOptions(
name='news',
options={'ordering': ['-pub_date']},
),
migrations.AlterField(
model_name='news',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
35123285e1569f4fd529e804592e81ec15765527 | 2a61b02c26e77686e38cd9039e6f4b0530ddb7c9 | /bitbots_navigation/bitbots_localization/src/bitbots_localization/localization_dsd/actions/initialize.py | 01deaa97243938ce01c27e519039c7f8aa8a5bea | [
"MIT"
] | permissive | fly-pigTH/bitbots_thmos_meta | 931413e86929751024013b8e35f87b799243e22c | f45ccc362dc689b69027be5b0d000d2a08580de4 | refs/heads/master | 2023-08-27T02:58:08.397650 | 2021-10-22T17:17:11 | 2021-10-22T17:17:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | import rospy
from dynamic_stack_decider.abstract_action_element import AbstractActionElement
from bitbots_localization.srv import ResetFilter
class AbstractInitialize(AbstractActionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(AbstractInitialize, self).__init__(blackboard, dsd, parameters=None)
self.called = False
self.last_service_call = 0
self.time_between_calls = 2 # [s]
self.first_perform = True
def perform(self, reevaluate=False):
raise NotImplementedError
class DoNothing(AbstractInitialize):
def perform(self, reevaluate=False):
rospy.logdebug("doing nothing")
return
class InitPose(AbstractInitialize):
def perform(self, reevaluate=False):
rospy.logdebug("initializing pose")
rospy.wait_for_service('reset_localization')
reset_filter_proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
try:
resp = reset_filter_proxy(0, None, None)
return resp.success
except rospy.ServiceException as e:
rospy.logerr(f"Service call failed: {e}")
class InitLeftHalf(AbstractInitialize):
def perform(self, reevaluate=False):
rospy.logdebug("initializing left half")
rospy.wait_for_service('reset_localization')
reset_filter_proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
try:
resp = reset_filter_proxy(1, None, None)
return resp.success
except rospy.ServiceException as e:
rospy.logerr(f"Service call failed: {e}")
class InitRightHalf(AbstractInitialize):
def perform(self, reevaluate=False):
rospy.logdebug("initializing right half")
rospy.wait_for_service('reset_localization')
reset_filter_proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
try:
resp = reset_filter_proxy(2, None, None)
return resp.success
except rospy.ServiceException as e:
rospy.logerr(f"Service call failed: {e}")
class InitPosition(AbstractInitialize):
def perform(self, reevaluate=False):
self.do_not_reevaluate()
rospy.logdebug("initializing position")
rospy.wait_for_service('reset_localization')
reset_filter_proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
try:
resp = reset_filter_proxy(
3,
self.blackboard.poseX,
self.blackboard.poseY)
except rospy.ServiceException as e:
rospy.logerr(f"Service call failed: {e}")
return self.pop()
class InitSide(AbstractInitialize):
def perform(self, reevaluate=False):
self.do_not_reevaluate()
rospy.logdebug("initializing on the side line of our half")
rospy.wait_for_service('reset_localization')
reset_filter_proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
try:
resp = reset_filter_proxy(0, None, None)
except rospy.ServiceException as e:
rospy.logerr(f"Service call failed: {e}")
return self.pop()
| [
"[email protected]"
] | |
e9a5a4858ee18294253987862fd245e034788500 | 57c13a2500561e72e382489c23e9c0b8347be605 | /network_programming/chat_project/chat_server.py | bd32ca8fa7b9bb2c5a8bc51e0d00e53983448314 | [] | no_license | linheimx/python_master | 7403d7af639e31810c90b2fba14972a6d3dcfcec | 7fb7c467bedaff1515975807552a0ba05e30f15e | refs/heads/master | 2021-01-21T21:54:55.537994 | 2016-12-23T15:05:14 | 2016-12-23T15:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | import sys
import socket
import select
HOST = ""
PORT = 9090
SOCKET_LIST = []
RECV_BUFFER = 4096
def chat_server():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
SOCKET_LIST.append(server_socket)
print("Chat server started on port ", str(PORT))
while True:
ready_to_read, write_to_ready, in_error = select.select(SOCKET_LIST, [], [], 0)
for sock in ready_to_read:
if sock == server_socket:
# a enw connection request recieved
sockfd, addr = server_socket.accept()
SOCKET_LIST.append(sockfd)
print("Client {} connected".format(addr))
msg = "[{}] entered our chatting room".format(addr)
print(msg)
broadcast(server_socket, sockfd, msg)
else:
# process data recieved from client
try:
data = sock.recv(RECV_BUFFER)
if data:
msg = "[{}]{}".format(sock.getpeername(), data.decode("utf-8"))
print(msg)
broadcast(server_socket, sock, msg)
else:
if sock in SOCKET_LIST:
SOCKET_LIST.remove(sock)
msg = "Client offline\n"
broadcast(server_socket, sock, msg)
except:
msg = "Client offline\n"
broadcast(server_socket, sock, msg)
server_socket.close()
def broadcast(server_sock, sock, msg):
for s in SOCKET_LIST:
if s != server_sock and s != sock:
try:
s.send(msg.encode("utf-8"))
except Exception as e:
print(e)
SOCKET_LIST.remove(s)
s.close()
if __name__ == "__main__":
sys.exit(chat_server())
| [
"[email protected]"
] | |
7ce31686205d472fb1883b4327ca1d1dd6db0ec6 | d859e135cb2c7bc4b5d3c62c99c3ca49784b6ca3 | /linehaul/cli.py | c4e3fbce803f1a4da2f591f4440d478ca743b71a | [
"Apache-2.0"
] | permissive | reaperhulk/linehaul | c7dfe2de163d5062572b2fd1626c69d3fae592fd | 1058adfdedec3c75f5e4f32108ff727fcddd4d9d | refs/heads/master | 2021-01-13T03:18:07.795816 | 2016-07-01T12:31:36 | 2016-07-01T12:31:36 | 77,585,767 | 0 | 0 | null | 2016-12-29T05:37:23 | 2016-12-29T05:37:23 | null | UTF-8 | Python | false | false | 4,335 | py | #!/usr/bin/env python3.5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging.config
import click
import prometheus_client
import raven
from . import _tls as tls
from ._click import AsyncCommand
from ._server import Server
from .bigquery import BigQueryClient
from .core import Linehaul
__version__ = raven.fetch_package_version("linehaul")
@click.command(
cls=AsyncCommand,
context_settings={"auto_envvar_prefix": "LINEHAUL"},
)
@click.option("--bind", default="0.0.0.0")
@click.option("--port", type=int, default=512)
@click.option("--token")
@click.option("--account")
@click.option("--key", type=click.File("r"))
@click.option("--reuse-port/--no-reuse-port", default=True)
@click.option(
"--tls-ciphers",
default="ECDHE+CHACHA20:ECDH+AES128GCM:ECDH+AES128:!SHA:!aNULL:!eNULL",
)
@click.option(
"--tls-certificate",
type=click.Path(
exists=True,
dir_okay=False,
readable=True,
resolve_path=True,
),
)
@click.option("--metrics-port", type=int, default=12000)
@click.option("--sentry-dsn")
@click.option("--sentry-ua-dsn")
@click.option("--log-file")
@click.argument("table", envvar="BIGQUERY_TABLE")
@click.pass_context
async def main(ctx, bind, port, token, account, key, reuse_port, tls_ciphers,
tls_certificate, metrics_port, sentry_dsn, sentry_ua_dsn,
log_file, table):
# Configure logging
target_logger = "logfile" if log_file else "console"
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"console": {
"format": "[%(asctime)s][%(levelname)s] %(name)s "
"%(filename)s:%(funcName)s:%(lineno)d | %(message)s",
"datefmt": "%H:%M:%S",
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "console",
},
"logfile": {
"level": "DEBUG",
"class": "logging.handlers.WatchedFileHandler",
"formatter": "console",
"filename": log_file or "/dev/null",
},
"sentry": {
"level": "ERROR",
"class": "raven.handlers.logging.SentryHandler",
"dsn": sentry_dsn,
"release": __version__,
},
"ua_sentry": {
"level": "ERROR",
"class": "raven.handlers.logging.SentryHandler",
"dsn": sentry_ua_dsn,
"release": __version__,
},
},
"loggers": {
"": {
"handlers": [target_logger, "sentry"],
"level": "DEBUG",
"propagate": False,
},
"linehaul.user_agents": {
"handlers": [target_logger, "ua_sentry"],
"level": "DEBUG",
"propagate": False,
},
},
})
# Start up our metrics server in another thread.
prometheus_client.start_http_server(metrics_port)
bqc = BigQueryClient(*table.split(":"), client_id=account, key=key.read())
if tls_certificate is not None:
ssl_context = tls.create_context(tls_certificate, tls_ciphers)
else:
ssl_context = None
with Linehaul(token=token, bigquery=bqc, loop=ctx.event_loop) as lh:
async with Server(lh, bind, port,
reuse_port=reuse_port,
ssl=ssl_context,
loop=ctx.event_loop) as s:
try:
await s.wait_closed()
except asyncio.CancelledError:
click.echo(click.style("Shutting Down...", fg="yellow"))
| [
"[email protected]"
] | |
0b5ef3d75664e973d42db706c3d83768ccc1934e | 4f74e6d72b98cd1da2190313e4a7eb9d342cc93d | /environments/admin.py | 352cb786ec086a6c618fb44c7ec20f14f7ba1fc8 | [
"BSD-3-Clause",
"MIT"
] | permissive | adamgogogo/glitchtip-backend | ef0c529b71d5a4632a235b40a10e0b428a1cee3a | ee71d1b732d92868189d520aa111c09b116b7b22 | refs/heads/master | 2023-02-01T23:10:53.734450 | 2020-12-19T19:32:10 | 2020-12-19T19:32:10 | 323,588,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from django.contrib import admin
from .models import Environment, EnvironmentProject
class EnvironmentAdmin(admin.ModelAdmin):
pass
admin.site.register(Environment, EnvironmentAdmin)
class EnvironmentProjectAdmin(admin.ModelAdmin):
pass
admin.site.register(EnvironmentProject, EnvironmentProjectAdmin)
| [
"[email protected]"
] | |
7f4adc0c433e1c8f76de5eb5b022daf9210bc848 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1674486_0/Python/fantastication/diamond.py | 4d2de5d2b9dc57390ec5ef8ae812eb4b981604ce | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | def check(n, path):
#print "path to", n, ": ", path
parents = classes[n-1]
for parent in parents:
if parent == -1:
continue
if parent in path:
#print "FOUND", path
return True
else:
path.append(parent)
#print "adding", parent
for parent in parents:
if check(parent, path):
return True
return False
inp = open("input.txt", "r")
out = open("output.txt", "w")
num_cases = int(inp.readline())
for case in xrange(num_cases):
#print "\nnew case:", case+1
num_classes = int(inp.readline())
classes = []
for i in xrange(num_classes):
cl = [int(a) for a in inp.readline().split()]
if cl[0]:
classes.append(cl[1:])
else:
classes.append([])
#print "classes:", classes
hooray = False
for i in xrange(num_classes):
if check(i + 1, []):
hooray = True
break
if hooray:
out.write("Case #{0}: Yes\n".format(case+1))
else:
out.write("Case #{0}: No\n".format(case+1))
| [
"[email protected]"
] | |
f2776cf8026c189c2a2aa1e53aa4d94fd55e6f58 | c825ab84e533f4f306656a758ee469a27c5b232b | /mysite/settings.py | 1833cee2fa370af6a81c1ff217ece67f36bc030f | [] | no_license | wataru-ikeda/my-first-blog | e6244c54b509d0c3e6dd42c1c49f90b6ec93397a | d2f293a112ae9454006edd5647fc5c992673c0f9 | refs/heads/master | 2021-05-25T07:56:25.395351 | 2020-04-08T03:12:01 | 2020-04-08T03:12:01 | 253,728,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ekqh#8x!38-pkcd0%m1u4!32-l!mc^%o-@6*@@176$#1+)=ag0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"[email protected]"
] | |
c2867740cb5f78444e1cd652e06ea817001d4ae3 | 07bdfcb29eda2048278dff96f7e2d3a52a199ece | /Backend Python/ExamSystemPython/examsystemapp/api/college.py | d67de71b0f26e4f0526d89cd1be488c466285a78 | [] | no_license | shreyassiddanagoudar/shreya | 5ac11e4ba0312b55c7a221d82b6c36cafcd291ab | c51bc363e74a300d97b6d78d4f5aee043762ac38 | refs/heads/master | 2023-07-15T09:38:19.653419 | 2021-08-28T09:48:09 | 2021-08-28T09:48:09 | 361,653,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,134 | py | """
Created By : <Auto generated code>
Created On :
Reviewed By :
Reviewed On :
Version :
"""
import json
from django.http import HttpRequest
from examsystemapp.api.base_controller import BaseController
from examsystemapp.models.college import CollegeModel
from examsystemapp.services.college_service import CollegeService
from examsystemapp.utils.constants.constants import DataTypes, HttpMethodType, AppConstants
from examsystemapp.utils.helpers.general_helper import IntHelper, FloatHelper
from examsystemapp.utils.helpers.request_helper import RequestConfig, ParamsObject
class College(BaseController):
def __init__(self, request):
BaseController.__init__(self, request)
def add(self, request: HttpRequest):
college_json = json.loads(request.POST.get("college_json"))
college_object: CollegeModel = CollegeModel()
# college_object.collegeid = college_json.get("collegeid")
college_object.universityid = college_json.get("universityid")
college_object.name = college_json.get("name")
college_object.code = college_json.get("code")
college_object.addr1 = college_json.get("addr1")
college_object.addr2 = college_json.get("addr2")
college_object.addr3 = college_json.get("addr3")
college_object.cityid = college_json.get("cityid")
college_object.stateid = college_json.get("stateid")
college_object.pincode = college_json.get("pincode")
college_object.phone = college_json.get("phone")
college_object.email = college_json.get("email")
college_object.logo = college_json.get("logo")
college_object.url = college_json.get("url")
college_service: CollegeService = CollegeService()
college_object = college_service.add(college_object)
return self.send_response(college_object)
def update(self, request: HttpRequest):
college_json = json.loads(request.POST.get("college_json"))
college_object: CollegeModel = CollegeModel()
college_object.collegeid = college_json.get("collegeid")
college_object.universityid = college_json.get("universityid")
college_object.name = college_json.get("name")
college_object.code = college_json.get("code")
college_object.addr1 = college_json.get("addr1")
college_object.addr2 = college_json.get("addr2")
college_object.addr3 = college_json.get("addr3")
college_object.cityid = college_json.get("cityid")
college_object.stateid = college_json.get("stateid")
college_object.pincode = college_json.get("pincode")
college_object.phone = college_json.get("phone")
college_object.email = college_json.get("email")
college_object.logo = college_json.get("logo")
college_object.url = college_json.get("url")
college_service: CollegeService = CollegeService()
college_object = college_service.update(college_object)
return self.send_response(college_object)
def delete(self, request: HttpRequest):
college_json = json.loads(request.POST.get("college_json"))
college_object: CollegeModel = CollegeModel()
college_object.collegeid = college_json.get("collegeid")
college_service: CollegeService = CollegeService()
college_object = college_service.delete(college_object)
return self.send_response(college_object)
def get(self, request: HttpRequest):
params = [
{"id": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.INT)}
]
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
college_service: CollegeService = CollegeService()
data = college_service.get(params)
return self.send_response(data)
def get_list(self, request: HttpRequest):
params = [
{"ids": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.STRING, default='')}
]
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
college_service: CollegeService = CollegeService()
data = college_service.get_list(params)
return self.send_response(data)
def get_object(self, request: HttpRequest):
params = []
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
college_service: CollegeService = CollegeService()
data = college_service.get_object(params)
return self.send_response(data)
def get_list_object(self, request: HttpRequest):
params = []
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
college_service: CollegeService = CollegeService()
data = college_service.get_list_object(params)
return self.send_response(data)
def get_list_object_page(self, request: HttpRequest):
params = [
{"CollegeName ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.STRING, default=None)},
{"Code": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
{"UniversityID ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
{"StateID": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
{"CityID ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
{"page_num": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=1)},
{"page_size": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=10)},
]
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
college_service: CollegeService = CollegeService()
data = college_service.get_list_object_paginated(params)
return self.send_response(data) | [
"[email protected]"
] | |
4ef1d2aaba39ed43d41d7f9b2fb9488b5ee99223 | cf0f3f1bb02048d99be4e74254a4e48f4ca78ac6 | /0x1F-pascal_triangle/0-pascal_triangle.py | 0a8d2ae715fa29481d1397bef23064c44f83ae94 | [] | no_license | andreammgcol/holbertonschool-interview | 89277dc9aebb0f36d77b995b58f6d060c48692bc | 01bc3b29f44f8b76a56879b00bc77d2f9a919306 | refs/heads/master | 2023-07-20T13:23:31.172414 | 2021-08-26T04:42:55 | 2021-08-26T04:42:55 | 280,991,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #!/usr/bin/python3
""" Pascal triangle """
def pascal_triangle(n):
""" Function that returns a list of lists of integers
representing the Pascal’s triangle of n """
triangle = []
if n <= 0:
return triangle
for i in range(1, (n + 1)):
sub = []
for j in range(i):
sub.append(1)
triangle.append(sub)
for i in range(len(triangle)):
for j in range(i):
if j != 0:
triangle[i][j] = triangle[i - 1][j] + triangle[i - 1][j - 1]
return triangle
| [
"[email protected]"
] | |
0ec96347624c3779acb977f9f453bee286bcb934 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /examples/v2/incidents/DeleteIncident.py | a134c05b864f6bbffc44a61f18cc8ae6dcb87435 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 555 | py | """
Delete an existing incident returns "OK" response
"""
from os import environ
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v2.api.incidents_api import IncidentsApi
# there is a valid "incident" in the system
INCIDENT_DATA_ID = environ["INCIDENT_DATA_ID"]
configuration = Configuration()
configuration.unstable_operations["delete_incident"] = True
with ApiClient(configuration) as api_client:
api_instance = IncidentsApi(api_client)
api_instance.delete_incident(
incident_id=INCIDENT_DATA_ID,
)
| [
"[email protected]"
] | |
0ea01cbe7bd8170f064d5e9e92d39c5a5fe6765e | bdc12ac21a4c7b83a43258b46d6008c5f36a71e6 | /edmondderothschild/spiders/spider.py | 9d9460836280ea4016ffed933a6d247278938580 | [] | no_license | hristo-grudev/edmondderothschild | 068132e79dd176721b885f238f0cf342e7e57988 | da231da1f56c62760872f5bcc3a70aaae040add8 | refs/heads/main | 2023-03-24T06:17:20.193318 | 2021-03-26T08:49:17 | 2021-03-26T08:49:17 | 351,717,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from scrapy.spiders import XMLFeedSpider
from w3lib.html import remove_tags
from ..items import EdmondderothschildItem
from itemloaders.processors import TakeFirst
import requests
import xmltodict
class EdmondderothschildSpider(XMLFeedSpider):
    """Crawl press releases from news.edmond-de-rothschild.com.

    The start URL returns an XML listing of news ids; ``parse_node`` turns
    each id into a detail request, and ``parse_link`` scrapes the detail
    payload with regexes into an ``EdmondderothschildItem``.
    """
    name = 'edmondderothschild'
    start_urls = ['https://news.edmond-de-rothschild.com/api/ComNewsClient/News/GetAll?languageCode=fr&idPictureFormat=2&countryId=1&pageSize=999999&pageIndex=0&tags=undefined&businessId=undefined']
    itertag = 'IdNewsContent'

    def parse_node(self, response, node):
        """Request the detail endpoint for every news id found in the feed."""
        _id = node.xpath('//text()').get()
        url = f'https://news.edmond-de-rothschild.com/api/ComNewsClient/News/GetByID?IdNews={_id}'
        yield scrapy.Request(url, callback=self.parse_link)

    def parse_link(self, response):
        """Extract title, date and a cleaned-up description from the detail XML."""
        data = response.xpath('//*').get()
        title = re.findall(r'<Title>(.*?)</Title>', data, re.DOTALL)[0]
        date = re.findall(r'<PublishingDate>(.*?)</PublishingDate>', data, re.DOTALL)[0]
        description = re.findall(r'<Content>(.*?)</Content>', data, re.DOTALL)[0]
        # Entity fragments left over after feed decoding, mapped to the text
        # they stand for.  Insertion order matters: earlier keys may expose
        # fragments consumed by later ones.
        dict_of_chars = {'#58;': ':', 'quot;': '"', '#160;': '', '<': '<', '>': '>', '&': '', 'bull;': '', 'acute;': '´', 'grave;': '`', 'rsquo;': '`', 'circ;': 'ˆ', 'nbsp;': ' '}
        for char in dict_of_chars:
            # Plain literal substitution: str.replace avoids the accidental
            # regex-metacharacter interpretation that re.sub was exposed to.
            description = description.replace(char, dict_of_chars[char])
        description = remove_tags(description)
        item = ItemLoader(item=EdmondderothschildItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
| [
"[email protected]"
] | |
632e2a617586abeeb950bd4e2968256a403701b3 | 4a014a10f3e144bc778149f8bf6f763581ece2b0 | /src/latexify/config.py | 88ab7e04d0614f1cb1c100563ad6a6c4acad5b3e | [
"Apache-2.0"
] | permissive | google/latexify_py | 394f4608e09e45ac6b8091d6734cf6bf06fa548d | b3ae7fa9483f7055e692bef8acc9c0ec8e91c51f | refs/heads/main | 2023-09-02T06:49:55.607974 | 2023-01-14T10:57:55 | 2023-01-14T10:57:55 | 282,443,202 | 5,202 | 265 | Apache-2.0 | 2023-01-14T10:57:56 | 2020-07-25T12:50:56 | Python | UTF-8 | Python | false | false | 2,790 | py | """Definition of the Config class."""
from __future__ import annotations
import dataclasses
from typing import Any
@dataclasses.dataclass(frozen=True)
class Config:
    """Immutable bag of options controlling latexify's behavior.

    Attributes:
        expand_functions: If set, the names of the functions to expand.
        identifiers: If set, a mapping from original identifier names to
            their replacements; both sides must be valid Python identifiers.
        prefixes: Prefixes of identifiers to trim (e.g. "foo.bar" turns
            "foo.bar.suffix" into "suffix").
        reduce_assignments: If True, assignment statements are folded into
            the final expression.
        use_math_symbols: Whether identifiers spelled like math symbols
            (e.g. "alpha") become the LaTeX symbol (e.g. "\\alpha").
        use_set_symbols: Whether to use set symbols or not.
        use_signature: Whether the function signature is emitted before the
            expression.
    """

    expand_functions: set[str] | None
    identifiers: dict[str, str] | None
    prefixes: set[str] | None
    reduce_assignments: bool
    use_math_symbols: bool
    use_set_symbols: bool
    use_signature: bool

    def merge(self, *, config: Config | None = None, **kwargs) -> Config:
        """Build a new Config by layering values.

        Precedence per field: an explicit keyword argument wins; otherwise
        the value comes from *config* when given, else from *self*.

        Args:
            config: Optional Config whose fields replace self's as the base.
            **kwargs: Field overrides; take precedence over everything.

        Returns:
            A new Config object.
        """
        base = self if config is None else config
        merged: dict[str, Any] = {}
        for spec in dataclasses.fields(self):
            override = kwargs.get(spec.name)
            merged[spec.name] = getattr(base, spec.name) if override is None else override
        return Config(**merged)

    @staticmethod
    def defaults() -> Config:
        """Return a Config populated with the default values."""
        return Config(
            expand_functions=None,
            identifiers=None,
            prefixes=None,
            reduce_assignments=False,
            use_math_symbols=False,
            use_set_symbols=False,
            use_signature=True,
        )
| [
"[email protected]"
] | |
623feb305b11da8bd2283836b273711652544b52 | f64e31cb76909a6f7fb592ad623e0a94deec25ae | /leetcode/p1710_maximum_units_on_a_truck.py | 8e89618e55fad5153cf50d1bc81468436154aa8e | [] | no_license | weak-head/leetcode | 365d635cb985e1d154985188f6728c18cab1f877 | 9a20e1835652f5e6c33ef5c238f622e81f84ca26 | refs/heads/main | 2023-05-11T14:19:58.205709 | 2023-05-05T20:57:13 | 2023-05-05T20:57:13 | 172,853,059 | 0 | 1 | null | 2022-12-09T05:22:32 | 2019-02-27T05:58:54 | Python | UTF-8 | Python | false | false | 460 | py | from typing import List
import heapq
def maximumUnits(boxTypes: List[List[int]], truckSize: int) -> int:
    """Greedily load the truck with the highest-units-per-box types first.

    Each entry of boxTypes is [numberOfBoxes, unitsPerBox]; truckSize is the
    maximum number of boxes that fit.

    Time: O(n log n), Space: O(n) for the sorted copy (n = box types).
    """
    total_units = 0
    remaining = truckSize
    for count, units_per_box in sorted(boxTypes, key=lambda box: box[1], reverse=True):
        if remaining <= 0:
            break
        taken = min(count, remaining)
        total_units += taken * units_per_box
        remaining -= taken
    return total_units
| [
"[email protected]"
] | |
3c3751974ba7fde06708dc6a41c2bedc1bc225c7 | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_bones_large_evil_fire_red.py | 0d8c6fc34668df5aa6df3b2b1c35b2da4808979c | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 464 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_bones_large_evil_fire_red.iff"
result.attribute_template_id = -1
result.stfName("lair_n","bones")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"[email protected]"
] | |
2ed0e34a43fa2c125006f672440b3da4ab09d4ba | cba7110bb180886c22bb3cb844d7f9ff5efee428 | /petit_lisp.py | 6ab3ee8d08b510a5981f4dca6235640722ddfbbd | [
"CC0-1.0"
] | permissive | aroberge/lispy-experiments | d41df042e5737d7d99ac1f03a081e8ce5aed2585 | c54da34500e95150c2ef9c6057339525edf1e03f | refs/heads/master | 2021-01-19T21:51:58.194552 | 2015-02-25T21:41:55 | 2015-02-25T21:41:55 | 31,174,203 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,700 | py | '''New class based version
'''
import sys
from src.file_loader import FileLoader
from src.python_utils import python_fns
from src.parser import Parser
from src.repl import InteractiveInterpreter
loader = FileLoader()
STRINGS = {}
class Env(dict):
"An environment: a dict of {'var': val} pairs, with an outer Env."
def __init__(self, params=(), args=(), outer=None):
self.update(zip(params, args))
self.outer = outer
def find(self, var):
"Find the innermost Env where var appears."
if var in self:
return self
elif self.outer is not None:
return self.outer.find(var)
else:
raise ValueError("{} is not defined".format(var))
class Procedure(object):
    """A procedure defined in lisp code via (lambda ...)."""

    def __init__(self, params, body, env,
                 opt_param=False,
                 evaluate=None,
                 env_cls=None):
        self.params = params
        self.body = body
        self.env = env
        self.opt_param = opt_param   # index of the '.' rest-parameter, or False
        self.evaluate = evaluate     # the interpreter's evaluate() callback
        self.env_cls = env_cls       # class used to build the call frame

    def __call__(self, *args):
        call_args = self.pack_args(args) if self.opt_param else args
        frame = self.env_cls(self.params, call_args, self.env)
        return self.evaluate(self.body, frame)

    def pack_args(self, args):
        """Bundle every argument from position ``opt_param`` onward into a
        single list so variadic procedures see one rest-argument."""
        required = self.opt_param
        if len(args) < required:
            raise Exception("Not enough arguments supplied to procedure.")
        if len(args) == required:
            # No extras were passed: the rest-parameter becomes the empty list.
            return tuple(list(args) + [[]])
        if len(args) == required + 1 and isinstance(args[required], list):
            # Already packed (exactly one extra and it is a list): keep as-is.
            return args
        return tuple(list(args[:required]) + [list(args[required:])])

    @staticmethod
    def set_docstring(obj, s):
        """Set *obj*'s docstring from a lisp string (surrounding quotes stripped)."""
        obj.__doc__ = s[1:-1]
class Lisp:
    '''Grouping some basic lisp procedures into a logical unit.

    The following static methods are invoked within a lisp program as:
        (proc expr1 expr2 expr3 ...)
    which we denote below as (proc exprs*). The expressions are evaluated
    first,
        exps = [evaluate(exp, env) for exp in exprs*]
    and then dispatched to the relevant static method as
        proc(*exps)
    '''
    @staticmethod
    def begin(*expr):
        '''(begin expr1 ... expr_last) ==> evaluates all and returns expr_last'''
        return expr[-1]
    @staticmethod
    def is_atom(atom):
        '''(atom? expr) ==> true if expr is not a list'''
        return not isinstance(atom, list)
    @staticmethod
    def are_equal(val1, val2):
        '''(eq? expr1 expr2) ==> true if both are atoms and equal'''
        return (not isinstance(val1, list)) and (val1 == val2)
    @staticmethod
    def car(*expr):
        '''(car (exp1 exp2 exp3 ...)) ==> exp1'''
        return expr[0][0]
    @staticmethod
    def cdr(*expr):
        # Docstring fixed: it previously described car instead of cdr.
        '''(cdr (exp1 exp2 exp3 ...)) ==> (exp2 exp3 ...)'''
        return list(expr[0][1:])
    @staticmethod
    def cons(*expr):
        '''Usage (cons expr list) => (expr list) '''
        if not isinstance(expr[1], list):
            raise ValueError("Second argument of cons must be a list.")
        return [expr[0]] + expr[1]
# Core list primitives exposed to lisp programs, keyed by their lisp names.
lisp_procs = {
    'begin': Lisp.begin,
    'atom?': Lisp.is_atom,
    'eq?': Lisp.are_equal,
    'car': Lisp.car,
    'cdr': Lisp.cdr,
    'cons': Lisp.cons
}
def display(s):
    '''Print the contents of a lisp string.

    Lisp strings arrive with their surrounding double-quote characters
    still attached (escaped double quotes are not supported), so the first
    and last characters are stripped before printing.'''
    text = s[1:-1]
    print(text)
def common_env(env):
    "Add some built-in procedures and variables to the environment."
    env.update({
        '__True__': True,   # lisp-level boolean constants
        '__False__': False,
        '_DEBUG': False,    # debug flag readable from lisp code -- TODO confirm consumers
        'quit': exit,
        'print': display,
        'load': loader.load,
        'set-docstring': Procedure.set_docstring
    })
    env.update(python_fns)   # helpers imported from src.python_utils
    env.update(lisp_procs)   # core list primitives defined above
    return env
exit.__doc__ = "Quits the repl."  # give the 'quit' binding a lisp-facing docstring
global_env = common_env(Env())   # the single top-level environment
def evaluate(x, env=None):
    """Evaluate a parsed lisp expression in an environment.

    Dispatches on the shape of *x*: strings are variable references (or
    interned string literals), non-lists are constant literals, and lists
    are either special forms (quote, define, set!, lambda, cond, if, ...)
    or procedure calls. Defaults to the module-level global environment.
    """
    if env is None:
        env = global_env
    if isinstance(x, str):  # variable reference (or interned string literal)
        if x in STRINGS:
            return STRINGS[x]
        return env.find(x)[x]
    elif not isinstance(x, list):  # constant literal
        return x
    elif x[0] == 'undefined?':  # (undefined? x)
        try:
            _ = env.find(x[1])
            return False  # found ... so it is defined
        except ValueError:
            return True
    elif x[0] == 'quote':  # (quote exp), or 'exp
        (_, exp) = x
        return exp
    elif x[0] == 'define':  # (define var exp)
        (_, var, exp) = x
        env[var] = evaluate(exp, env)
    elif x[0] == 'set!':  # (set! var exp)
        (_, var, exp) = x
        env.find(var)[var] = evaluate(exp, env)
    elif x[0] == 'lambda':  # (lambda (params*) body)
        (_, params, body) = x
        # Work on a copy: popping '.' out of the original expression would
        # corrupt the source form, so re-evaluating the same lambda (e.g.
        # inside a function body) would lose its rest-parameter marker.
        params = list(params)
        opt_param = False
        if '.' in params:
            opt_param = params.index('.')
            params.pop(opt_param)
        return Procedure(params, body, env, opt_param, evaluate, Env)
    elif x[0] == 'cond':  # (cond (p1 e1) ... (pn en))
        for (p, e) in x[1:]:
            if evaluate(p, env):
                return evaluate(e, env)
    elif x[0] == 'if':  # (if test if_true other)
        (_, test, if_true, other) = x
        return evaluate((if_true if evaluate(test, env) else other), env)
    else:  # ("procedure" exp*)
        exps = [evaluate(exp, env) for exp in x]
        procedure = exps.pop(0)
        # Procedures declaring a keyword-only 'env' parameter receive the
        # current environment. (env was already normalized above, so the
        # previous redundant 'env is None' re-check has been dropped.)
        if (hasattr(procedure, '__kwdefaults__')
                and procedure.__kwdefaults__ is not None
                and "env" in procedure.__kwdefaults__):
            return procedure(*exps, env=env)
        else:
            return procedure(*exps)
parse = Parser(STRINGS).parse  # parser shares the STRINGS table used by evaluate()
loader.evaluate = evaluate  # let 'load'ed files be parsed and evaluated
loader.parse = parse
if __name__ == "__main__":
    # Load either the file named on the command line or the default prelude,
    # then drop into the interactive repl.
    if len(sys.argv) > 1:
        loader.load(sys.argv[1])
    else:
        loader.load("src/default_language.lisp")
    interpreter = InteractiveInterpreter(evaluate, parse, global_env)
    interpreter.start()
| [
"[email protected]"
] | |
e33f6c598c699f04928d5a390d5e4325a4948d24 | 9b4de05054f37a65dce49857fb6a809a370b23ca | /gd/migrations/0017_auto_20171223_1605.py | 97c3517179868639f2ab467e37bf45e031c80896 | [] | no_license | susahe/gis | f6b03b8f23abf7ca22c0069a4cdf603bfe879808 | 6b8d433cd5f672994ac138c1b656136425d0c345 | refs/heads/master | 2021-05-12T01:50:12.862559 | 2018-01-27T02:25:31 | 2018-01-27T02:25:31 | 117,569,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | # Generated by Django 2.0 on 2017-12-23 16:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefines GramaSevaDivision's
    officer-name and service-date fields (Sinhala verbose_names).
    Avoid hand-editing beyond dependencies/operations."""
    dependencies = [
        ('gd', '0016_auto_20171223_1601'),
    ]
    operations = [
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_end_date',
            field=models.DateTimeField(blank=True, verbose_name='සේවය අවසන් කල දිනය '),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_fname',
            field=models.CharField(max_length=100, verbose_name='ග්\u200dරාමසේවක මහතාගේ මුල් නම'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_lname',
            field=models.CharField(max_length=300, verbose_name='ග්\u200dරාමසේවක මහතාගේ වාසගම'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_oname',
            field=models.CharField(max_length=300, verbose_name='ග්\u200dරාමසේවක මහතාගේ අනිකුත් නම්'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_start_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='සේවය පටන් ගත් දිනය'),
        ),
    ]
| [
"[email protected]"
] | |
54c6f8673ede0ff92aae2a33401611442277cef8 | c8e3ce59771a46723eb460dadc7136ce4337567b | /wordcloud_yelp_pos.py | df0e8b9012472da71ebf042b39ed6ff59d675dfb | [
"MIT"
] | permissive | elisetnp/stylistic-word-clouds | 658c4b5e4bcf903f670078d8d6ebd4a25224afd1 | 0ecc7fa5632cd21ed9b24ccad9e27448a15eed81 | refs/heads/master | 2022-01-18T17:18:51.055745 | 2016-05-08T04:46:00 | 2016-05-08T04:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import numpy as np
import csv
import random
from PIL import Image
from wordcloud import WordCloud, STOPWORDS
from palettable.colorbrewer.sequential import Greens_9
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """Pick a random medium-to-dark green from the Greens_9 palette."""
    return tuple(Greens_9.colors[random.randint(2,8)])
csv_path = "yelp_words_by_stars_1gram.csv"
fa_path = "/Users/maxwoolf/Downloads/exported2048/"
font_path = "/Users/maxwoolf/Fonts/OpenSans-CondBold.ttf"
icon = "smile-o"
words_array = []
# NOTE(review): 'rb' + csv.DictReader is the Python 2 convention; on Python 3
# this should be open(csv_path, 'r', newline='') -- confirm target interpreter.
with open(csv_path, 'rb') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        # Fixed: compare with '==' (value equality), not 'is' (identity) --
        # identity with a string literal only worked by CPython interning.
        if row['stars'] == '5' and row['word'] not in STOPWORDS:
            words_array.append((row['word'].upper(), float(row['count'])))
# http://stackoverflow.com/questions/7911451/pil-convert-png-or-gif-with-transparency-to-jpg-without
icon_path = fa_path + "%s.png" % icon
icon = Image.open(icon_path)
# Flatten the icon's transparency onto a white background to use as the mask.
mask = Image.new("RGB", icon.size, (255,255,255))
mask.paste(icon,icon)
mask = np.array(mask)
wc = WordCloud(font_path=font_path, background_color="white", max_words=2000, mask=mask,
               max_font_size=300, random_state=42)
# generate word cloud
wc.generate_from_frequencies(words_array)
wc.recolor(color_func=color_func, random_state=3)
wc.to_file("yelp_pos_wordcloud.png") | [
"[email protected]"
] | |
74db49958179e9efa98ebfc30bb65ded9c8eee31 | 08b0c27ce98495c0889d7b768ac7d2a97beff158 | /廖雪峰-python/廖雪峰-面向对象之多重继承.py | aa05ccb03ba657e9c34a5bba6dd5ff9567191adf | [] | no_license | jetli123/python_files | 7d3834b8e3f8b8dca5109c2d6aeb8d0fcdb852c3 | 7f5b787820cca2cf5820a1cdf3fed77e5185f04e | refs/heads/master | 2020-03-27T01:36:02.526294 | 2019-07-11T06:58:35 | 2019-07-11T06:58:35 | 145,726,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | # -*- coding: utf-8 -*-
__author__ = 'JetLi'
"""继承是面向对象编程的一个重要的方式,因为通过继承,子类就可以扩
展父类的功能。"""
"""
Dog - 狗狗;
Bat - 蝙蝠;
Parrot - 鹦鹉;
Ostrich - 鸵鸟。
Mammal 哺乳类:能跑的哺乳类,能飞的哺乳类;
Bird 鸟类:能跑的鸟类,能飞的鸟类。
"""
# 采用多重继承。首先,主要的类层次仍按照哺乳类和鸟类设计
class Animal(object):
    pass
class Mammal(Animal):  # broad category: mammals
    pass
class Bird(Animal):  # broad category: birds
    pass
"""现在,我们要给动物再加上 Runnable 和 Flyable 的功能,只需要先定义
好 Runnable 和 Flyable 的类:"""
class RunnableMixin(object):
    @staticmethod
    def run():
        print 'Running...'
class FlyableMixin(object):
    @staticmethod
    def fly():
        print 'Flying...'
class Dog(Mammal, RunnableMixin):  # animals that need the Runnable capability also inherit RunnableMixin, e.g. Dog
    pass
class Bat(Mammal, FlyableMixin):  # animals that need the Flyable capability also inherit FlyableMixin, e.g. Bat
    pass
class Parrot(Bird, FlyableMixin):
    pass
class Ostrich(Bird, RunnableMixin):
    pass
# Demonstrate that each species picked up the mixed-in behavior.
b = Bat()
b.fly()
c = Dog()
c.run()
d = Parrot()
d.fly()
e = Ostrich()
e.run()
"""如果需要"混入"额外的功能,通过多重继承就可以Python3 基础教程【完整版】 http://www.yeayee.com/
195/531
实现,比如,让 Ostrich 除了继承自 Bird 外,再同时继承 Runnable。这
种设计通常称之为 MixIn。"""
"""MixIn 的目的就是给一个类增加多个功能,这样,在设计类的时候,我
们优先考虑通过多重继承来组合多个 MixIn 的功能,而不是设计多层次
的复杂的继承关系"""
# For example, a multi-process TCP service can be defined like this:
class ForkingMixin(object):
    pass
class TcpServer(object):
    pass
class MyTCPServer(TcpServer, ForkingMixin):
    pass
"""小结
由于 Python 允许使用多重继承,因此, MixIn 就是一种常见的设计。
只允许单一继承的语言(如 Java)不能使用 MixIn 的设计。""" | [
"[email protected]"
] | |
919ee4eab014fe4cd9950bf760b3ed95385fdfe2 | 79f42fd0de70f0fea931af610faeca3205fd54d4 | /base_lib/daemonize.py | bcabe9a83f6ac73cf1fcc28f101b0bb623286a53 | [] | no_license | fanwen390922198/ceph_pressure_test | a900a6dc20473ae3ff1241188ed012d22de2eace | b6a5b6d324e935915090e791d9722d921f659b26 | refs/heads/main | 2021-08-27T16:26:57.500359 | 2021-06-02T05:18:39 | 2021-06-02T05:18:39 | 115,672,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2018 - All Rights Reserved
# project: ceph_pressure_test
# file: daemonize.py
# time: 2019/9/5 14:05
# author: fanwen
# desc:
# !/usr/bin/env python
# coding: utf-8
import sys
import os
# 将当前进程fork为一个守护进程
# 注意:如果你的守护进程是由inetd启动的,不要这样做!inetd完成了
# 所有需要做的事情,包括重定向标准文件描述符,需要做的事情只有chdir()和umask()了
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Turn the current process into a daemon via the classic double fork.

    The arguments name the files that replace the standard streams (all
    default to /dev/null).  NOTE: Python 2 syntax (`except OSError, e`);
    this module does not run under Python 3.
    """
    # Standard file descriptors are redirected below (to /dev/null by default).
    try:
        pid = os.fork()
        # The parent (session-group leader) exits, which means a
        # non-session-leader child can never reacquire a controlling terminal.
        if pid > 0:
            sys.exit(0) # first parent exits
    except OSError, e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Detach from the parent environment.
    os.chdir("/") # hold no directory in use (a held cwd would block umount); could also chdir to a directory the daemon needs
    os.umask(0) # take full control over permissions of anything we write, since the inherited umask is unknown
    os.setsid() # become session leader and process-group leader, detached from the old login session and group
    # Perform the second fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0) # second parent exits
    except OSError, e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # The process is now a daemon: redirect the standard file descriptors.
    for f in sys.stdout, sys.stderr: f.flush()
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno()) # dup2 atomically closes and duplicates the file descriptor
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
# Example payload: print a counter and a timestamp once per second.
def main():
    import time
    sys.stdout.write('Daemon started with pid %d\n' % os.getpid())
    sys.stdout.write('Daemon stdout output\n')
    sys.stderr.write('Daemon stderr output\n')
    c = 0
    while True:
        sys.stdout.write('%d: %s\n' % (c, time.ctime()))
        sys.stdout.flush()  # flush so the log file updates in real time
        c = c + 1
        time.sleep(1)
if __name__ == "__main__":
    # Daemonize first (stdout/stderr go to files under /tmp), then loop forever.
    daemonize('/dev/null', '/tmp/daemon_stdout.log', '/tmp/daemon_error.log')
    main()
| [
"[email protected]"
] | |
e1af9eea7f4456e93b638d47785844b21ab0e873 | bb17c42217a6eaf5434878f86d43a3573fb4d201 | /caffe2/quantization/server/conv_groupwise_dnnlowp_op_test.py | badb115c15661bfc6b5d9c6c1ce020a9d3b70c4d | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | iotamudelta/pytorch | 6934cc48628cf3db6221bb27ca89a8a8f25826d5 | b710aee8c2ac2daa36e5143b00982b06746a4bf7 | refs/heads/master | 2021-06-05T22:33:12.622376 | 2019-01-04T17:18:39 | 2019-01-04T17:18:39 | 136,206,721 | 1 | 0 | NOASSERTION | 2018-12-17T23:16:59 | 2018-06-05T16:40:18 | C++ | UTF-8 | Python | false | false | 8,718 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep
from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import (
check_quantized_results_close,
generate_conv_inputs,
nchw2nhwc,
nhwc2nchw,
)
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
class GroupWiseDNNLowPOpConvTest(hu.HypothesisTestCase):
    """Hypothesis tests for group-wise quantized (DNNLOWP) Conv operators.

    Each test runs the same convolution through several (op, engine)
    pairs -- the fp32 reference plus DNNLOWP variants -- and checks that
    the quantized outputs stay close to the reference output.
    """
    # correctness test with no quantization error in inputs
    @given(
        stride=st.integers(1, 2),
        pad=st.integers(0, 2),
        kernel=st.integers(1, 5),
        dilation=st.integers(1, 2),
        size=st.integers(10, 16),
        group=st.integers(1, 4),
        input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
        output_channels_per_group=st.integers(2, 16),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        out_quantized=st.booleans(),
        preserve_activation_sparsity=st.booleans(),
        preserve_weight_sparsity=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_groupwise_dnnlowp_conv_int(
        self,
        stride,
        pad,
        kernel,
        dilation,
        size,
        group,
        input_channels_per_group,
        output_channels_per_group,
        batch_size,
        order,
        in_quantized,
        out_quantized,
        preserve_activation_sparsity,
        preserve_weight_sparsity,
        gc,
        dc,
    ):
        """Compare plain Conv vs. group-wise quantized Conv variants."""
        # Grouped convolution is exercised without dilation here -- presumably
        # the combination is unsupported by the op; TODO confirm.
        if group > 1:
            dilation = 1
        X, W, b = generate_conv_inputs(
            stride,
            pad,
            kernel,
            dilation,
            size,
            group,
            input_channels_per_group,
            output_channels_per_group,
            batch_size,
            order,
            groupwise_quantization=True,
            preserve_activation_sparsity=preserve_activation_sparsity,
            preserve_weight_sparsity=preserve_weight_sparsity,
        )
        Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
        outputs = []
        # fp32 reference first, then the quantized engines to compare against it.
        op_engine_list = [
            ("Conv", ""),
            ("Conv", "DNNLOWP"),
            ("Conv", "DNNLOWP_16"),
            ("Int8Conv", "DNNLOWP"),
        ]
        for op_type, engine in op_engine_list:
            net = core.Net("test_net")
            do_quantize = "DNNLOWP" in engine and in_quantized
            do_dequantize = "DNNLOWP" in engine and out_quantized
            if do_quantize:
                quantize = core.CreateOperator(
                    "Quantize",
                    ["X"],
                    ["X_q"],
                    preserve_activation_sparsity=preserve_activation_sparsity,
                    engine=engine,
                    device_option=gc,
                )
                net.Proto().op.extend([quantize])
            conv = core.CreateOperator(
                op_type,
                ["X_q" if do_quantize else "X", "W", "b"],
                ["Y_q" if do_dequantize else "Y"],
                stride=stride,
                kernel=kernel,
                dilation=dilation,
                pad=pad,
                order=order,
                dequantize_output=not do_dequantize,
                preserve_activation_sparsity=preserve_activation_sparsity,
                preserve_weight_sparsity=preserve_weight_sparsity,
                engine=engine,
                group=group,
                quantize_groupwise=1,
                device_option=gc,
            )
            if do_dequantize:
                # groupwise quantization only works with static quantization
                # so we need to set quantization parameters
                dnnlowp_utils.add_quantization_param_args(
                    conv, outputs[0][0], preserve_activation_sparsity
                )
            net.Proto().op.extend([conv])
            if do_dequantize:
                dequantize = core.CreateOperator(
                    "Dequantize",
                    ["Y_q"],
                    ["Y"],
                    preserve_activation_sparsity=preserve_activation_sparsity,
                    engine=engine,
                    device_option=gc,
                )
                net.Proto().op.extend([dequantize])
            self.ws.create_blob("X").feed(X, device_option=gc)
            self.ws.create_blob("W").feed(W, device_option=gc)
            self.ws.create_blob("b").feed(b, device_option=gc)
            self.ws.run(net)
            Y = self.ws.blobs["Y"].fetch()
            outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
        check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
    # correctness test with no quantization error in inputs
    @given(
        stride=st.integers(1, 2),
        pad=st.integers(0, 2),
        kernel=st.integers(1, 5),
        dilation=st.integers(1, 2),
        size=st.integers(10, 16),
        group=st.integers(1, 4),
        input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
        output_channels_per_group=st.integers(2, 16),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW", "NHWC"]),
        **hu.gcs_cpu_only
    )
    def test_groupwise_dnnlowp_conv_relu_int(
        self,
        stride,
        pad,
        kernel,
        dilation,
        size,
        group,
        input_channels_per_group,
        output_channels_per_group,
        batch_size,
        order,
        gc,
        dc,
    ):
        """Compare Conv+Relu against the fused group-wise quantized ConvRelu."""
        # Same dilation restriction as in test_groupwise_dnnlowp_conv_int above.
        if group > 1:
            dilation = 1
        X, W, b = generate_conv_inputs(
            stride,
            pad,
            kernel,
            dilation,
            size,
            group,
            input_channels_per_group,
            output_channels_per_group,
            batch_size,
            order,
            True,  # group-wise
        )
        Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
        outputs = []
        op_engine_list = [
            ("Conv", ""),
            ("ConvRelu", "DNNLOWP"),
            ("ConvRelu", "DNNLOWP_16"),
            ("Int8ConvRelu", "DNNLOWP"),
        ]
        for op_type, engine in op_engine_list:
            net = core.Net("test_net")
            if "DNNLOWP" in engine:
                # Quantized path: Quantize -> fused ConvRelu -> Dequantize.
                quantize = core.CreateOperator(
                    "Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([quantize])
                conv = core.CreateOperator(
                    op_type,
                    ["X_q", "W", "b"],
                    ["Y_q"],
                    stride=stride,
                    kernel=kernel,
                    dilation=dilation,
                    pad=pad,
                    order=order,
                    engine=engine,
                    group=group,
                    quantize_groupwise=1,
                    device_option=gc,
                )
                # groupwise quantization only works with static quantization
                # so we need to set quantization parameters
                dnnlowp_utils.add_quantization_param_args(conv, outputs[0][0])
                net.Proto().op.extend([conv])
                dequantize = core.CreateOperator(
                    "Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([dequantize])
            else:
                # Reference path: fp32 Conv followed by a separate Relu.
                conv = core.CreateOperator(
                    op_type,
                    ["X", "W", "b"],
                    ["Y"],
                    stride=stride,
                    kernel=kernel,
                    dilation=dilation,
                    pad=pad,
                    order=order,
                    engine=engine,
                    group=group,
                    device_option=gc,
                )
                net.Proto().op.extend([conv])
                relu = core.CreateOperator(
                    "Relu", ["Y"], ["Y"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([relu])
            self.ws.create_blob("X").feed(X, device_option=gc)
            self.ws.create_blob("W").feed(W, device_option=gc)
            self.ws.create_blob("b").feed(b, device_option=gc)
            self.ws.run(net)
            Y = self.ws.blobs["Y"].fetch()
            outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
        check_quantized_results_close(outputs)
| [
"[email protected]"
] | |
85d021bf63d7f990e9182b73040daae662c6324f | 82e19f3738f47bc517fcb6dd1bf480117bdc8825 | /0x07-python-test_driven_development/5-text_indentation.py | 9457c9692e0add48c1f4ddabf3c780864188b0bb | [] | no_license | PierreBeaujuge/holbertonschool-higher_level_programming | ce6cfaf09fd0fefff8047c23320009ffae9f6e79 | a133bfd68e3ec1f9430d6c722dd96d13f117c8cf | refs/heads/master | 2021-07-08T00:10:20.568509 | 2020-11-14T20:26:56 | 2020-11-14T20:26:56 | 207,353,878 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | #!/usr/bin/python3
"""
Module that handles
printing
of a text
"""
def text_indentation(text):
    """Print *text* with two newlines after each '.', '?' and ':'.

    Spaces and tabs immediately following a delimiter are skipped, so the
    next sentence starts at column 0 after the blank line.

    Args:
        text: the string to print.

    Raises:
        TypeError: if text is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    new_text = ""
    i = 0
    while i < len(text):
        # '==' membership instead of 'is': identity comparison with string
        # literals is a CPython interning accident (SyntaxWarning on 3.8+).
        if text[i] in '.?:':
            new_text += text[i] + '\n\n'
            # Skip any run of spaces/tabs that follows the delimiter.
            while i < len(text) - 1 and text[i + 1] in ' \t':
                i += 1
        else:
            new_text += text[i]
        i += 1
    print(new_text, end='')
| [
"[email protected]"
] | |
c8e277f57fbd493bde3ddc1f1f68158067e231ac | e48375c39c0d1fc71742b1964dffdd3af0ff86c0 | /nlu/components/classifiers/token_bert_healthcare/token_bert_healthcare.py | dfcbf72f27c66728a4a68aba94343643ceb788d4 | [
"Apache-2.0"
] | permissive | ahmedlone127/nlu | b8da5a84f0e47640cb09616559bf8b84c259f278 | 614bc2ff94c80a7ebc34a78720ef29a1bf7080e0 | refs/heads/master | 2023-02-09T05:10:29.631583 | 2022-05-20T15:16:33 | 2022-05-20T15:16:33 | 325,437,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | class TokenBertHealthcare:
@staticmethod
def get_default_model():
from sparknlp_jsl.annotator import MedicalBertForTokenClassifier
return MedicalBertForTokenClassifier.pretrained() \
.setInputCols("sentence", "token") \
.setOutputCol("ner")
@staticmethod
def get_pretrained_model(name, language, bucket=None):
from sparknlp_jsl.annotator import MedicalBertForTokenClassifier
return MedicalBertForTokenClassifier.pretrained(name, language, bucket) \
.setInputCols("sentence", "token") \
.setOutputCol("ner")
| [
"[email protected]"
] | |
373e06129c3d09a2092239b0a9dd19e72f5ca703 | e8e9bab84754786e68e32ad5bba9a1f93dd36df1 | /python/rr.py | b851fac9403a587338b012a5479bea4efa44772f | [] | no_license | 0vermind/eddie | 98026246bca34dd9a67b91113cf03bce6743489d | 38f43fb296a916fde7721543b942a59fffb9e871 | refs/heads/master | 2021-05-10T00:23:39.336437 | 2018-01-24T12:26:31 | 2018-01-24T12:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,921 | py | """
Usage:
df = pd.read_csv("data.csv")
renko = Renko(df)
renko.brick_size = 2
bricks = renko.get_bricks()
print(bricks)
"""
import sys
import datetime as dt
import numpy as np
import pandas as pd
import nsepy
class Renko:
    """Build Renko bricks from an OHLC DataFrame.

    Usage: construct with a DataFrame having open/high/low/close (and date)
    columns, set ``brick_size``, then call ``get_bricks()``.
    """
    PERIOD_CLOSE = 1
    PRICE_MOVEMENT = 2
    TREND_CHANGE_DIFF = 2  # bricks consumed when the trend direction flips
    brick_size = 1
    chart_type = PERIOD_CLOSE
    required_columns = {'open', 'high', 'low', 'close'}
    def __init__(self, df):
        # df: source OHLC frame; rdf: working copy mutated by the pipeline;
        # bdf: the resulting bricks frame (open/close/date rows).
        self.df = df
        self._validate_df()
        self.rdf = df
        self.bdf = None
    def _validate_df(self):
        # Fail early if any OHLC column is missing.
        if not self.required_columns.issubset(self.df.columns):
            raise ValueError('DataFrame should have OHLC {} columns'.format(self.required_columns))
    def get_bricks(self):
        # NOTE(review): price_movement_bricks is never defined on this class,
        # so chart_type=PRICE_MOVEMENT raises AttributeError -- confirm intent.
        if self.chart_type == self.PERIOD_CLOSE:
            self.period_close_bricks()
        else:
            self.price_movement_bricks()
        return self.bdf
    def period_close_bricks(self):
        """Derive bricks from period close prices (rounded to brick_size)."""
        brick_size = self.brick_size
        self.rdf = self.rdf[['date', 'close']]
        self.rdf.loc[:, 'close_s1'] = self.rdf['close'] - self.rdf['close'].shift()
        # self.rdf.dropna(inplace=True)
        # close_r: close rounded down to a brick boundary; close_r_s1: the
        # rounded close-to-close difference.
        self.rdf.loc[:, 'close_r'] = (self.rdf['close'] // self.brick_size) * self.brick_size
        self.rdf.loc[:, 'close_r_s1'] = (self.rdf['close_s1'] // self.brick_size) * self.brick_size
        self.filter_noise()
        bricks = self.rdf['bricks']
        asign = np.sign(bricks)
        # rtc flags rows where the brick sign (trend) changed vs. the previous row.
        self.rdf.loc[:, 'rtc'] = ((np.roll(asign, 1) - asign) != 0).astype(int)
        self.rdf.loc[:, 'u_bricks'] = self.rdf.loc[self.rdf['rtc'] == 1, 'bricks']
        # On a trend change, TREND_CHANGE_DIFF bricks are absorbed by the reversal.
        self.rdf.loc[:, 'u_bricks'] = self.rdf['u_bricks'].apply(
            lambda x: x - self.TREND_CHANGE_DIFF if x > 0 else x + self.TREND_CHANGE_DIFF
        )
        self.rdf.loc[self.rdf['rtc'] == 0, 'u_bricks'] = self.rdf['bricks']
        self.rdf = self.rdf[['close_r', 'u_bricks', 'date']]
        self.rdf = self.rdf[self.rdf['u_bricks'] != 0]
        self.rdf.reset_index(inplace=True)
        self.rdf.dropna(inplace=True)
        self.calculate_bricks_from_diff()
        self.shift_bricks()
    def shift_bricks(self):
        """Shift all bricks by whole brick steps so the last brick tracks the last close."""
        shift = self.df['close'].iloc[-1] - self.bdf['close'].iloc[-1]
        if abs(shift) < self.brick_size:
            return
        step = shift // self.brick_size
        self.bdf[['open', 'close']] += step * self.brick_size
    def calculate_bricks_from_diff(self):
        """Expand each row's brick count in rdf into individual open/close bricks in bdf."""
        brick_size = self.brick_size
        columns = ['open', 'close', 'date']
        # Seeded with a dummy [0, 0, 0] row that remains in the output.
        self.bdf = pd.DataFrame(
            columns=columns,
            data=[[0, 0, 0]],
        )
        prev_bricks = 1
        cls = (self.df['close'].iloc[0] // brick_size) * brick_size
        for index, row in self.rdf.iterrows():
            bricks = row['u_bricks']
            date = row['date']
            data = []
            for i in range(int(abs(bricks))):
                if prev_bricks * bricks < 0 and i == 0 :
                    # Direction flipped: start the first brick one step over.
                    cls = cls + brick_size * (bricks / abs(bricks))
                r = [
                    cls,
                    cls + (brick_size * (bricks / abs(bricks))),
                    date
                ]
                data.append(r)
                cls = r[1]
            prev_bricks = bricks
            # print(data)
            sdf = pd.DataFrame(data=data, columns=columns)
            self.bdf = pd.concat([self.bdf, sdf])
        return self.bdf
    def filter_noise(self):
        """Repeatedly drop single-brick trend reversals until rdf is stable."""
        df = self.rdf
        brick_size = self.brick_size
        df.loc[:, 'cr_diff'] = df['close_r'] - df['close_r'].shift()
        df = df[df['cr_diff'] != 0]
        df.loc[:, 'bricks'] = df.loc[:, ('cr_diff', )] / brick_size
        df.loc[:, 'bricks_s1'] = df['bricks'].shift()
        # tc: True where consecutive brick counts have opposite signs (a reversal).
        df.loc[:, 'tc'] = np.where((df['bricks'] * df['bricks_s1']) < 0, True, False)
        while True:
            df.loc[:, 'cr_diff'] = df['close_r'] - df['close_r'].shift()
            df = df[df['cr_diff'] != 0]
            df['bricks'] = df.loc[:, ('cr_diff', )] / brick_size
            df['bricks_s1'] = df['bricks'].shift()
            df['tc'] = np.where((df['bricks'] * df['bricks_s1']) < 0, True, False)
            # Drop one-brick reversals; recompute until nothing changes.
            filtered_df = df[(~df['tc']) | ~(abs(df['bricks']) == 1)]
            if len(df) == len(filtered_df):
                break
            df = filtered_df
        self.rdf = df
# Load price data: a local CSV if a path is given, otherwise fetch SBIN
# daily history from NSE via nsepy.
if len(sys.argv) > 1:
    fname = sys.argv[1]
    print('Reading local file {}'.format(fname))
    df = pd.read_csv(sys.argv[1])
else:
    print('Downloading data from nsepy')
    df = nsepy.get_history(
        symbol='SBIN',
        start=dt.date(2017,1,1),
        end=dt.date(2018,1,19)
    )
    if df.empty:
        print('No data is received from nsepy. Exiting...')
        sys.exit()
# Normalize: nsepy returns the date as the index and capitalized column names.
df.reset_index(inplace=True)
df.columns = [i.lower() for i in df.columns]
renko = Renko(df)
renko.brick_size = 4
r = renko.get_bricks()
print(r.tail(20))
| [
"[email protected]"
] | |
baf9232934abe8f004c202e7807716b4d6876a09 | a5a943391577a3b7222533d335ec0eda6cc1bc33 | /src/robot/version.py | 3be9a14084a8b351caae9ad094fb248c1aa7bd42 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | miktuy/robotframework | 2164aa67d779ec2a3511181bb7d01a7ad2bc45a9 | 15e11c63be0e7a4ce8401c7d47346a7dc8c81bf5 | refs/heads/master | 2023-01-31T11:19:18.816499 | 2023-01-18T17:34:13 | 2023-01-18T17:34:13 | 298,874,288 | 0 | 0 | Apache-2.0 | 2023-01-26T12:49:01 | 2020-09-26T18:20:30 | Python | UTF-8 | Python | false | false | 1,408 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# Version number typically updated by running `invoke set-version <version>`.
# Run `invoke --help set-version` or see tasks.py for details.
VERSION = '6.1.dev1'


def get_version(naked=False):
    """Return the version string; with ``naked=True`` strip pre-release tags."""
    if not naked:
        return VERSION
    return re.split('(a|b|rc|.dev)', VERSION)[0]


def get_full_version(program=None, naked=False):
    """Return '<program> <version> (<interpreter> <pyversion> on <platform>)'."""
    full = '%s %s (%s %s on %s)' % (program or '',
                                    get_version(naked),
                                    get_interpreter(),
                                    sys.version.split()[0],
                                    sys.platform)
    return full.strip()


def get_interpreter():
    """Return 'PyPy' when running on PyPy, otherwise 'Python'."""
    return 'PyPy' if 'PyPy' in sys.version else 'Python'
"[email protected]"
] | |
8b94b6f8a3cf469357b5616523eb1c8f2ac05b7e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03605/s512745938.py | cf053959fa1a5cb30cf04db0828a77316527a481 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | def main():
print('Yes' if '9' in input() else 'No')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a816b9eb7cb4c15572bcc2fde5429a9a3fef50b8 | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayOfflineMarketProductBatchqueryRequest.py | 5b6524909b5e0b93c45834d283793cf3e6e236ce | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 4,015 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOfflineMarketProductBatchqueryModel import AlipayOfflineMarketProductBatchqueryModel
class AlipayOfflineMarketProductBatchqueryRequest(object):
    """Request wrapper for the ``alipay.offline.market.product.batchquery`` API.

    Holds the business payload (``biz_content``/``biz_model``) plus the common
    gateway parameters, and serializes everything via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayOfflineMarketProductBatchqueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOfflineMarketProductBatchqueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # BUG FIX: this getter previously returned self._notify_url, so the
        # configured return_url was never readable (and get_params emitted
        # the notify URL under the 'return_url' key).
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values, mirroring the SDK convention.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an extra free-form text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialize the request into the flat dict expected by the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.offline.market.product.batchquery'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This API uploads no files, so the multipart section is always empty."""
        multipart_params = dict()
        return multipart_params
"[email protected]"
] | |
cdf830ae3e0835a12624bfd2c5ef4c83e633a614 | 313afbb1ea19f2266571870c7461f2591e30ea7b | /src/lab/lab01/z_7_macierze.py | e11a427c91a7ab0da9a9a38c400254c006be1ae9 | [] | no_license | tborzyszkowski/LogikaProgramowania | 89201a76ddc60692ffccaf7c4c7b17d4e1e2c0e8 | cceb161a6ff5933d2acc31f15879cafcf03b285b | refs/heads/master | 2022-06-23T18:41:29.460094 | 2022-06-17T16:18:29 | 2022-06-17T16:18:29 | 250,095,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from random import random
N = 2
# Identity matrix of size N.
identity = [[(1 if row == col else 0) for col in range(N)] for row in range(N)]
# Random N x N matrix with entries in [0, 99].
randmat = [[int(random() * 100) for _ in range(N)] for _ in range(N)]
print("m1:")
for row in identity:
    print(row)
print("\n-----------\n")
print("m2:")
for row in randmat:
    print(row)
print("\n-----------\n")
# Matrix product identity @ randmat (which simply reproduces randmat).
product = [[0] * N for _ in range(N)]
for i in range(N):
    for j in range(N):
        product[i][j] = sum(identity[i][k] * randmat[k][j] for k in range(N))
for row in product:
    print(row)
| [
"[email protected]"
] | |
2e80e028db2b748647fa57e8c02cd4029b2f9c93 | 27b3c7f8e144a3f6f4699e49d7df85c2918b3b23 | /customlogger/custom_logger.py | a00a7935d53ac335c2646f71ecfd4cfe3e79646e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | deresmos/customlogger | ddf0aabba7435816a5e160474e3545758cc078a4 | 6f6bbda8caeaa27a15d4981732ac7e57a525ea6f | refs/heads/master | 2021-07-18T21:50:54.808304 | 2018-06-28T06:08:01 | 2018-06-28T06:08:01 | 98,201,131 | 1 | 1 | MIT | 2018-06-05T14:55:59 | 2017-07-24T14:40:55 | Python | UTF-8 | Python | false | false | 5,725 | py | # imports {{{1
import logging
import os
from os.path import expanduser
from colorlog import ColoredFormatter
from customlogger.only_filter import OnlyFilter
from customlogger.run_rotating_handler import RunRotatingHandler
# }}}
class CustomLogger:
    """Convenience wrapper around :mod:`logging`.

    Behaviour is driven by class-level flags (``streamLevel``, ``isSaveLog``,
    ``isColorLog``, ...).  Handlers are attached lazily the first time
    :attr:`logger` is accessed.
    """

    # class variable {{{1
    NOTSET = logging.NOTSET
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARNING = logging.WARNING
    ERROR = logging.ERROR
    CRITICAL = logging.CRITICAL

    allLogFileName = 'all.log'
    logDirPath = './log'
    streamLevel = WARNING
    fileLevel = DEBUG
    isSaveLog = False
    isColorLog = True
    backupCount = 5

    fileLogFmt = '%(asctime)s %(levelname)s %(filename)s %(name)s ' \
        '%(lineno)s "%(message)s"'
    streamLogFmt = '%(levelname)-8s %(message)s'
    streamDebugLogFmt = '[%(levelname)s: File "%(filename)s", ' \
        'line %(lineno)s, in %(funcName)s] "%(message)s"'
    streamColorLogFmt = '%(log_color)s%(levelname)-8s%(reset)s %(message)s'
    streamColorDebugLogFmt = '[%(log_color)s%(levelname)s%(reset)s: ' \
        'File "%(filename)s", line %(lineno)s, in %(funcName)s] "%(message)s"'
    dateFmt = '%Y-%m-%d %a %H:%M:%S'
    logColors = {
        'DEBUG': 'cyan',
        'INFO': 'green',
        'WARNING': 'yellow',
        'ERROR': 'red',
        'CRITICAL': 'red,bg_white',
    }

    # class methods {{{1
    # debugMode {{{2
    @classmethod
    def debugMode(cls):
        """Lower the stream threshold to DEBUG for all subsequently built loggers."""
        cls.streamLevel = CustomLogger.DEBUG

    # property {{{1
    @property
    def logger(self):
        """The wrapped ``logging.Logger``; handlers are attached on first access."""
        if not self.__logger.handlers or self.__isFirstInitLogger:
            self.setLogger()
            # BUG FIX: this flag was never cleared, so every subsequent access
            # re-ran setLogger() and stacked duplicate handlers.
            self.__isFirstInitLogger = False
        return self.__logger

    # private functions {{{1
    def __init__(  # {{{2
            self, parent=None, logger_name=None, is_default=True):
        """Create (or look up) a named logger.

        :param parent: object whose class name is used as the logger name.
        :param logger_name: explicit logger name; takes precedence over parent.
        :param is_default: apply :meth:`defaultLoggerSetting` on first access.
        """
        name = parent or self
        name = logger_name or type(name).__name__
        logger = logging.getLogger(name)
        self.__logger = logger
        self.isDefault = is_default

        self.__isFirstInitLogger = True
        if self.__logger.handlers:
            # Another instance already configured this named logger.
            self.__isFirstInitLogger = False

    @staticmethod  # __createLogDir {{{2
    def __createLogDir(path):
        """Create the log directory (expanding '~') if it does not exist yet."""
        path = expanduser(path)
        if os.path.isdir(path):
            return

        os.mkdir(path)
        print('Create log directory. ({})'.format(os.path.abspath(path)))

    # public functions {{{1
    def setLogger(self):  # {{{2
        """Attach handlers according to the class-level configuration."""
        if self.isDefault:
            self.defaultLoggerSetting()

    def defaultLoggerSetting(self):  # {{{2
        """Default layout: one stream handler (optionally colored) plus,
        when ``isSaveLog`` is set, file handlers under ``logDirPath``."""
        self.__logger.setLevel(CustomLogger.DEBUG)
        if self.isColorLog:
            # Verbose format when the stream shows DEBUG records.
            if self.streamLevel <= self.DEBUG:
                fmt = self.streamColorDebugLogFmt
            else:
                fmt = self.streamColorLogFmt
            self.addStreamColorHandler(self.streamLevel, fmt=fmt)
        else:
            if self.streamLevel <= self.DEBUG:
                fmt = self.streamDebugLogFmt
            else:
                fmt = self.streamLogFmt
            self.addStreamHandler(self.streamLevel, fmt=fmt)
        # Extra INFO-only stream handler when INFO would otherwise be hidden.
        self.addStreamHandler(
            CustomLogger.INFO, is_only=True, check_level=True)

        if self.isSaveLog:
            self.__createLogDir(self.logDirPath)
            self.addFileHandler(self.fileLevel)
            self.addRunRotatingHandler(CustomLogger.DEBUG, self.backupCount)

    def addHandler(  # {{{2
            self,
            handler,
            level,
            fmt=None,
            datefmt=None,
            is_only=False,
            formatter=None,
    ):
        """Configure ``handler`` (level, formatter, optional filter) and attach it."""
        handler.setLevel(level)

        datefmt = datefmt or self.dateFmt
        formatter = formatter or logging.Formatter(fmt, datefmt)
        handler.setFormatter(formatter)

        # set only filter: restrict the handler to exactly this level
        if is_only:
            handler.addFilter(OnlyFilter(level))

        self.__logger.addHandler(handler)

    def addStreamHandler(  # {{{2
            self, level, fmt=None, is_only=False, check_level=False):
        """Attach a plain stream handler; skipped when the main stream already
        covers ``level`` and ``check_level`` is set."""
        if check_level and self.streamLevel <= level:
            return
        handler = logging.StreamHandler()
        self.addHandler(handler, level, fmt=fmt, is_only=is_only)

    def addStreamColorHandler(  # {{{2
            self, level, fmt=None, is_only=False, check_level=False):
        """Attach a colorlog-formatted stream handler."""
        if check_level and self.streamLevel <= level:
            return
        handler = logging.StreamHandler()
        formatter = ColoredFormatter(
            fmt,
            log_colors=self.logColors,
            style='%',
        )
        self.addHandler(handler, level, is_only=is_only, formatter=formatter)

    def addFileHandler(  # {{{2
            self, level, out_path=None, fmt=None, is_only=False):
        """Attach a plain file handler (defaults to <logDirPath>/<allLogFileName>)."""
        out_path = expanduser(
            out_path or os.path.join(self.logDirPath, self.allLogFileName))
        handler = logging.FileHandler(out_path)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)

    def addRotatingFileHandler(  # {{{2
            self,
            level,
            out_path,
            max_bytes,
            backup_count,
            fmt=None,
            is_only=False):
        """Attach a size-based rotating file handler."""
        # BUG FIX: ``import logging`` does not make the ``logging.handlers``
        # submodule available; import it explicitly before use.
        from logging.handlers import RotatingFileHandler
        handler = RotatingFileHandler(
            filename=out_path, maxBytes=max_bytes, backupCount=backup_count)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)

    def addRunRotatingHandler(  # {{{2
            self,
            level,
            backup_count,
            out_path=None,
            fmt=None,
            is_only=False):
        """Attach the project's per-run rotating handler (one file per run)."""
        out_path = expanduser(out_path or self.logDirPath)
        handler = RunRotatingHandler(out_path, backup_count)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)

    # }}}1
| [
"[email protected]"
] | |
809595d1fa3df5a45abcab47ef7dd4d58698b915 | 8af8544612d10260d1eaf4c613e599aaafc8f4c7 | /cal/admin.py | 48bd114c5a689fcc66e2fa9e1edc167617f235d1 | [] | no_license | Noeuclides/djangocalendar | 46979e7249adc94c449b7bd54888f752936a9a46 | 4a7fed9ae73989190c8b1f620de81af48248b788 | refs/heads/master | 2023-07-31T21:42:25.638017 | 2020-06-22T22:32:15 | 2020-06-22T22:32:15 | 273,511,608 | 0 | 0 | null | 2021-09-22T19:16:05 | 2020-06-19T14:20:39 | Python | UTF-8 | Python | false | false | 1,114 | py | from django.contrib import admin
from cal.models import *
# Django admin configuration for the calendar app.  Each ModelAdmin only
# customizes the changelist: list_display picks the columns shown and
# list_display_links makes those columns clickable edit links.
class EventAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'grade')
    list_display_links = ('id', 'title', 'grade')

class ChallengeAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'grade')
    list_display_links = ('id', 'name', 'grade')

class GradeAdmin(admin.ModelAdmin):
    list_display = ('id', 'name')
    list_display_links = ('id', 'name')

class ActivityAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'event')
    list_display_links = ('id', 'name', 'event')

class WorkTeamAdmin(admin.ModelAdmin):
    list_display = ('id', 'challenge')
    list_display_links = ('id', 'challenge')

class WA_Admin(admin.ModelAdmin):
    # Through-model linking a work team to an activity with a state.
    list_display = ('id', 'workteam', 'activity', 'state')
    list_display_links = ('id', 'workteam', 'activity', 'state')

# Register each model with its admin customization.
admin.site.register(Event, EventAdmin)
admin.site.register(Challenge, ChallengeAdmin)
admin.site.register(Grade, GradeAdmin)
admin.site.register(Activity, ActivityAdmin)
admin.site.register(WorkTeam, WorkTeamAdmin)
admin.site.register(workteam_activity, WA_Admin)
"[email protected]"
] | |
0a45a862d3c529f57cce59103290f55dc8ab44f8 | 057fde8a8ab9622a3524cb880c7ace5a15c0f355 | /set7/70.py | 4b1fb8e3c9d5e4a6348aeeaf197facb1e7fb3b10 | [] | no_license | ramyasutraye/Guvi_Python | e9ba6eb812ec8014214dce77d710ce230bbb8020 | 2fed3c460185fbf7bcf64c068084bcdb7d840140 | refs/heads/master | 2020-04-23T19:30:21.003061 | 2018-05-25T10:43:14 | 2018-05-25T10:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | a=int(input("Enter any number:"))
for i in range(0,a):
num=2**i
if num>a:
print(num)
break
if a==2:
print("4")
elif a==1 or a==0:
print("2")
| [
"[email protected]"
] | |
4fd5d0b3cf69ec3401158708578acd35b429b996 | 242f1dafae18d3c597b51067e2a8622c600d6df2 | /src/0000-0099/0005.manacher.py | fb347e485f03f4f36cd8545c32505243e8a5b10e | [] | no_license | gyang274/leetcode | a873adaa083270eb05ddcdd3db225025533e0dfe | 6043134736452a6f4704b62857d0aed2e9571164 | refs/heads/master | 2021-08-07T15:15:01.885679 | 2020-12-22T20:57:19 | 2020-12-22T20:57:19 | 233,179,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | class Solution:
def longestPalindrome(self, s: str) -> str:
"""Manacher Algorithm.
"""
n = 2 * len(s) + 1
# augmented s with #
ss = lambda s, i: s[(i - 1) // 2] if i % 2 else '#'
# c: center of palindrome with rightmost position
c = 0
# r: right of palindrom with rightmost position
r = 0
# i: current position under investigation, c <= i <= r
i = 0
# j: relfect of i w.r.t c
# j = lambda i, c: 2 * c - i
# p: length of palindrome at each position, e.g., r - c at c
p = [0 for _ in range(n)]
# loop through i w.r.t manacher algorithm
mi = -1
ml = -1
for i in range(n):
if (r < i):
c, r = i, i
# 2 * c - i - p[2 * c - i] == 2 * c - r
if p[2 * c - i] == r - i:
while 2 * i - r >= 0 and r < n and ss(s, 2 * i - r) == ss(s, r):
r += 1
r -= 1
c = i
p[i] = r - i
else:
p[i] = min(p[2 * c - i], r - i)
if p[i] > ml:
mi = i
ml = p[i]
# print(i, c, r, [ss(s, i) + ':' + str(p[i]) for i in range(n)])
return s[((mi - ml) // 2):((mi + ml) // 2)]
if __name__ == '__main__':
    # Smoke-test the solver on a few hand-picked inputs.
    solver = Solution()
    cases = [
        "aba",
        "aabba",
        "aababa",
        "aabbaa",
    ]
    for cs in cases:
        rs = solver.longestPalindrome(cs)
        print(f"case: {cs} | solution: {rs}")
"[email protected]"
] | |
bc576f8ef94ce910cccb8942737b54d6b3bf8daa | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/1336907795/Scripts/_testbuffer.py | 706a5b806e0bdf4a96417ae23fce495550448982 | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,549 | py | # encoding: utf-8
# module Scripts._testbuffer
# from C:\Users\siddh\PycharmProjects\Introduction to Python\.idea\VirtualEnvironment\Scripts\_testbuffer.pyd
# by generator 1.145
# no doc
# no imports
# Variables with simple values
ND_FORTRAN = 4
ND_GETBUF_FAIL = 64
ND_GETBUF_UNDEFINED = 128
ND_MAX_NDIM = 128
ND_PIL = 16
ND_REDIRECT = 32
ND_SCALAR = 8
ND_VAREXPORT = 1
ND_WRITABLE = 2
PyBUF_ANY_CONTIGUOUS = 152
PyBUF_CONTIG = 9
PyBUF_CONTIG_RO = 8
PyBUF_C_CONTIGUOUS = 56
PyBUF_FORMAT = 4
PyBUF_FULL = 285
PyBUF_FULL_RO = 284
PyBUF_F_CONTIGUOUS = 88
PyBUF_INDIRECT = 280
PyBUF_ND = 8
PyBUF_READ = 256
PyBUF_RECORDS = 29
PyBUF_RECORDS_RO = 28
PyBUF_SIMPLE = 0
PyBUF_STRIDED = 25
PyBUF_STRIDED_RO = 24
PyBUF_STRIDES = 24
PyBUF_WRITABLE = 1
PyBUF_WRITE = 512
# functions
def cmp_contig(*args, **kwargs): # real signature unknown
pass
def get_contiguous(*args, **kwargs): # real signature unknown
pass
def get_pointer(*args, **kwargs): # real signature unknown
pass
def get_sizeof_void_p(*args, **kwargs): # real signature unknown
pass
def is_contiguous(*args, **kwargs): # real signature unknown
pass
def py_buffer_to_contiguous(*args, **kwargs): # real signature unknown
pass
def slice_indices(*args, **kwargs): # real signature unknown
pass
# classes
class ndarray(object):
# no doc
def add_suboffsets(self, *args, **kwargs): # real signature unknown
pass
def memoryview_from_buffer(self, *args, **kwargs): # real signature unknown
pass
def pop(self, *args, **kwargs): # real signature unknown
pass
def push(self, *args, **kwargs): # real signature unknown
pass
def tobytes(self, *args, **kwargs): # real signature unknown
pass
def tolist(self, *args, **kwargs): # real signature unknown
pass
def __delitem__(self, *args, **kwargs): # real signature unknown
""" Delete self[key]. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getitem__(self, *args, **kwargs): # real signature unknown
""" Return self[key]. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __setitem__(self, *args, **kwargs): # real signature unknown
""" Set self[key] to value. """
pass
contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
c_contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
f_contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
nbytes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ndim = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
obj = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
readonly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
shape = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
strides = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class staticarray(object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| [
"[email protected]"
] | |
e7567675022bdbdfd9b443dd0209766b03578342 | 9f2ea36883c5fbd2b96132917e4939d5f99de400 | /flask_mongo_profiler/contrib/flask_admin/formatters/lookup.py | c500c2d56a60f9c56a9b70753024b9727317fc00 | [
"MIT"
] | permissive | eduflow/flask-mongo-profiler | 80803111e1c4e5a817401d1339571c8475c9b84c | a267eeb49fea07c9a24fb370bd9d7a90ed313ccf | refs/heads/master | 2023-05-27T01:58:24.860787 | 2018-12-15T21:16:57 | 2018-12-15T21:18:54 | 154,967,856 | 0 | 0 | MIT | 2023-05-22T21:35:44 | 2018-10-27T13:31:26 | Python | UTF-8 | Python | false | false | 1,265 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import Markup
from ..helpers import get_list_url_filtered_by_field_value
def search_field_formatter(view, context, model, name):
    """Render a field value followed by a filter-toggle icon for Flask-Admin.

    When the list is not yet filtered on this field, the icon links to the
    filtered list; when the filter is already applied, it links back to the
    unfiltered list and shows a 'remove' icon instead.

    BUG FIX: the original ``''.join`` concatenated the ``<a>`` attributes
    without separators, producing malformed HTML such as
    ``data-role="tooltip"title ...="..."style=...``.  Spaces are now emitted
    between every attribute.
    """
    filter_url = get_list_url_filtered_by_field_value(view, model, name)
    filter_applied = False
    if filter_url is None:  # already filtered on this value -> build the reverse link
        filter_url = get_list_url_filtered_by_field_value(
            view, model, name, reverse=True
        )
        filter_applied = True
    icon = 'search' if not filter_applied else 'remove'
    tooltip = (
        'Filter {} by {}'.format(name, model[name])
        if not filter_applied
        else 'Clear filter'
    )
    # NOTE(review): model[name] is interpolated unescaped into Markup (marked
    # HTML-safe) — confirm upstream that the value cannot contain user HTML.
    return Markup(
        '{value} <a href="{href}" class="fa fa-{icon} glyphicon glyphicon-{icon}" '
        'data-role="tooltip" title data-original-title="{tooltip}" '
        'style="text-decoration:none"></a>'.format(
            value=model[name],
            href=filter_url,
            icon=icon,
            tooltip=tooltip,
        )
    )
| [
"[email protected]"
] | |
4e4334450f22bbbdb6cbd28d91bea30372bf64eb | 8fa938eddcc75eb7dff1f2055c49cb3817a00c63 | /String/ex49.py | 60dfa2055e3999b694cd7c97f3c9dca529ba9fc5 | [] | no_license | jayhebe/w3resource_exercises | f27109759d112b0611574aa70eb378ace447c2a0 | b29aa7c806f6021a8988e83bb9f674522a41380d | refs/heads/master | 2020-05-07T09:23:24.039271 | 2020-01-30T15:05:06 | 2020-01-30T15:05:06 | 180,374,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def get_vowels(char_str):
return [ch for ch in char_str if ch in "aeiouAEIOU"]
if __name__ == '__main__':
print(get_vowels("w3resource"))
| [
"[email protected]"
] | |
d6285ad479fd73931af9354c270848582cab80b4 | d3b829dc03641fba2a57c816891a021ab7d5b505 | /fluent_contents/migrations/0001_initial.py | d713326b77ede1588edb67715503f41e8d68659b | [
"Apache-2.0"
] | permissive | django-fluent/django-fluent-contents | 7af8c0782f1e99832cae6c4f1ed3d99e72097199 | 5577567303d29b56fd48128c22c7dc5d8b2c7476 | refs/heads/master | 2023-02-21T01:44:34.935089 | 2021-11-17T08:55:16 | 2021-11-17T08:58:26 | 3,145,163 | 84 | 18 | Apache-2.0 | 2023-02-15T20:50:09 | 2012-01-10T12:54:57 | Python | UTF-8 | Python | false | false | 4,207 | py | import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial fluent_contents schema: ContentItem and Placeholder tables.

    NOTE: auto-generated migration — change the schema via new migrations,
    not by editing this file.
    """

    dependencies = [("contenttypes", "0001_initial")]

    operations = [
        # ContentItem: one piece of content attached to a generic parent
        # (parent_type + parent_id) and ordered within a Placeholder.
        migrations.CreateModel(
            name="ContentItem",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("parent_id", models.IntegerField(null=True)),
                (
                    "language_code",
                    models.CharField(default="", max_length=15, editable=False, db_index=True),
                ),
                ("sort_order", models.IntegerField(default=1, db_index=True)),
                (
                    "parent_type",
                    models.ForeignKey(to="contenttypes.ContentType", on_delete=models.CASCADE),
                ),
            ],
            options={
                "ordering": ("placeholder", "sort_order"),
                "verbose_name": "Contentitem link",
                "verbose_name_plural": "Contentitem links",
            },
            bases=(models.Model,),
        ),
        # Placeholder: a named slot on a parent object that groups items.
        migrations.CreateModel(
            name="Placeholder",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "slot",
                    models.SlugField(
                        help_text="A short name to identify the placeholder in the template code.",
                        verbose_name="Slot",
                    ),
                ),
                (
                    "role",
                    models.CharField(
                        default="m",
                        help_text="This defines where the object is used.",
                        max_length=1,
                        verbose_name="Role",
                        choices=[
                            ("m", "Main content"),
                            ("s", "Sidebar content"),
                            ("r", "Related content"),
                        ],
                    ),
                ),
                ("parent_id", models.IntegerField(null=True)),
                (
                    "title",
                    models.CharField(max_length=255, verbose_name="Admin title", blank=True),
                ),
                (
                    "parent_type",
                    models.ForeignKey(
                        blank=True,
                        to="contenttypes.ContentType",
                        null=True,
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "verbose_name": "Placeholder",
                "verbose_name_plural": "Placeholders",
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name="placeholder",
            unique_together={("parent_type", "parent_id", "slot")},
        ),
        # Late-added FKs on ContentItem (placeholder link + polymorphic type).
        migrations.AddField(
            model_name="contentitem",
            name="placeholder",
            field=models.ForeignKey(
                related_name="contentitems",
                on_delete=django.db.models.deletion.SET_NULL,
                to="fluent_contents.Placeholder",
                null=True,
            ),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name="contentitem",
            name="polymorphic_ctype",
            field=models.ForeignKey(
                related_name="polymorphic_fluent_contents.contentitem_set+",
                editable=False,
                to="contenttypes.ContentType",
                on_delete=models.CASCADE,
                null=True,
            ),
            preserve_default=True,
        ),
    ]
"[email protected]"
] | |
2f6e18a5f4ba8b0456377676821bca0328fe208c | 8311a0bcf3f2126d622f928483ce2ea9d6a7cb0d | /Code/Matthew/python/bogosort.py | 8a5e8b1541f2a5351db02b7bfa5859e5544947c6 | [] | no_license | guam68/class_iguana | 857247dca0ff732d11f7fb0d3dc761ec83846c94 | e4359d32dfe60423a643c21df5636669016ad2c0 | refs/heads/master | 2020-05-01T06:33:22.611127 | 2019-03-13T23:07:41 | 2019-03-13T23:07:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py |
# import random
# def bubble_sort(nums):
# for i in range(len(nums)):
# for j in range(len(nums)-1):
# if nums[j] > nums[j+1]:
# nums[j], nums[j+1] = nums[j+1], nums[j]
#
# nums = [random.randint(0,99) for i in range(100)]
# print(nums)
# bubble_sort(nums)
# print(nums)
import random
import time
def random_list(n):
    """Return a list of ``n`` random integers drawn uniformly from [0, 99]."""
    return [random.randint(0, 99) for _ in range(n)]
def shuffle_nums(nums):
    """Shuffle ``nums`` in place by swapping each slot with a random one.

    (Kept identical to the original scheme, including its slight bias
    compared to Fisher-Yates.)
    """
    for idx in range(len(nums)):
        swap = random.randint(0, len(nums) - 1)
        nums[idx], nums[swap] = nums[swap], nums[idx]
def is_sorted(nums):
    """Return True when ``nums`` is in non-decreasing order."""
    return all(a <= b for a, b in zip(nums, nums[1:]))
def percent_sorted(nums):
    """Return the fraction of adjacent pairs of ``nums`` that are in order.

    BUG FIX: with fewer than two elements there are no adjacent pairs and the
    original raised ZeroDivisionError; such lists are trivially sorted, so
    1.0 is returned instead.
    """
    pairs = len(nums) - 1
    if pairs < 1:
        return 1.0
    in_order = sum(1 for i in range(pairs) if nums[i] <= nums[i + 1])
    return in_order / pairs
def get_time():
    """Return the current wall-clock time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def bogosort(nums):
    """Shuffle ``nums`` in place until it happens to be sorted; print attempts.

    Expected O(n * n!) shuffles — demo code only.
    """
    attempts = 0
    started = get_time()
    while not is_sorted(nums):
        shuffle_nums(nums)
        attempts += 1
    _elapsed_ms = get_time() - started  # retained for optional timing printouts
    print(f'bogosort: {attempts}')
def bogosort_optimized(nums):
    """Hill-climbing bogosort: keep a shuffle only when it is 'more sorted'.

    Returns the (eventually fully sorted) list and prints the attempt count.
    """
    best_score = percent_sorted(nums)
    attempts = 0
    while best_score != 1.0:
        attempts += 1
        candidate = nums.copy()
        shuffle_nums(candidate)
        score = percent_sorted(candidate)
        if score > best_score:
            nums, best_score = candidate, score
    print(f'bogosort_optimized: {attempts}')
    return nums
def sqrt_optimized(x):
    """Randomly guess integers until one squares to ``x``; return that root.

    BUG FIX: the original looped forever for negative numbers and
    non-perfect squares (and for ``x == 1``, since ``int(random()*x)`` could
    never produce 1).  Invalid inputs now raise ValueError, and guesses are
    drawn from the inclusive range [0, x] so every root is reachable.
    """
    from math import isqrt
    if x < 0 or isqrt(x) ** 2 != x:
        raise ValueError(f'{x} is not a perfect square')
    z = 0
    counter = 0
    while z * z != x:
        z = random.randint(0, x)
        counter += 1
    print(f'sqrt_optimized: {counter}')
    return z
# --- demo driver ---------------------------------------------------------
print(sqrt_optimized(64))
# Seed both runs identically so plain and optimized bogosort start from the
# same shuffled input and their attempt counts are comparable.
seed = get_time()
n_values = 8
print(f'seed: {seed}')
print()
random.seed(seed)
nums = random_list(n_values)
print(nums)
bogosort(nums)
print(nums)
random.seed(seed)
nums = random_list(n_values)
print(nums)
nums = bogosort_optimized(nums)
print(nums)
# Older manual experiments kept for reference:
# nums = random_list(5)
# print(nums)
# input('...')
# bogosort(nums)
# print(nums)
# def get_time():
#     return int(round(time.time() * 1000))
# nums = random_list(12)
# print(nums)
# input('>')
# bogosort(nums)
# print(nums)
# nums = [1, 2, 3, 4]
# print(nums)
# print(is_sorted(nums))
# shuffle_nums(nums)
# print(nums)
# print(is_sorted(nums))
| [
"[email protected]"
] | |
8f425fd16f7d5aded1fbfb08578f898f5cecf18f | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1beta_generated_alloy_db_admin_generate_client_certificate_async.py | f1d54b369e792a9e23c7159b0cbbd6d2a0c78430 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,940 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GenerateClientCertificate
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1beta_generated_AlloyDBAdmin_GenerateClientCertificate_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1beta
async def sample_generate_client_certificate():
    """Call AlloyDBAdmin.generate_client_certificate asynchronously and print it.

    NOTE: auto-generated sample; "parent_value" is a placeholder for a real
    cluster resource name — regenerate rather than hand-edit.
    """
    # Create a client
    client = alloydb_v1beta.AlloyDBAdminAsyncClient()
    # Initialize request argument(s)
    request = alloydb_v1beta.GenerateClientCertificateRequest(
        parent="parent_value",
    )
    # Make the request
    response = await client.generate_client_certificate(request=request)
    # Handle the response
    print(response)
# [END alloydb_v1beta_generated_AlloyDBAdmin_GenerateClientCertificate_async]
| [
"[email protected]"
] | |
5f7225a8bb8465174507e5765718f14c91635f9b | 2d9a3ce2a04190d0032e8a298829022260b1d76b | /indra/databases/biolookup_client.py | a8ac55265ff756883e26106fd0c97136b94d074f | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | sorgerlab/indra | f127a0f9bdd2d3f48df14575883fd31e2f4de4bf | 6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a | refs/heads/master | 2023-08-21T13:25:54.654995 | 2023-06-11T16:46:41 | 2023-06-11T16:46:41 | 22,848,436 | 158 | 61 | BSD-2-Clause | 2023-08-30T21:47:59 | 2014-08-11T17:44:05 | Python | UTF-8 | Python | false | false | 1,316 | py | """A client to the Biolookup web service available at http://biolookup.io/."""
from typing import Dict, Optional

import requests
URL = 'http://biolookup.io/api/lookup/'
def lookup_curie(curie: str) -> Dict:
    """Query the Biolookup web service for a single CURIE.

    Parameters
    ----------
    curie :
        The CURIE to look up.

    Returns
    -------
    :
        The JSON payload returned by the service.
    """
    response = requests.get(URL + curie)
    response.raise_for_status()
    return response.json()
def lookup(db_ns: str, db_id: str) -> dict:
    """Look up a namespace/ID pair in the Biolookup web service.

    Parameters
    ----------
    db_ns :
        The database namespace.
    db_id :
        The database ID.

    Returns
    -------
    :
        The lookup result as returned by :func:`lookup_curie`.
    """
    return lookup_curie('{}:{}'.format(db_ns, db_id))
def get_name(db_ns: str, db_id: str) -> Optional[str]:
    """Return the name of an entry in the Biolookup web service.

    BUG FIX: the return annotation previously claimed ``Dict``, but the
    function returns ``res.get('name')`` — the entry's name, or ``None``
    when the service payload has no ``name`` key.

    Parameters
    ----------
    db_ns :
        The database namespace.
    db_id :
        The database ID.

    Returns
    -------
    :
        The name of the entry, or None when absent.
    """
    res = lookup(db_ns, db_id)
    return res.get('name')
| [
"[email protected]"
] | |
0797c311f47d452f7d067ce093d1a6ac6666d7b9 | d178ecd2d3511fcd98aca731ada1aa0fec0e15a1 | /prog_count/grader.py | dca9b796081c90aeae9a3b3000b81e8f76932a68 | [] | no_license | 0xBADCA7/easyctf-iv-problems | 165cca68e2bad788604dab4b15c644e994c7fa85 | 7037fe557df97cd85b3eada672ef44a356236522 | refs/heads/master | 2020-08-13T21:16:26.114580 | 2018-02-21T06:15:40 | 2018-02-21T06:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from itertools import combinations as comb
n, s = map(int, input().split())
nums = [int(u) for u in input().split()]
t = 0
for i in range(1, len(nums) + 1):
for c in comb(nums, i):
if sum(c) == s:
t += 1
print(t) | [
"[email protected]"
] | |
19d84cb48523b1f33dfbbc80555e822ae0d9177b | 9cfdfe633dfb2755955f9d356fdd0a9601089955 | /account_auth/tests/test_veiws.py | cc19cd637b9384f9d338cf62d7f16a79d4b53583 | [] | no_license | DimAntDim/ResumeBuilder | cec597ba4b857d98147e2f5f6831bd3c93c83c80 | 0507d5d9c44936d892df280015f7c6d8e630f55b | refs/heads/main | 2023-08-21T08:03:43.327868 | 2021-11-03T19:43:04 | 2021-11-03T19:43:04 | 394,882,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | from account_auth.forms import RegisterForm
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
# Resolve the active user model once, so these tests honour a custom AUTH_USER_MODEL.
UserModel = get_user_model()
class RegisterViewTest(TestCase):
    """Exercise the registration view: template rendering and user creation."""

    def test_register_render_template(self):
        register_url = reverse('register')
        response = self.client.get(register_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='account/register.html')
        self.assertIsInstance(response.context['form'], RegisterForm)

    def test_register_create_user(self):
        payload = {
            'email': "[email protected]",
            'password': "test",
            'password2': "test",
        }
        response = self.client.post(reverse('register'), data=payload, follow=True)
        self.assertEqual(response.status_code, 200)
class LoginViewTest(TestCase):
    """Exercise the login view."""

    def test_login_success_redirect_user_home_page(self):
        credentials = {'email': '[email protected]', 'password': 'test'}
        response = self.client.post(reverse('login'), data=credentials)
        self.assertEqual(200, response.status_code)
class LogOutViewTest(TestCase):
    """Exercise the logout view with a pre-created user."""

    def setUp(self):
        # NOTE(review): the password is stored unhashed by objects.create(), so the
        # login() call below cannot actually authenticate; the assertion only
        # checks that the logout view redirects — confirm this is intentional.
        self.user = UserModel.objects.create(
            email='test@textcom',
            password='test',
        )

    def tearDown(self):
        self.user.delete()

    def test_logout_success_redirect_index(self):
        self.client.login(email="[email protected]", password='text')
        response = self.client.get(reverse('logout'))
        self.assertEqual(302, response.status_code)
| [
"[email protected]"
] | |
37622bd384cc377d31cf52cceba87131abd2c22f | 054f03640ea7598b8d623fa604c691c7526c19e7 | /extra_foam/pipeline/tests/test_data_model.py | dde2d4e18c5425922fa66e37b72f0efc1681b9a1 | [
"BSD-3-Clause"
] | permissive | zhujun98/EXtra-foam | ef1298f026c08b9d83464a84106c612fbfbe000f | 680d6d7fd4afdcbc41eb8e440feac54b6cecab33 | refs/heads/master | 2021-07-25T20:18:17.736460 | 2020-07-31T15:46:47 | 2020-07-31T15:46:47 | 224,394,172 | 0 | 0 | BSD-3-Clause | 2019-12-04T10:21:56 | 2019-11-27T09:32:38 | Python | UTF-8 | Python | false | false | 15,441 | py | import unittest
from unittest.mock import patch
import numpy as np
from extra_foam.pipeline.data_model import (
PulseIndexMask, MovingAverageArray, MovingAverageScalar,
ImageData, ProcessedData, RawImageData
)
from extra_foam.config import config
class TestMovingAverageScalar(unittest.TestCase):
    """Tests for the MovingAverageScalar descriptor."""
    def testGeneral(self):
        """Walk the descriptor through window changes, deletion and None assignment."""
        class Dummy:
            data = MovingAverageScalar()
        dm = Dummy()
        dm.data = 1.0
        # default window is 1: the stored value is simply the last one set
        self.assertEqual(1, Dummy.data.window)
        self.assertEqual(1.0, dm.data)
        Dummy.data.window = 5
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        dm.data = 2.0
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        # running average of 1.0 and 2.0
        self.assertEqual(1.5, dm.data)
        dm.data = 3.0
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(3, Dummy.data.count)
        # running average of 1.0, 2.0 and 3.0
        self.assertEqual(2.0, dm.data)
        # set a ma window which is smaller than the current window
        Dummy.data.window = 3
        self.assertEqual(3, Dummy.data.window)
        # shrinking the window keeps the current count and average unchanged
        self.assertEqual(3, Dummy.data.count)
        self.assertEqual(2.0, dm.data)
        del dm.data
        # deletion clears the value and count but preserves the window size
        self.assertIsNone(dm.data)
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
        dm.data = 1.0
        self.assertEqual(1.0, dm.data)
        dm.data = None
        # assigning None also resets the moving average
        self.assertIsNone(dm.data)
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
class TestMovingAverageArray(unittest.TestCase):
    """Tests for the MovingAverageArray descriptor with 1D float32 arrays."""
    def test1DArray(self):
        class Dummy:
            data = MovingAverageArray()
        dm = Dummy()
        arr = np.array([1, np.nan, 3], dtype=np.float32)
        dm.data = arr.copy()
        self.assertEqual(1, Dummy.data.window)
        Dummy.data.window = 5
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        dm.data = np.array([3, 2, np.nan], dtype=np.float32)
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        # a NaN in either operand propagates into the averaged element
        np.testing.assert_array_equal(
            np.array([2, np.nan, np.nan], dtype=np.float32), dm.data)
        # set a ma window which is smaller than the current window
        Dummy.data.window = 3
        self.assertEqual(3, Dummy.data.window)
        # shrinking the window keeps the current count and averaged data
        self.assertEqual(2, Dummy.data.count)
        np.testing.assert_array_equal(
            np.array([2, np.nan, np.nan], dtype=np.float32), dm.data)
        # set a data with a different shape
        new_arr = np.array([2, np.nan, 1, 3], dtype=np.float32)
        dm.data = new_arr
        # a shape change restarts the moving average from the new array
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        np.testing.assert_array_equal(new_arr, dm.data)
        del dm.data
        # deletion clears value and count but preserves the window size
        self.assertIsNone(dm.data)
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
        dm.data = new_arr.copy()
        np.testing.assert_array_equal(new_arr, dm.data)
        dm.data = None
        # assigning None also resets the moving average
        self.assertIsNone(dm.data)
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
class TestRawImageData(unittest.TestCase):
    # This tests 2d and 3d MovingAverageArray
    def testTrainResolved(self):
        """Moving average over single (2D) images."""
        class Dummy:
            data = RawImageData()
        dm = Dummy()
        arr = np.ones((3, 3), dtype=np.float32)
        arr[0][2] = np.nan
        dm.data = arr
        # a single 2D image counts as one image
        self.assertEqual(1, Dummy.data.n_images)
        Dummy.data.window = 5
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        arr = 3 * np.ones((3, 3), dtype=np.float32)
        arr[1][2] = np.nan
        dm.data = arr
        self.assertEqual(5, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        # average of 1s and 3s is 2s; NaNs from either frame propagate
        expected = 2 * np.ones((3, 3), dtype=np.float32)
        expected[1][2] = np.nan
        expected[0][2] = np.nan
        np.testing.assert_array_equal(expected, dm.data)
        # set a ma window which is smaller than the current window
        Dummy.data.window = 3
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        np.testing.assert_array_equal(expected, dm.data)
        # set an image with a different shape
        new_arr = 2*np.ones((3, 1), dtype=np.float32)
        dm.data = new_arr
        # a shape change restarts the moving average from the new image
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        np.testing.assert_array_equal(new_arr, dm.data)
        del dm.data
        # deletion clears data and count but preserves the window size
        self.assertIsNone(dm.data)
        self.assertEqual(3, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
    def testPulseResolved(self):
        """Moving average over stacks of images (3D arrays)."""
        class Dummy:
            data = RawImageData()
        dm = Dummy()
        arr = np.ones((3, 4, 4), dtype=np.float32)
        arr[1][2][1] = np.nan
        # before any assignment there are no images
        self.assertEqual(0, Dummy.data.n_images)
        dm.data = arr
        # for a 3D array, n_images is the size of the first axis
        self.assertEqual(3, Dummy.data.n_images)
        Dummy.data.window = 10
        self.assertEqual(10, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        dm.data = 5 * np.ones((3, 4, 4), dtype=np.float32)
        dm.data[2][3][3] = np.nan
        self.assertEqual(10, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        # average of 1s and 5s is 3s; NaNs from either stack propagate
        expected = 3 * np.ones((3, 4, 4), dtype=np.float32)
        expected[1][2][1] = np.nan
        expected[2][3][3] = np.nan
        np.testing.assert_array_equal(expected, dm.data)
        # set a ma window which is smaller than the current window
        Dummy.data.window = 2
        self.assertEqual(2, Dummy.data.window)
        self.assertEqual(2, Dummy.data.count)
        np.testing.assert_array_equal(expected, dm.data)
        # set a data with a different number of images
        new_arr = 5 * np.ones((5, 4, 4))
        dm.data = new_arr
        # changing the number of images restarts the moving average
        self.assertEqual(2, Dummy.data.window)
        self.assertEqual(1, Dummy.data.count)
        np.testing.assert_array_equal(new_arr, dm.data)
        del dm.data
        self.assertIsNone(dm.data)
        self.assertEqual(2, Dummy.data.window)
        self.assertEqual(0, Dummy.data.count)
class TestProcessedData(unittest.TestCase):
    """Check train id and pulse counting on ProcessedData."""

    def testGeneral(self):
        # pulse-resolved data: a 3D image array yields one pulse per image
        processed = ProcessedData(1234)
        self.assertEqual(1234, processed.tid)
        self.assertEqual(0, processed.n_pulses)
        processed.image = ImageData.from_array(np.zeros((1, 2, 2)))
        self.assertEqual(1, processed.n_pulses)

        processed = ProcessedData(1235)
        processed.image = ImageData.from_array(np.zeros((3, 2, 2)))
        self.assertEqual(3, processed.n_pulses)

        # train-resolved data: a single 2D image counts as one pulse
        processed = ProcessedData(1236)
        processed.image = ImageData.from_array(np.zeros((2, 2)))
        self.assertEqual(1236, processed.tid)
        self.assertEqual(1, processed.n_pulses)
class TestImageData(unittest.TestCase):
    """Tests for ImageData.from_array validation and derived quantities."""

    def testFromArray(self):
        with self.assertRaises(TypeError):
            ImageData.from_array()
        # only 2D (train-resolved) and 3D (pulse-resolved) arrays are accepted
        with self.assertRaises(ValueError):
            ImageData.from_array(np.ones(2))
        with self.assertRaises(ValueError):
            ImageData.from_array(np.ones((2, 2, 2, 2)))
        image_data = ImageData.from_array(np.ones((2, 2, 3)))
        self.assertEqual((2, 3), image_data.mask.shape)
        image_data = ImageData.from_array(np.ones((3, 2)))
        self.assertEqual((3, 2), image_data.mask.shape)

    @patch.dict(config._data, {'PIXEL_SIZE': 2e-3})
    def testInitWithSpecifiedParametersPS(self):
        with self.assertRaises(ValueError):
            ImageData.from_array(np.ones((2, 2, 2)), sliced_indices=[0, 1, 2])
        with self.assertRaises(ValueError):
            ImageData.from_array(np.ones((2, 2, 2)), sliced_indices=[1, 1, 1])
        imgs = np.ones((3, 2, 3))
        imgs[:, 0, :] = 2
        # FIX: ``np.bool`` was removed in NumPy 1.24; builtin ``bool`` is the
        # exact dtype the alias used to resolve to, so behavior is unchanged.
        image_mask = np.zeros((2, 3), dtype=bool)
        image_mask[::2, ::2] = True
        image_data = ImageData.from_array(imgs,
                                          image_mask=image_mask,
                                          threshold_mask=(0, 1),
                                          poi_indices=[0, 1])
        self.assertEqual(2e-3, image_data.pixel_size)
        self.assertIsInstance(image_data.images, list)
        self.assertEqual(3, image_data.n_images)
        self.assertListEqual([0, 1, 2], image_data.sliced_indices)
        # POI images have masked pixels replaced by NaN
        np.testing.assert_array_equal(np.array([[np.nan, np.nan, np.nan], [1., 1., 1.]]),
                                      image_data.images[0])
        np.testing.assert_array_equal(np.array([[np.nan, np.nan, np.nan], [1., 1., 1.]]),
                                      image_data.images[1])
        # non-POI images are not kept
        self.assertIsNone(image_data.images[2])
        np.testing.assert_array_equal(np.array([[2., 2., 2.], [1., 1., 1.]]),
                                      image_data.mean)
        np.testing.assert_array_equal(np.array([[np.nan, np.nan, np.nan], [1., 1., 1.]]),
                                      image_data.masked_mean)
        self.assertIsNone(image_data.gain_mean)
        self.assertIsNone(image_data.offset_mean)
        self.assertEqual((0, 1), image_data.threshold_mask)

    @patch.dict(config._data, {'PIXEL_SIZE': 2e-3})
    def testInitWithSpecifiedParametersTS(self):
        with self.assertRaises(ValueError):
            ImageData.from_array(np.ones((2, 2)), sliced_indices=[0])
        img = np.array([[2, 1], [1, 1]])
        image_data = ImageData.from_array(img, threshold_mask=(0, 1))
        self.assertEqual([0], image_data.sliced_indices)
        self.assertEqual([None], image_data.images)
        self.assertEqual(1, image_data.n_images)
        np.testing.assert_array_equal(np.array([[2., 1.], [1., 1.]]),
                                      image_data.mean)
        # the value 2 lies outside the (0, 1) threshold mask, hence NaN
        np.testing.assert_array_equal(np.array([[np.nan, 1.], [1., 1.]]),
                                      image_data.masked_mean)
        self.assertEqual((0, 1), image_data.threshold_mask)
class TestIndexMask(unittest.TestCase):
    """Tests for PulseIndexMask drop/keep bookkeeping."""

    def testGeneral(self):
        index_mask = PulseIndexMask()
        # masking accepts both a list of indices and a single index
        index_mask.mask_by_index([0, 5])
        index_mask.mask_by_index(7)
        self.assertEqual(3, index_mask.n_dropped(10))
        self.assertEqual(1, index_mask.n_dropped(4))
        self.assertEqual(7, index_mask.n_kept(10))
        self.assertEqual(3, index_mask.n_kept(4))
        self.assertListEqual([0, 5, 7], index_mask.dropped_indices(100).tolist())
        self.assertListEqual([0, 5], index_mask.dropped_indices(6).tolist())
        self.assertEqual(97, len(index_mask.kept_indices(100)))
        self.assertEqual(4, len(index_mask.kept_indices(6)))
        for dropped in (0, 5, 7):
            self.assertNotIn(dropped, index_mask.kept_indices(100))
            self.assertNotIn(dropped, index_mask.kept_indices(6))
        index_mask.reset()
        self.assertEqual(10, index_mask.n_kept(10))

    def testMaskByArray(self):
        index_mask = PulseIndexMask()
        index_mask.mask_by_array(np.array([True, False]))
        self.assertListEqual([0], index_mask.dropped_indices(100).tolist())
        # masking again accumulates dropped indices
        index_mask.mask_by_array(np.array([1, 1, 0, 1, 0], dtype=bool))
        self.assertListEqual([0, 1, 3], index_mask.dropped_indices(100).tolist())
class TestRoiGeom(unittest.TestCase):
    """Tests for rectangular ROI extraction on 2D and 3D images."""

    def setUp(self):
        self._img = np.arange(100).reshape((10, 10))
        self._img_array = np.arange(400).reshape((4, 10, 10))

    def testRect(self):
        from extra_foam.pipeline.data_model import RectRoiGeom
        for image in (self._img, self._img_array):
            # default geometry [0, 0, -1, -1] yields no rectangle
            roi = RectRoiGeom()
            self.assertIsNone(roi.rect(image))
            # zero-width geometry: no intersection with the image
            roi.geometry = [0, 0, 0, 2]
            self.assertIsNone(roi.rect(image))
            # geometry [x, y, w, h] intersecting the image
            roi.geometry = [1, 2, 3, 2]
            np.testing.assert_array_equal(image[..., 2:4, 1:4], roi.rect(image))
class TestXgmData(unittest.TestCase):
    """Tests for the slotted XgmData container."""
    from extra_foam.pipeline.data_model import XgmData

    def testGeneral(self):
        xgm = self.XgmData()
        # the declared attributes are assignable
        xgm.intensity = 100.
        xgm.x = 0.1
        xgm.y = -0.1
        # unknown attributes are rejected (slots)
        with self.assertRaises(AttributeError):
            xgm.xx = 0.2
class TestDigitizerData(unittest.TestCase):
    """Tests for the per-channel DigitizerData mapping."""
    from extra_foam.pipeline.data_model import (
        _DigitizerDataItem, _DigitizerChannelData, DigitizerData)

    def testGeneral(self):
        digitizer = self.DigitizerData()
        self.assertIn('A', digitizer)
        # every channel maps to a _DigitizerDataItem
        for channel, item in digitizer.items():
            self.assertIsInstance(item, self._DigitizerDataItem)
        digitizer['D'].pulse_integral = [1, 2, 3]
        # unknown item attributes are rejected (slots)
        with self.assertRaises(AttributeError):
            digitizer['D'].sample
class TestBinData(unittest.TestCase):
    """Tests for the fixed-size BinData mapping."""
    from extra_foam.pipeline.data_model import BinData

    def testGeneral(self):
        bin_data = self.BinData()
        # mapping protocol: exactly two bins, indexed 0 and 1
        self.assertEqual(2, len(bin_data))
        for key in (0, 1):
            self.assertIn(key, bin_data)
        self.assertNotIn(2, bin_data)
        for entry in bin_data:
            self.assertIsInstance(entry, self.BinData.BinDataItem)
        with self.assertRaises(IndexError):
            bin_data[2]
        self.assertIsInstance(bin_data[1], self.BinData.BinDataItem)
        # attribute assignment is rejected (slots)
        with self.assertRaises(AttributeError):
            bin_data.b = self.BinData.BinDataItem()
class TestCorrelationData(unittest.TestCase):
    """Tests for the fixed-size CorrelationData mapping."""
    from extra_foam.pipeline.data_model import CorrelationData

    def testGeneral(self):
        corr = self.CorrelationData()
        # mapping protocol: exactly two correlations, indexed 0 and 1
        self.assertEqual(2, len(corr))
        for key in (0, 1):
            self.assertIn(key, corr)
        self.assertNotIn(2, corr)
        for entry in corr:
            self.assertIsInstance(entry, corr.CorrelationDataItem)
        with self.assertRaises(IndexError):
            corr[2]
        self.assertIsInstance(corr[1], corr.CorrelationDataItem)
        # attribute assignment is rejected (slots)
        with self.assertRaises(AttributeError):
            corr.c = corr.CorrelationDataItem()
class TestHistogramData(unittest.TestCase):
    """Tests for the dict-like HistogramDataPulse container."""
    def testGeneral(self):
        from extra_foam.pipeline.data_model import _HistogramDataItem, HistogramDataPulse
        data = HistogramDataPulse()
        hist_gt, bin_centers_gt = np.arange(0, 10, 1), np.arange(0, 20, 2)
        self.assertEqual(0, len(data))
        # __getitem__ and __setitem__
        # keys must be valid integer pulse indices: string keys are rejected,
        # and 2700 is rejected too (presumably above the maximum pulse index
        # — confirm against HistogramDataPulse's key validation)
        with self.assertRaises(KeyError):
            data['abc'] = (hist_gt, bin_centers_gt)
        with self.assertRaises(KeyError):
            data['1'] = (hist_gt, bin_centers_gt)
        with self.assertRaises(KeyError):
            data[2700] = (hist_gt, bin_centers_gt)
        # values are (hist, bin_centers, mean, median, std) tuples
        data[1] = (hist_gt, bin_centers_gt, 1, 2, 3)
        data[100] = (hist_gt, bin_centers_gt, 1, 2, 3)
        np.testing.assert_array_equal(hist_gt, data[100].hist)
        np.testing.assert_array_equal(bin_centers_gt, data[100].bin_centers)
        self.assertEqual(1, data[100].mean)
        self.assertEqual(2, data[100].median)
        self.assertEqual(3, data[100].std)
        # __iter__
        for _, item in data.items():
            self.assertIsInstance(item, _HistogramDataItem)
        # __delitem__ and __len__
        self.assertEqual(2, len(data))
        del data[100]
        self.assertEqual(1, len(data))
        del data[1]
        self.assertEqual(0, len(data))
| [
"[email protected]"
] | |
9a9a5e1a87e823fb31274cc803479e5f9f48c592 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /day_or_point/say_good_work.py | e57f94898800ec38a14522b3b0c3743ae46ef192 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py |
#! /usr/bin/env python
def use_few_part_over_bad_hand(str_arg):
    """Print *str_arg* (via important_number) followed by the literal 'thing'."""
    important_number(str_arg)
    print('thing')


def important_number(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)
# Smoke-test entry point: running the module directly exercises both helpers.
if __name__ == '__main__':
    use_few_part_over_bad_hand('great_year')
| [
"[email protected]"
] | |
04f06111126887806589e20ae3df08a21ef35dab | 683a90831bb591526c6786e5f8c4a2b34852cf99 | /CodeSignal/Interview/Backtracking/2_WordBoggle.py | 56777e11c7b04f10d9d347dd0cc392adb21fe6d6 | [] | no_license | dbetm/cp-history | 32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d | 0ceeba631525c4776c21d547e5ab101f10c4fe70 | refs/heads/main | 2023-04-29T19:36:31.180763 | 2023-04-15T18:03:19 | 2023-04-15T18:03:19 | 164,786,056 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | import copy
# https://app.codesignal.com/interview-practice/task/v3uf4PGocp2CH62nn/description
# Tag(s): Backtracking, recursion
def print_board(board):
for row in board:
print(row)
deltas = [
(-1, 0), (-1, -1), (-1, 1), (0, -1),
(0, 1), (1, 0), (1, -1), (1, 1)
]
def explore(board, word, i, j, k):
if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
return False
if board[i][j] == '-' or k >= len(word):
return False
if board[i][j] != word[k]:
return False
original_chr = board[i][j]
board[i][j] = '-'
if k == (len(word)-1):
return True
ans = False
for delta in deltas:
if explore(board, word, i+delta[0], j+delta[1], k+1):
ans = True
break
board[i][j] = original_chr # backtracking
return ans
def wordBoggle(board, words):
ans = []
for word in words:
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == word[0]:
tmp_board = copy.deepcopy(board)
if explore(tmp_board, word, i, j, 0):
ans.append(word)
break
else:
continue
break
ans.sort()
return ans
if __name__ == '__main__':
board = []
rows = int(input())
for _ in range(rows):
x = input().split()
board.append(x)
words = input().split()
print(wordBoggle(board, words))
| [
"[email protected]"
] | |
8137538c751572157b1a44dc0c0f97368389f271 | f9cce83d8259f53686ed545cf301d6e72258ea90 | /mynewproject/mynewproject/settings.py | 2a6d9179c5e65c306eeb698650876c6ff4e54220 | [] | no_license | ir4y/docker-workshop | dc649e53598d9fc237348aab64ccd3b5141bc12e | 9e6f7f2445bf4b4ac1c416f83488a8d840d64db6 | refs/heads/master | 2020-12-24T22:06:12.190270 | 2016-04-22T20:53:22 | 2016-04-22T20:53:22 | 56,765,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | """
Django settings for mynewproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source and DEBUG is on below — fine for
# local development only; load both from environment variables before deploying.
SECRET_KEY = '1w60yw+fb4t2ni1=u1-2en59e40=kb44yms*!7z2j@2#85ths6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mynewproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mynewproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# dj_database_url reads the DATABASE_URL environment variable.
DATABASES = {
    'default': dj_database_url.config(),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
15bc8407519a5501e2d0687a01ae6c171ec545ed | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/SVM/experiments_(MRs_on_Mutants)/digitsData/RBFKernel/Mutants/MR1-permuteFeatures/r5/DigitRecognitionApp_5.py | 99860b4f108cd70b0936ce937a2f1954c2cf2a6a | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | """
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by Raghu
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
# Load the training set: each row is 64 flattened pixel values plus the label.
digits = np.loadtxt('digits_Train_MR1_PermFeatures.csv', delimiter=',')
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
# FIX: np.int was removed in NumPy 1.20/1.24; builtin int is the exact alias target.
digits_target = digits[:,(-1)].astype(int)
digits_test = np.loadtxt('digits_Test_MR1_PermFeatures.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
# NOTE(review): the training target is taken from the LAST column but the test
# target from column 1 — presumably deliberate for this mutant (MR1-permuteFeatures
# experiment), but confirm against the CSV layout.
digits_test_target = digits_test[:,1].astype(int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
# Train an RBF-kernel SVM (default kernel) on the flattened training images.
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
    classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
# Persist decision-function values, the confusion matrix and the trained model.
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
    cPickle.dump(outputData, outputFile)
with open('model.pkl', 'wb') as modelFile:
    cPickle.dump(classifier, modelFile)
"[email protected]"
] | |
8afe84368719c408ffe7879c59647637b4ef3d99 | de56b7409521bec01709042fb6ba8d7b49c066bc | /Baekjoon/Bronze/10809.py | fd7f2bf0bc9e1603919ab7c87bd43c4e9e0b26e3 | [] | no_license | hodurie/Algorithm | 40595c836febef815eff80585765a21a8cc299f1 | f0a72afd65d078661f3e8921de61d8c61ac06d89 | refs/heads/master | 2023-07-26T04:29:59.181987 | 2021-09-06T11:44:39 | 2021-09-06T11:44:39 | 264,898,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | S = input()
from string import ascii_lowercase


def first_indices(word):
    """Return, for each letter a-z, the index of its first occurrence in *word* (-1 if absent)."""
    # str.find already returns -1 for missing characters, which is the required output.
    return [word.find(letter) for letter in ascii_lowercase]


if __name__ == '__main__':
    # Preserve the original output format: indices separated (and terminated) by a space.
    for index in first_indices(S):
        print(index, end=" ")
"[email protected]"
] | |
53f18e3d6324766217bd33e5f7c5df5c74d01171 | 7e98a3fc246547cc93ce2a93f39410aac38f8dd3 | /bag/tests/test_urls.py | c6fc2dc10f4a6c80e6986f82a633ff10c1e9cdec | [] | no_license | kydzoster/huntinteriors | 53b5d064f05010c71c70d72a6148494226980287 | 4439c652d74d9b5553abc67d4bbac73b33b42336 | refs/heads/main | 2023-02-04T03:14:50.250492 | 2020-12-16T13:22:31 | 2020-12-16T13:22:31 | 301,401,449 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | from django.test import TestCase
from django.urls import reverse, resolve
from bag.views import view_bag, add_to_bag, adjust_bag, remove_from_bag,\
success
class TestUrls(TestCase):
    """Check that each bag URL name resolves to the expected view function."""
    # FIX: assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual throughout (behavior is otherwise identical).

    def test_view_bag_url_is_resolved(self):
        url = reverse('view_bag')
        print(resolve(url))
        self.assertEqual(resolve(url).func, view_bag)

    def test_add_to_bag_url_is_resolved(self):
        url = reverse('add_to_bag', args=[1])
        print(resolve(url))
        self.assertEqual(resolve(url).func, add_to_bag)

    def test_adjust_bag_url_is_resolved(self):
        url = reverse('adjust_bag', args=[1])
        print(resolve(url))
        self.assertEqual(resolve(url).func, adjust_bag)

    def test_remove_from_bag_url_is_resolved(self):
        url = reverse('remove_from_bag', args=[1])
        print(resolve(url))
        self.assertEqual(resolve(url).func, remove_from_bag)

    def test_success_url_is_resolved(self):
        url = reverse('success')
        print(resolve(url))
        self.assertEqual(resolve(url).func, success)
| [
"[email protected]"
] | |
0aa85492271aaf75d61d16e7ddde4438d0ea5ae7 | ac235a23f22be0d6f1818bb53902177f9969813a | /ddtrace/contrib/kafka/patch.py | bbec71de62bc1dc85235d14cc11c93ef77323796 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 10,186 | py | import time
import confluent_kafka
from confluent_kafka import TopicPartition
from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import SPAN_KIND
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.contrib import trace_utils
from ddtrace.ext import SpanKind
from ddtrace.ext import SpanTypes
from ddtrace.ext import kafka as kafkax
from ddtrace.internal.compat import ensure_text
from ddtrace.internal.constants import COMPONENT
from ddtrace.internal.constants import MESSAGING_SYSTEM
from ddtrace.internal.datastreams.processor import PROPAGATION_KEY
from ddtrace.internal.schema import schematize_messaging_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.schema.span_attribute_schema import SpanDirection
from ddtrace.internal.utils import ArgumentError
from ddtrace.internal.utils import get_argument_value
from ddtrace.internal.utils import set_argument_value
from ddtrace.internal.utils.formats import asbool
from ddtrace.pin import Pin
# Keep references to the unpatched classes so unpatch() can restore them.
_Producer = confluent_kafka.Producer
_Consumer = confluent_kafka.Consumer
# SerializingProducer/DeserializingConsumer only exist in newer confluent_kafka releases.
_SerializingProducer = confluent_kafka.SerializingProducer if hasattr(confluent_kafka, "SerializingProducer") else None
_DeserializingConsumer = (
    confluent_kafka.DeserializingConsumer if hasattr(confluent_kafka, "DeserializingConsumer") else None
)
# Register the integration's config namespace with its schematized default service name.
config._add(
    "kafka",
    dict(
        _default_service=schematize_service_name("kafka"),
    ),
)
def get_version():
    # type: () -> str
    """Return the installed confluent-kafka version string, or "" if absent."""
    version = getattr(confluent_kafka, "__version__", "")
    return version
class TracedProducer(confluent_kafka.Producer):
    """Producer subclass whose ``produce`` method can be wrapped for tracing."""

    def __init__(self, config, *args, **kwargs):
        super(TracedProducer, self).__init__(config, *args, **kwargs)
        # Remember the broker list so produce spans can tag the host list;
        # fall back to the legacy "metadata.broker.list" setting.
        bootstrap_servers = config.get("bootstrap.servers")
        if bootstrap_servers is None:
            bootstrap_servers = config.get("metadata.broker.list")
        self._dd_bootstrap_servers = bootstrap_servers

    def produce(self, topic, value=None, *args, **kwargs):
        super(TracedProducer, self).produce(topic, value, *args, **kwargs)

    # In older confluent_kafka releases bool(Producer()) evaluates to False,
    # which would make the Pin machinery ignore the instance; force truthiness.
    def __bool__(self):
        return True

    __nonzero__ = __bool__
class TracedConsumer(confluent_kafka.Consumer):
    """Consumer subclass whose ``poll``/``commit`` methods can be wrapped for tracing."""

    def __init__(self, config, *args, **kwargs):
        super(TracedConsumer, self).__init__(config, *args, **kwargs)
        self._group_id = config.get("group.id", "")
        # With auto-commit on, a message is treated as acknowledged once read.
        self._auto_commit = asbool(config.get("enable.auto.commit", True))

    def poll(self, timeout=1):
        return super(TracedConsumer, self).poll(timeout)

    def commit(self, message=None, *args, **kwargs):
        # FIX: the original passed ``args`` and ``kwargs`` as two positional
        # arguments (commit(message, args, kwargs)), which handed a tuple to
        # ``offsets`` and a dict to ``asynchronous``. Unpack them instead.
        return super(TracedConsumer, self).commit(message, *args, **kwargs)
def patch():
    """Monkey-patch confluent_kafka so Producer/Consumer operations emit spans."""
    # Idempotence guard: never double-patch.
    if getattr(confluent_kafka, "_datadog_patch", False):
        return
    confluent_kafka._datadog_patch = True
    # Replace the public classes with the traced subclasses.
    confluent_kafka.Producer = TracedProducer
    confluent_kafka.Consumer = TracedConsumer
    if _SerializingProducer is not None:
        confluent_kafka.SerializingProducer = TracedProducer
    if _DeserializingConsumer is not None:
        confluent_kafka.DeserializingConsumer = TracedConsumer
    # Wrap the traced subclasses' methods with the span-producing wrappers.
    trace_utils.wrap(TracedProducer, "produce", traced_produce)
    trace_utils.wrap(TracedConsumer, "poll", traced_poll)
    trace_utils.wrap(TracedConsumer, "commit", traced_commit)
    # Attach default Pins so the wrappers are enabled out of the box.
    Pin().onto(confluent_kafka.Producer)
    Pin().onto(confluent_kafka.Consumer)
def unpatch():
    """Undo patch(): unwrap the traced methods and restore the original classes."""
    if getattr(confluent_kafka, "_datadog_patch", False):
        confluent_kafka._datadog_patch = False
    # Unwrap only if currently wrapped, so unpatch() is safe to call twice.
    if trace_utils.iswrapped(TracedProducer.produce):
        trace_utils.unwrap(TracedProducer, "produce")
    if trace_utils.iswrapped(TracedConsumer.poll):
        trace_utils.unwrap(TracedConsumer, "poll")
    if trace_utils.iswrapped(TracedConsumer.commit):
        trace_utils.unwrap(TracedConsumer, "commit")
    # Restore the classes captured at import time.
    confluent_kafka.Producer = _Producer
    confluent_kafka.Consumer = _Consumer
    if _SerializingProducer is not None:
        confluent_kafka.SerializingProducer = _SerializingProducer
    if _DeserializingConsumer is not None:
        confluent_kafka.DeserializingConsumer = _DeserializingConsumer
def traced_produce(func, instance, args, kwargs):
    """Wrapper for Producer.produce: emits a producer span and tracks data streams."""
    pin = Pin.get_from(instance)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)
    topic = get_argument_value(args, kwargs, 0, "topic") or ""
    try:
        value = get_argument_value(args, kwargs, 1, "value")
    except ArgumentError:
        value = None
    message_key = kwargs.get("key", "")
    partition = kwargs.get("partition", -1)
    if config._data_streams_enabled:
        # inject data streams context
        headers = kwargs.get("headers", {})
        pathway = pin.tracer.data_streams_processor.set_checkpoint(["direction:out", "topic:" + topic, "type:kafka"])
        headers[PROPAGATION_KEY] = pathway.encode()
        kwargs["headers"] = headers
    # The delivery callback can be passed as "on_delivery" (positional index 5)
    # or under the alias "callback" (positional index 4); try both in turn.
    on_delivery_kwarg = "on_delivery"
    on_delivery_arg = 5
    on_delivery = None
    try:
        on_delivery = get_argument_value(args, kwargs, on_delivery_arg, on_delivery_kwarg)
    except ArgumentError:
        on_delivery_kwarg = "callback"
        on_delivery_arg = 4
        try:
            on_delivery = get_argument_value(args, kwargs, on_delivery_arg, on_delivery_kwarg)
        except ArgumentError:
            on_delivery = None
    def wrapped_callback(err, msg):
        # Record a successful produce for data streams, then chain to the
        # user's original callback (if any).
        if err is None:
            if pin.tracer.data_streams_processor:
                pin.tracer.data_streams_processor.track_kafka_produce(
                    msg.topic(), msg.partition(), msg.offset() or -1, time.time()
                )
        if on_delivery is not None:
            on_delivery(err, msg)
    try:
        args, kwargs = set_argument_value(args, kwargs, on_delivery_arg, on_delivery_kwarg, wrapped_callback)
    except ArgumentError:
        # we set the callback even if it's not set by the client, to track produce calls correctly.
        kwargs[on_delivery_kwarg] = wrapped_callback
    with pin.tracer.trace(
        schematize_messaging_operation(kafkax.PRODUCE, provider="kafka", direction=SpanDirection.OUTBOUND),
        service=trace_utils.ext_service(pin, config.kafka),
        span_type=SpanTypes.WORKER,
    ) as span:
        span.set_tag_str(MESSAGING_SYSTEM, kafkax.SERVICE)
        span.set_tag_str(COMPONENT, config.kafka.integration_name)
        span.set_tag_str(SPAN_KIND, SpanKind.PRODUCER)
        span.set_tag_str(kafkax.TOPIC, topic)
        # The key may be bytes; decode it defensively for the tag.
        span.set_tag_str(kafkax.MESSAGE_KEY, ensure_text(message_key, errors="replace"))
        span.set_tag(kafkax.PARTITION, partition)
        # A None value is a Kafka tombstone (deletion marker).
        span.set_tag_str(kafkax.TOMBSTONE, str(value is None))
        span.set_tag(SPAN_MEASURED_KEY)
        if instance._dd_bootstrap_servers is not None:
            span.set_tag_str(kafkax.HOST_LIST, instance._dd_bootstrap_servers)
        rate = config.kafka.get_analytics_sample_rate()
        if rate is not None:
            span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, rate)
        return func(*args, **kwargs)
def traced_poll(func, instance, args, kwargs):
    """Trace a Kafka ``Consumer.poll`` call.

    Opens a consumer span around the underlying poll (so time spent waiting
    for a message is included in the span duration), tags the span with the
    message metadata when a message was returned, and — when data streams
    monitoring is enabled — records a Data Streams checkpoint decoded from
    the propagated message header.
    """
    pin = Pin.get_from(instance)
    # Tracing disabled for this consumer instance: call through untouched.
    if not pin or not pin.enabled():
        return func(*args, **kwargs)
    with pin.tracer.trace(
        schematize_messaging_operation(kafkax.CONSUME, provider="kafka", direction=SpanDirection.PROCESSING),
        service=trace_utils.ext_service(pin, config.kafka),
        span_type=SpanTypes.WORKER,
    ) as span:
        # The poll itself runs inside the span so blocking time is measured.
        message = func(*args, **kwargs)
        span.set_tag_str(MESSAGING_SYSTEM, kafkax.SERVICE)
        span.set_tag_str(COMPONENT, config.kafka.integration_name)
        span.set_tag_str(SPAN_KIND, SpanKind.CONSUMER)
        # poll() may legitimately return None (e.g. timeout); record whether a
        # message was actually received.
        span.set_tag_str(kafkax.RECEIVED_MESSAGE, str(message is not None))
        span.set_tag_str(kafkax.GROUP_ID, instance._group_id)
        if message is not None:
            if config._data_streams_enabled:
                # Rebuild the data-streams pathway from the propagated header
                # (if present) and record the "consume" checkpoint.
                headers = {header[0]: header[1] for header in (message.headers() or [])}
                ctx = pin.tracer.data_streams_processor.decode_pathway(headers.get(PROPAGATION_KEY, None))
                ctx.set_checkpoint(
                    ["direction:in", "group:" + instance._group_id, "topic:" + message.topic(), "type:kafka"]
                )
                if instance._auto_commit:
                    # it's not exactly true, but if auto commit is enabled, we consider that a message is acknowledged
                    # when it's read.
                    pin.tracer.data_streams_processor.track_kafka_commit(
                        instance._group_id, message.topic(), message.partition(), message.offset() or -1, time.time()
                    )
            # key()/offset() may return None; normalize for tagging.
            message_key = message.key() or ""
            message_offset = message.offset() or -1
            span.set_tag_str(kafkax.TOPIC, message.topic())
            span.set_tag_str(kafkax.MESSAGE_KEY, ensure_text(message_key, errors="replace"))
            span.set_tag(kafkax.PARTITION, message.partition())
            # NOTE(review): relies on Message supporting len(); a zero-length
            # message is tagged as a tombstone — confirm against the
            # confluent_kafka Message API.
            span.set_tag_str(kafkax.TOMBSTONE, str(len(message) == 0))
            span.set_tag(kafkax.MESSAGE_OFFSET, message_offset)
        span.set_tag(SPAN_MEASURED_KEY)
        rate = config.kafka.get_analytics_sample_rate()
        if rate is not None:
            span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, rate)
        return message
def traced_commit(func, instance, args, kwargs):
    """Trace a Kafka ``Consumer.commit`` call for Data Streams Monitoring.

    No APM span is created here; when data streams monitoring is enabled the
    committed offsets are reported to the data streams processor, then the
    wrapped commit is invoked unchanged.
    """
    pin = Pin.get_from(instance)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)
    if config._data_streams_enabled:
        # NOTE(review): get_argument_value raises ArgumentError when neither a
        # positional nor a keyword ``message`` is supplied — confirm commit()
        # is always invoked with one, or guard this lookup like traced_produce
        # does.
        message = get_argument_value(args, kwargs, 0, "message")
        offsets = kwargs.get("offsets", [])
        if message is not None:
            # Committing a single message means committing that message's
            # topic/partition/offset.
            offsets = [TopicPartition(message.topic(), message.partition(), offset=message.offset())]
        for offset in offsets:
            pin.tracer.data_streams_processor.track_kafka_commit(
                instance._group_id, offset.topic, offset.partition, offset.offset or -1, time.time()
            )
    return func(*args, **kwargs)
| [
"[email protected]"
] | |
4511418ac6a1ba4d051b347cf150a798e4753afa | 644d9ef18713e4cb5d4c3b53301bd7276dcdf477 | /api/programs/serializers/courses/__init__.py | 0420ef42cac21787e67976e0cca760e59a01bd92 | [] | no_license | alexhernandez-git/django-classline | 6cb5bcd268248999e18037f58c4ed30012d51915 | 49fcf0c6d735a56eaebc17d04be52dab91ca4c3a | refs/heads/master | 2023-03-18T07:10:08.770066 | 2021-03-04T22:24:09 | 2021-03-04T22:24:09 | 287,985,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from .languages import *
from .prices import *
from .courses import *
from .blocks import *
from .block_tracks import *
from .items import *
from .item_tracks import *
from .contents import *
from .item_questions import *
from .item_answers import *
from .items_viewed import *
from .materials import *
from .course_users_data import *
from .students import * | [
"[email protected]"
] | |
81b3c1a604d12b227bc601a62060b3b20494c030 | c03d7a4e03c581d4be98b6363003cddb9c213ec0 | /registration/migrations/0017_auto_20181122_2208.py | 3e33472466085ca31c4186c48ec9f60a73f4368e | [] | no_license | hernandavidc/plataforma | b333e4f06290713072d8dc609c27d4ce8af1d9df | 4316e2a59db76e74f1e6106958631ad4a7a653c7 | refs/heads/master | 2020-04-06T17:08:21.019355 | 2019-04-09T04:41:00 | 2019-04-09T04:41:00 | 157,648,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # Generated by Django 2.1 on 2018-11-23 03:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Veterinaria.latitud`` and
    ``Veterinaria.longitud`` optional (nullable/blank) decimal fields."""
    dependencies = [
        ('registration', '0016_auto_20180910_1209'),
    ]
    operations = [
        migrations.AlterField(
            model_name='veterinaria',
            name='latitud',
            field=models.DecimalField(blank=True, decimal_places=8, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='veterinaria',
            name='longitud',
            field=models.DecimalField(blank=True, decimal_places=8, max_digits=9, null=True),
        ),
    ]
| [
"[email protected]"
] | |
5d0900423f187a722e7b4784a10d242cb508eaa3 | f4309766d0292d6ae06344221b667603fda206ec | /backend/apps/user/views.py | f23bb486e4b7eca9c5f74b758711e4d8d7606f82 | [] | no_license | tom2jack/Journey | dff0181ef8939a9edf52987a2439563ca0c4342d | cb7bbca759c3d27815fde0d1697c2184b31b2aac | refs/heads/master | 2022-02-23T05:43:58.602808 | 2019-10-08T08:27:36 | 2019-10-08T08:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,632 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework import viewsets
from django.db.models import Q
from rest_framework import filters
from user.models import *
from user.serializers import *
from user.permissions import CustomerPremission
import random, string
def random_str(randomlength=10):
    """Return a random ASCII-letter string of length ``randomlength``.

    This helper is used below to generate initial passwords for newly created
    users, so characters are drawn with ``secrets`` (cryptographically strong)
    rather than ``random``. Unlike the previous shuffle-based implementation —
    which silently returned at most 52 characters because it sampled the
    letters without repetition — this supports any non-negative length.
    """
    import secrets  # local import: keeps the module's import block untouched
    return ''.join(secrets.choice(string.ascii_letters) for _ in range(randomlength))
class UserGroupViewSet(viewsets.ModelViewSet):
    """
    list:
        List user groups.
    create:
        Create a user group.
    delete:
        Delete a user group.
    update:
        Update a user group (replaces the group's user membership).
    """
    queryset = UserGroup.objects.all().order_by('id')
    serializer_class = UserGroupSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    search_fields = ('group','comment',)
    ordering_fields = ('id',)
    # Permission settings: the custom permission class checks module_perms.
    permission_classes = [CustomerPremission,]
    module_perms = ['user:usergroup']
    def update(self, request, *args, **kwargs):
        # Replaces the group's m2m user membership with the ids listed in
        # ``request.data['userselected']``.
        partial = kwargs.pop('partial', False)
        userselected = request.data['userselected']
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        # NOTE(review): the serializer is validated but never saved (no
        # self.perform_update(serializer)), so non-m2m field changes in the
        # payload are silently dropped — confirm this is intentional.
        userlist = Users.objects.filter(Q(id__in=userselected))
        instance.user_group.set(userlist,bulk=True)
        return Response(serializer.data)
class MenuViewSet(viewsets.ModelViewSet):
    """
    list:
        List menus, returned as a nested three-level tree.
    create:
        Create a menu entry.
    delete:
        Delete a menu entry.
    update:
        Update a menu entry.
    """
    queryset = Menu.objects.all().order_by('id')
    serializer_class = MenuSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    # search_fields = ('mtype',)
    ordering_fields = ('id',)
    # Permission settings: the custom permission class checks module_perms.
    permission_classes = [CustomerPremission,]
    module_perms = ['user:menu']
    def list(self, request, *args, **kwargs):
        # Builds the menu tree by hand instead of via the serializer:
        # mtype=0 rows are roots, mtype=1 rows attach under their parent_id,
        # and mtype=2 rows become leaves under the second level.
        # NOTE(review): each .filter(...) inside the loops executes its own
        # database query, so this is O(rows) round-trips — consider grouping
        # the rows in memory if the menu table grows.
        results = []
        queryset = self.filter_queryset(self.get_queryset())
        for i in queryset.filter(Q(mtype=0)):
            results.append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag,'children':[]})
        for item in results:
            for i in queryset.filter(Q(mtype=1)&Q(parent_id=item['id'])):
                item['children'].append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag,'children':[]})
        for item in results:
            if (len(item['children']) > 0):
                for node in item['children']:
                    for i in queryset.filter(Q(mtype=2)&Q(parent_id=node['id'])):
                        node['children'].append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag})
        # serializer = self.get_serializer(queryset, many=True)
        return Response(results)
class RoleViewSet(viewsets.ModelViewSet):
    """
    list:
        List roles.
    create:
        Create a role.
    delete:
        Delete a role.
    update:
        Update a role's menu permissions or user membership.
    """
    queryset = Role.objects.all().order_by('id')
    serializer_class = RoleSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    # Fix: was ('name') — a bare string, which DRF's SearchFilter iterates
    # character by character; a one-element tuple searches the intended field.
    search_fields = ('name',)
    ordering_fields = ('id',)
    # Permission settings: the custom permission class checks module_perms.
    permission_classes = [CustomerPremission,]
    module_perms = ['user:role']
    def update(self, request, *args, **kwargs):
        """Update a role.

        ``request.data['type']`` selects what to replace:
          * ``role_perms`` — replace the role's menu/permission m2m set
            with ``request.data['permsselected']``.
          * ``role_users`` — replace the role's user m2m set with
            ``request.data['userselected']``.
        Any other value validates the payload and returns it unchanged.
        """
        edittype = request.data['type']
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        # NOTE(review): the serializer is validated but never saved, so plain
        # field edits in the payload are dropped — confirm this is intentional.
        if (edittype == 'role_perms'):
            permsselected = request.data['permsselected']
            instance.menu.set(permsselected)
        elif (edittype == 'role_users'):
            userselected = request.data['userselected']
            instance.user_role.set(userselected)
        return Response(serializer.data)
class UsersViewSet(viewsets.ModelViewSet):
    """
    list:
        List users.
    create:
        Create a user (an initial password is generated when none is given).
    delete:
        Delete a user.
    update:
        Update a user (resets the password when one is supplied).
    """
    queryset = Users.objects.all().order_by('id')
    serializer_class = UsersSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    search_fields = ('username','email',)
    ordering_fields = ('id',)
    # Permission settings: the custom permission class checks module_perms.
    permission_classes = [CustomerPremission,]
    module_perms = ['user:user']
    def create(self, request, *args, **kwargs):
        # When no password is supplied, generate one, create the user, and
        # hash the password via set_password (perform_create would otherwise
        # store it as given by the serializer).
        # NOTE(review): there is no else-branch — if a non-empty password IS
        # supplied this method returns None, which DRF turns into an error.
        # Confirm whether that path is ever exercised.
        if (len(request.data['password']) == 0):
            mailtolist = []
            request.data['password'] = random_str()
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            username = request.data['username']
            password = request.data['password']
            useremail = request.data['email']
            mailtolist.append(useremail)
            userinfo = Users.objects.get(username=username)
            userinfo.set_password(password)
            userinfo.save()
            headers = self.get_success_headers(serializer.data)
            # maildata = {}
            # maildata['username'] = username
            # maildata['password'] = password
            # send_mail(mailtolist,1,maildata)
            return Response(request.data, status=status.HTTP_201_CREATED, headers=headers)
    def update(self, request, *args, **kwargs):
        # Two branches that differ only in the password-reset/email tail;
        # both validate and save through the serializer.
        if ('password' in request.data.keys()):
            mailtolist = []
            partial = kwargs.pop('partial', False)
            instance = self.get_object()
            serializer = self.get_serializer(instance, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            if getattr(instance, '_prefetched_objects_cache', None):
                # If 'prefetch_related' has been applied to a queryset, we need to
                # forcibly invalidate the prefetch cache on the instance.
                instance._prefetched_objects_cache = {}
            username = request.data['username']
            password = request.data['password']
            useremail = request.data['email']
            # Email content (NOTE(review): would carry the plaintext password
            # if the commented-out send_mail below is re-enabled).
            maildata = {}
            maildata['username'] = username
            maildata['password'] = password
            # Send to the user's email address
            mailtolist.append(useremail)
            userinfo = Users.objects.get(username=username)
            userinfo.set_password(password)
            userinfo.save()
            # send_mail(mailtolist,2,maildata)
        else:
            partial = kwargs.pop('partial', False)
            instance = self.get_object()
            serializer = self.get_serializer(instance, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            if getattr(instance, '_prefetched_objects_cache', None):
                # If 'prefetch_related' has been applied to a queryset, we need to
                # forcibly invalidate the prefetch cache on the instance.
                instance._prefetched_objects_cache = {}
        return Response(serializer.data)
| [
"[email protected]"
] | |
c334f9d5fe148729774e785a96feae949de4f060 | 5e48f770f975ea0ae166cd662576baa36150cb41 | /booking/migrations/0002_auto_20170725_2313.py | c1abc8a5cb7e36b285b6bc51fe62541f7d726ae7 | [] | no_license | Ngahu/Booking | df5a7b2e346bf497bc340e4ee3e6e7184c40d235 | 3187bb4a34225364181f0409344457c43f20b338 | refs/heads/master | 2021-01-01T19:54:20.069252 | 2017-07-29T08:25:32 | 2017-07-29T08:25:32 | 98,717,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-25 23:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give ``Book.travelling_persons`` a default of 1."""
    dependencies = [
        ('booking', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='book',
            name='travelling_persons',
            field=models.IntegerField(default=1),
        ),
    ]
| [
"[email protected]"
] | |
be0eda8aa93f7cb677306f18f5e62307bb935bc2 | efec547cce9f3e73246990de7ce9572f2c585725 | /tensorflow/python/distribute/tpu_strategy.py | f39491da5307fe44a4ad03eb944b19983a0d66c3 | [
"Apache-2.0"
] | permissive | ltn100/tensorflow | a5d150fbb4930f2c516cee9e712774d82e260ac6 | 4c54f0a0a83d3cf8e2a3326734880fced6e37541 | refs/heads/master | 2020-09-03T02:08:06.344814 | 2019-11-13T09:03:03 | 2019-11-13T09:06:49 | 219,355,872 | 1 | 0 | Apache-2.0 | 2019-11-03T19:45:13 | 2019-11-03T19:45:12 | null | UTF-8 | Python | false | false | 31,090 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import weakref
import numpy as np
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=unused-import
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_tpu_system_metadata(tpu_cluster_resolver):
  """Queries and returns system metadata for the given TPU cluster.

  Args:
    tpu_cluster_resolver: A `TPUClusterResolver` describing the cluster.

  Returns:
    The metadata object reported by the cluster master (topology query is
    skipped).
  """
  master = tpu_cluster_resolver.master()
  cluster_spec = tpu_cluster_resolver.cluster_spec()
  if cluster_spec:
    cluster_def = cluster_spec.as_cluster_def()
  else:
    cluster_def = None
  # pylint: disable=protected-access
  return tpu_system_metadata_lib._query_tpu_system_metadata(
      master, cluster_def=cluster_def, query_topology=False)
@contextlib.contextmanager
def maybe_init_scope():
  """Yields inside an `init_scope` when building a graph, otherwise as-is."""
  if not ops.executing_eagerly_outside_functions():
    with ops.init_scope():
      yield
  else:
    yield
@tf_export("distribute.experimental.TPUStrategy", v1=[])
class TPUStrategy(distribute_lib.Strategy):
  """TPU distribution strategy implementation (TF 2.x API surface)."""
  def __init__(self,
               tpu_cluster_resolver=None,
               device_assignment=None):
    """Initializes the TPUStrategy object.
    Args:
      tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
        which provides information about the TPU cluster.
      device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
        specify the placement of replicas on the TPU cluster. Currently only
        supports the usecase of using a single core within a TPU cluster.
    """
    super(TPUStrategy, self).__init__(TPUExtended(
        self, tpu_cluster_resolver, device_assignment=device_assignment))
    # Record usage-tracking gauges: strategy/API version in use plus cluster
    # shape (host count and per-host replica count).
    distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_workers").set(self.extended.num_hosts)
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_replicas_per_worker").set(self.extended.num_replicas_per_host)
  # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
  # can use the default implementation.
  # This implementation runs a single step. It does not use infeed or outfeed.
  def experimental_run_v2(self, fn, args=(), kwargs=None):
    """See base class."""
    # Note: the target function is converted to graph even when in Eager mode,
    # so autograph is on by default here.
    fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())
    return self.extended.tpu_run(fn, args, kwargs)
@tf_export(v1=["distribute.experimental.TPUStrategy"])
class TPUStrategyV1(distribute_lib.StrategyV1):
  """TPU distribution strategy implementation (TF 1.x API surface)."""
  def __init__(self,
               tpu_cluster_resolver=None,
               steps_per_run=None,
               device_assignment=None):
    """Initializes the TPUStrategy object.
    Args:
      tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
        which provides information about the TPU cluster.
      steps_per_run: Number of steps to run on device before returning to the
        host. Note that this can have side-effects on performance, hooks,
        metrics, summaries etc.
        This parameter is only used when Distribution Strategy is used with
        estimator or keras.
      device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
        specify the placement of replicas on the TPU cluster. Currently only
        supports the usecase of using a single core within a TPU cluster.
    """
    super(TPUStrategyV1, self).__init__(TPUExtended(
        self, tpu_cluster_resolver, steps_per_run, device_assignment))
    # Record usage-tracking gauges: strategy/API version in use plus cluster
    # shape (host count and per-host replica count).
    distribute_lib.distribution_strategy_gauge.get_cell("V1").set("TPUStrategy")
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_workers").set(self.extended.num_hosts)
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_replicas_per_worker").set(self.extended.num_replicas_per_host)
  @property
  def steps_per_run(self):
    """DEPRECATED: use .extended.steps_per_run instead."""
    return self._extended.steps_per_run
  # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
  # can use the default implementation.
  # This implementation runs a single step. It does not use infeed or outfeed.
  def experimental_run_v2(self, fn, args=(), kwargs=None):
    """See base class."""
    # Convert `fn` with autograph so Python control flow works in graph mode.
    fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())
    return self.extended.tpu_run(fn, args, kwargs)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class TPUExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of TPUStrategy."""
def __init__(self,
container_strategy,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
super(TPUExtended, self).__init__(container_strategy)
if tpu_cluster_resolver is None:
tpu_cluster_resolver = TPUClusterResolver("")
if steps_per_run is None:
# TODO(frankchn): Warn when we are being used by DS/Keras and this is
# not specified.
steps_per_run = 1
self._tpu_function_cache = weakref.WeakKeyDictionary()
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
self._device_assignment = device_assignment
self._tpu_devices = [d.name for d in self._tpu_metadata.devices
if "device:TPU:" in d.name]
# Only create variables for the number of replicas we're running.
if device_assignment is not None:
job_name = device_spec.DeviceSpecV2.from_string(self._tpu_devices[0]).job
self._tpu_devices = []
for replica_id in range(device_assignment.num_replicas):
tpu_device = device_assignment.tpu_device(
replica=replica_id, logical_core=0, job=job_name)
tpu_device = device_util.canonicalize(tpu_device)
self._tpu_devices.append(tpu_device)
self._host_device = device_util.get_host_for_device(self._tpu_devices[0])
self._device_map = values.ReplicaDeviceMap(self._tpu_devices)
# Preload the data onto the TPUs.
input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices:
host_device = device_util.get_host_for_device(tpu_device)
input_worker_devices.setdefault(host_device, [])
input_worker_devices[host_device].append(tpu_device)
self._input_workers = input_lib.InputWorkers(
self._device_map, tuple(input_worker_devices.items()))
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
# TPUStrategy handles the graph replication in TF-XLA bridge, so we don't
# need to retrace functions for each device.
self._retrace_functions_for_each_device = False
self.experimental_enable_get_next_as_optional = True
self.experimental_enable_dynamic_batch_size = True
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(
input_fn,
self._input_workers,
input_contexts,
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._host_device),
session)
def _experimental_distribute_dataset(self, dataset):
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _experimental_distribute_datasets_from_function(self, dataset_fn):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers,
input_contexts,
self._container_strategy())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _experimental_run_steps_on_iterator(
self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def run_fn(inputs):
"""Single step on the TPU device."""
fn_result = fn(ctx, inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, for e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
def rewrite_fn(*args):
"""The rewritten step fn running on TPU."""
del args
per_replica_inputs = multi_worker_iterator.get_next()
replicate_inputs = []
for replica_id in range(self._num_replicas_in_sync):
select_replica = lambda x: values.select_replica(replica_id, x) # pylint: disable=cell-var-from-loop
replicate_inputs.append((nest.map_structure(
select_replica, per_replica_inputs),))
replicate_outputs = tpu.replicate(
run_fn, replicate_inputs, device_assignment=self._device_assignment)
# If run_fn has tensor outputs, tpu.replicate returns a list of list. We
# will flatten it in this case. If run_fn has no tensor outputs,
# tpu.replicate returns a list of no_ops, we will keep the output as it
# is.
if isinstance(replicate_outputs[0], list):
replicate_outputs = nest.flatten(replicate_outputs)
return replicate_outputs
# TODO(sourabhbajaj): The input to while loop should be based on the
# output type of the step_fn
assert isinstance(initial_loop_values, list)
initial_loop_values = initial_loop_values * self._num_replicas_in_sync
# Put the while loop op on TPU host 0.
with ops.device(self._host_device):
if self.steps_per_run == 1:
replicate_outputs = rewrite_fn()
else:
replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
initial_loop_values)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs)
if isinstance(replicate_outputs, list):
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [
x for x in replicate_outputs if not isinstance(x, ops.Operation)
]
# Outputs are currently of the structure (flattened)
# [output0_device0, output1_device0, output2_device0,
# output0_device1, output1_device1, output2_device1,
# ...]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
last_step_tensor_outputs = [
last_step_tensor_outputs[i::output_num] for i in range(output_num)
]
else:
# no tensors returned.
last_step_tensor_outputs = []
_set_last_step_outputs(ctx, last_step_tensor_outputs)
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
def _experimental_initialize_system(self):
"""Experimental method added to be used by Estimator.
This is a private method only to be used by Estimator. Other frameworks
should directly be calling `tf.tpu.experimental.initialize_tpu_system`
"""
tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
if kwargs.pop("tpu_embedding_variable_creator", False):
return next_creator(*args, **kwargs)
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
device_map = self._device_map
logical_device = 0 # TODO(josh11b): Get logical device from scope here.
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(*args, **kwargs)
else:
device_map = colocate_with.device_map
logical_device = colocate_with.logical_device
def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
initial_value = None
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
if i == 0:
initial_value = kwargs["initial_value"]
# Note: some v1 code expects variable initializer creation to happen
# inside a init_scope.
with maybe_init_scope():
initial_value = initial_value() if callable(
initial_value) else initial_value
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
kwargs["initial_value"] = initial_value
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.TPUMirroredVariable)
value_list.append(v)
return value_list
return values.create_mirrored_variable(
self._container_strategy(), device_map, logical_device,
_real_mirrored_creator, values.TPUMirroredVariable,
values.TPUSyncOnReadVariable, *args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations):
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if reduce_op == reduce_util.ReduceOp.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_replicas_in_sync)
elif reduce_op != reduce_util.ReduceOp.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
# TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
# Always performs the reduction on the TPU host.
with ops.device(self._host_device):
output = math_ops.add_n(value.values)
if reduce_op == reduce_util.ReduceOp.MEAN:
output *= (1. / len(value.values))
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
# If necessary, copy to requested destination.
dest_canonical = device_util.canonicalize(devices[0])
host_canonical = device_util.canonicalize(self._host_device)
if dest_canonical != host_canonical:
with ops.device(dest_canonical):
output = array_ops.identity(output)
else:
output = cross_device_ops_lib.simple_broadcast(output, destinations)
return output
def _update(self, var, fn, args, kwargs, group):
assert isinstance(var, values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if group:
return fn(var, *args, **kwargs)
else:
return (fn(var, *args, **kwargs),)
# Otherwise, we revert to MirroredStrategy behavior and update each variable
# directly.
updates = []
for i, (d, v) in enumerate(zip(var.devices, var.values)):
name = "update_%d" % i
with ops.device(d), distribute_lib.UpdateContext(i), ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(fn(v,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs)))
return values.update_regroup(self, self._device_map, updates, group)
def read_var(self, var):
assert isinstance(var, values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
return var.read_value()
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
broadcast_tensor = [tensor for _ in range(self._num_replicas_in_sync)]
result = tpu_ops.all_to_all(
broadcast_tensor,
concat_dimension=0,
split_dimension=0,
split_count=self._num_replicas_in_sync)
# This uses the broadcasted value from the first replica because the only
# caller of this is for ONLY_FIRST_REPLICA variables aggregation.
return result[0]
return tensor
@property
def num_hosts(self):
  """Number of distinct hosts backing this TPU job."""
  if self._device_assignment is None:
    return self._tpu_metadata.num_hosts

  # Idiom: set comprehension instead of set([list-comp]); counts the
  # distinct host devices across all replicas.
  return len({self._device_assignment.host_device(r)
              for r in range(self._device_assignment.num_replicas)})
@property
def num_replicas_per_host(self):
  """Number of replicas placed on each host (assumed uniform per host)."""
  if self._device_assignment is None:
    return self._tpu_metadata.num_of_cores_per_host

  # TODO(sourabhbajaj): Remove this method we use inputs and remove infeed
  # as the computation of num_replicas_per_host is not a constant
  # when using device_assignment. This is a temporary workaround to support
  # StatefulRNN as everything is 1 in that case.
  # This method needs to take host_id as input for correct computation.
  max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
                         self._device_assignment.num_cores_per_replica)
  return min(self._device_assignment.num_replicas, max_models_per_host)
@property
def _num_replicas_in_sync(self):
  """Total number of replicas participating in synchronous training."""
  assignment = self._device_assignment
  return self._tpu_metadata.num_cores if assignment is None else assignment.num_replicas
# Distribution-coordination capability flags for TPUStrategy.
@property
def experimental_between_graph(self):
  # TPU replication is in-graph, not between-graph.
  return False

@property
def experimental_should_init(self):
  return True

@property
def should_checkpoint(self):
  return True

@property
def should_save_summary(self):
  return True
@property
def worker_devices(self):
  """The TPU devices this strategy runs computation on."""
  return self._tpu_devices

@property
def parameter_devices(self):
  """Variables are placed on the same TPU devices as the computation."""
  return self._tpu_devices
def non_slot_devices(self, var_list):
  """Non-slot variables (e.g. a global step) live on the host device."""
  return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
  """Runs `fn` once on the host device to update non-slot variables."""
  del colocate_with  # Non-slot updates always run on the host.
  with ops.device(self._host_device), distribute_lib.UpdateContext(None):
    result = fn(*args, **kwargs)
    if not group:
      result = nest.map_structure(self._local_results, result)
    return result
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
  """`make_dataset_iterator` and `make_numpy_iterator` use global batch size.

  `make_input_fn_iterator` assumes per-replica batching.

  Returns:
    Boolean.
  """
  return True
def tpu_run(self, fn, args, kwargs):
  """Replicates `fn` for TPU execution and invokes it with `args`/`kwargs`."""
  replicated = self._tpu_function_creator(fn)
  return replicated(args, kwargs)
def _tpu_function_creator(self, fn):
  """Wraps `fn` into a cached callable that runs it via `tpu.replicate`.

  Args:
    fn: the per-replica user computation.

  Returns:
    A callable `tpu_function(args, kwargs)`; results are regrouped into
    distributed values. The wrapper is cached per `fn` and compiled with
    `def_function.function` when executing eagerly.
  """
  if fn in self._tpu_function_cache:
    return self._tpu_function_cache[fn]

  strategy = self._container_strategy()

  def tpu_function(args, kwargs):
    """TF Function used to replicate the user computation."""
    if kwargs is None:
      kwargs = {}

    # Remove None at the end of args as they are not replicatable
    # If there are None in the middle we can't do anything about it
    # so let those cases fail.
    # For example when Keras model predict is used they pass the targets as
    # None. We want to handle it here so all client libraries don't have to
    # do this as other strategies can handle None values better.
    while args and args[-1] is None:
      args = args[:-1]

    # Used to re-structure flattened output tensors from `tpu.replicate()`
    # into a structured format.
    result = [[]]

    def replicated_fn(replica_id, replica_args, replica_kwargs):
      """Wraps user function to provide replica ID and `Tensor` inputs."""
      with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
        result[0] = fn(*replica_args, **replica_kwargs)
      return result[0]

    replicate_inputs = []  # By replica.
    for i in range(strategy.num_replicas_in_sync):
      replicate_inputs.append(
          [constant_op.constant(i, dtype=dtypes.int32),
           values.select_replica(i, args),
           values.select_replica(i, kwargs)])

    # Construct and pass `maximum_shapes` so that we could support dynamic
    # shapes using dynamic padder.
    if self.experimental_enable_dynamic_batch_size and replicate_inputs:
      maximum_shapes = []
      flattened_list = nest.flatten(replicate_inputs[0])
      for input_tensor in flattened_list:
        if tensor_util.is_tensor(input_tensor):
          maximum_shape = input_tensor.get_shape()
        else:
          maximum_shape = tensor_shape.TensorShape(np.shape(input_tensor))
        maximum_shapes.append(maximum_shape)
      maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
                                             maximum_shapes)
    else:
      maximum_shapes = None

    with strategy.scope():
      replicate_outputs = tpu.replicate(
          replicated_fn,
          replicate_inputs,
          device_assignment=self._device_assignment,
          maximum_shapes=maximum_shapes)

    # Remove all no ops that may have been added during 'tpu.replicate()'
    if isinstance(result[0], list):
      result[0] = [
          output for output in result[0] if tensor_util.is_tensor(output)
      ]

    # Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
    if result[0] is None:
      replicate_outputs = [None] * len(replicate_outputs)
    else:
      replicate_outputs = [
          nest.pack_sequence_as(result[0], nest.flatten(replica_output))
          for replica_output in replicate_outputs
      ]

    device_map = self._device_map  # pylint: disable=protected-access
    return values.regroup(device_map, replicate_outputs)

  if context.executing_eagerly():
    tpu_function = def_function.function(tpu_function)
  self._tpu_function_cache[fn] = tpu_function
  return tpu_function
def _in_multi_worker_mode(self):
  """Whether this strategy indicates working in multi-worker settings."""
  # TPUStrategy has different distributed training structure that the whole
  # cluster should be treated as single worker from higher-level (e.g. Keras)
  # library's point of view.
  # TODO(rchao): Revisit this as we design a fault-tolerance solution for
  # TPUStrategy.
  return False
class _TPUReplicaContext(distribute_lib.ReplicaContext):
  """Replication Context class for TPU Strategy."""

  # TODO(sourabhbajaj): Call for each replica should be updating this.
  # TODO(b/118385803): Always properly initialize replica_id.
  def __init__(self, strategy, replica_id_in_sync_group=None):
    # Default to replica 0 when no replica id is supplied.
    if replica_id_in_sync_group is None:
      replica_id_in_sync_group = constant_op.constant(0, dtypes.int32)
    distribute_lib.ReplicaContext.__init__(
        self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)

  @property
  def devices(self):
    """Returns a 1-tuple with the device for the current replica."""
    distribute_lib.require_replica_context(self)
    ds = self._strategy
    replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)

    if replica_id is None:  # Non-constant `Tensor` inside `tpu.replicate`.
      # TODO(cjfj): Return other devices when model parallelism is supported.
      return (tpu.core(0),)
    else:
      return (ds.extended.worker_devices[replica_id],)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
  """Sets the last step outputs on the given context.

  Args:
    ctx: context object exposing `last_step_outputs` and per-output reduce
      ops (`_last_step_outputs_reduce_ops`).
    last_step_tensor_outputs: flat list of last-step outputs, in
      `nest.flatten` order of `ctx.last_step_outputs`.
  """
  # Convert replicate_outputs to the original dict structure of
  # last_step_outputs.
  last_step_tensor_outputs_dict = nest.pack_sequence_as(
      ctx.last_step_outputs, last_step_tensor_outputs)

  for name, reduce_op in ctx._last_step_outputs_reduce_ops.items():  # pylint: disable=protected-access
    output = last_step_tensor_outputs_dict[name]
    # For outputs that have already been reduced, take the first value
    # from the list as each value should be the same. Else return the full
    # list of values.
    # TODO(josh11b): If reduce_op is NONE, we should return a PerReplica
    # value.
    if reduce_op is not None:
      # TODO(priyag): Should this return the element or a list with 1 element
      last_step_tensor_outputs_dict[name] = output[0]
  ctx._set_last_step_outputs(last_step_tensor_outputs_dict)  # pylint: disable=protected-access
| [
"[email protected]"
] | |
9ed940e62e4b3bfdf9750564804b04975687106f | 73c01a3f052f8ef63890ec3c2e28403ad41e9a71 | /td/models/driver.py | cd23468080d6a1f5b364dd8199d0b29945d90f9f | [] | no_license | Jokey90/aho | 4c007c65c819efb726a732a8f36067c5a0226100 | 8bcd41e9ef7d40f07499429f385d4fec590636f6 | refs/heads/master | 2020-03-21T22:28:36.395996 | 2018-06-29T09:25:05 | 2018-06-29T09:25:05 | 139,128,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | from django.db import models
class Driver(models.Model):
    """A driver record linking an `Employee` to driving-specific data."""
    # NOTE(review): class-body import — presumably avoids a circular import
    # between apps at module load time; confirm before moving to file top.
    from main.models import Employee

    class Meta:
        verbose_name = 'Водитель'
        verbose_name_plural = 'Водители'

    emp = models.OneToOneField(verbose_name='Сотрудник', to=Employee, blank=False, null=False)
    comment = models.CharField(verbose_name='Комментарий', blank=True, null=True, default='', max_length=255)
    license = models.CharField(verbose_name='Вод. удостоверение', blank=True, null=True, default='', max_length=100)
    license_date = models.DateField(verbose_name='Вод. удостоверение до', blank=False, null=False)
    phone = models.CharField(verbose_name='Телефон', blank=True, null=True, default='', max_length=100)
    active = models.BooleanField(verbose_name='Активен', blank=False, null=False, default=True)
    photo = models.FileField(verbose_name='Скан вод. удостоверения', blank=True, upload_to='scans/licenses/', null=True)

    def short_name(self):
        """Abbreviated name of the linked employee."""
        return self.emp.short_name()

    def __str__(self):
        # DRY: delegate to short_name() instead of duplicating the lookup.
        return self.short_name()
"[email protected]"
] | |
fffc65720d6f0a1225f7ffb51fb0f9b5c0ebfc98 | 7ac223c9aaa46b2533e08928354f72dd03873e64 | /rentals/migrations/0001_initial.py | 47ae12267ae8e66afa4ec9c23d0eea29018c24b3 | [] | no_license | Kyeza/RentalMangementSystem | 9f6e75ffe634510755dbe78fe74f4ef270b5bef5 | 862490d4be6683e40b81384eb4b7dadad35019cc | refs/heads/master | 2023-04-30T06:20:11.281096 | 2019-05-23T20:40:29 | 2019-05-23T20:40:29 | 188,166,783 | 1 | 0 | null | 2023-04-21T20:31:55 | 2019-05-23T05:24:47 | Python | UTF-8 | Python | false | false | 1,426 | py | # Generated by Django 2.2.1 on 2019-05-23 09:53
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration for the `rentals` app: creates Category and Property.
    # Auto-generated by Django; field definitions must not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Property',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, null=True)),
                ('image', models.ImageField(default='img_default.png', upload_to='property_imgs')),
                ('description', models.TextField(blank=True, null=True)),
                ('address', models.CharField(blank=True, max_length=150, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=12, null=True)),
                ('date_listed', models.DateTimeField(default=django.utils.timezone.now)),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='rentals.Category')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
3df86003db484b4b8a7585a6209c6d608eac969b | a79da24bda658f588fd8e71c7e63f01931c1a694 | /bigapple/venv/lib/python3.7/site-packages/plotly/graph_objs/scatterpolargl/marker/_line.py | 2ddbe27ed182b23cf17085d52fd1c1e056d4fe7e | [] | no_license | replicantdeca/bigapple-insys | 60519b486f13e1a3eb18b5ba637e45deaf8e1d8e | 5e7328fb94362fbb04a71c2e297bffd83443eebc | refs/heads/master | 2020-03-27T12:57:31.894182 | 2019-12-01T11:25:13 | 2019-12-01T11:25:13 | 146,580,916 | 0 | 1 | null | 2018-08-29T10:00:28 | 2018-08-29T10:00:27 | null | UTF-8 | Python | false | false | 19,823 | py | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Line(BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['cauto']
@cauto.setter
def cauto(self, val):
self['cauto'] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmax']
@cmax.setter
def cmax(self, val):
self['cmax'] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmin']
@cmin.setter
def cmin(self, val):
self['cmin'] = val
# color
# -----
@property
def color(self):
"""
Sets themarker.linecolor. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A number that will be interpreted as a color
according to scatterpolargl.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)', [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
Returns
-------
str
"""
return self['colorscale']
@colorscale.setter
def colorscale(self, val):
self['colorscale'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color`is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['reversescale']
@reversescale.setter
def reversescale(self, val):
self['reversescale'] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on plot.ly for width .
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['widthsrc']
@widthsrc.setter
def widthsrc(self, val):
self['widthsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scatterpolargl.marker'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on plot.ly for width .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmin=None,
color=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.scatterpolargl.marker.Line
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on plot.ly for width .
Returns
-------
Line
"""
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.marker.Line
constructor must be a dict or
an instance of plotly.graph_objs.scatterpolargl.marker.Line"""
)
# Import validators
# -----------------
from plotly.validators.scatterpolargl.marker import (line as v_line)
# Initialize validators
# ---------------------
self._validators['autocolorscale'] = v_line.AutocolorscaleValidator()
self._validators['cauto'] = v_line.CautoValidator()
self._validators['cmax'] = v_line.CmaxValidator()
self._validators['cmin'] = v_line.CminValidator()
self._validators['color'] = v_line.ColorValidator()
self._validators['colorscale'] = v_line.ColorscaleValidator()
self._validators['colorsrc'] = v_line.ColorsrcValidator()
self._validators['reversescale'] = v_line.ReversescaleValidator()
self._validators['width'] = v_line.WidthValidator()
self._validators['widthsrc'] = v_line.WidthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('autocolorscale', None)
self.autocolorscale = autocolorscale if autocolorscale is not None else _v
_v = arg.pop('cauto', None)
self.cauto = cauto if cauto is not None else _v
_v = arg.pop('cmax', None)
self.cmax = cmax if cmax is not None else _v
_v = arg.pop('cmin', None)
self.cmin = cmin if cmin is not None else _v
_v = arg.pop('color', None)
self.color = color if color is not None else _v
_v = arg.pop('colorscale', None)
self.colorscale = colorscale if colorscale is not None else _v
_v = arg.pop('colorsrc', None)
self.colorsrc = colorsrc if colorsrc is not None else _v
_v = arg.pop('reversescale', None)
self.reversescale = reversescale if reversescale is not None else _v
_v = arg.pop('width', None)
self.width = width if width is not None else _v
_v = arg.pop('widthsrc', None)
self.widthsrc = widthsrc if widthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
| [
"[email protected]"
] | |
4ee9463d51a2db77f4e61add7e61f16a97c0a4de | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /dbpedia/models/military_conflict.py | 42ad67a51c35ffbc28524cab6a5bcd8650c639c2 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,602 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dbpedia.configuration import Configuration
class MilitaryConflict(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'strength': 'list[str]',
'number_of_people_attending': 'list[int]',
'end_date': 'list[str]',
'opponents': 'list[object]',
'description': 'list[str]',
'caused_by': 'list[object]',
'label': 'list[str]',
'type': 'list[str]',
'casualties': 'list[int]',
'participant': 'list[str]',
'result': 'list[str]',
'duration': 'list[float]',
'previous_event': 'list[object]',
'causalties': 'list[str]',
'is_part_of_military_conflict': 'list[object]',
'next_event': 'list[object]',
'combatant': 'list[str]',
'id': 'str',
'following_event': 'list[object]',
'place': 'list[object]',
'start_date': 'list[str]'
}
attribute_map = {
'strength': 'strength',
'number_of_people_attending': 'numberOfPeopleAttending',
'end_date': 'endDate',
'opponents': 'opponents',
'description': 'description',
'caused_by': 'causedBy',
'label': 'label',
'type': 'type',
'casualties': 'casualties',
'participant': 'participant',
'result': 'result',
'duration': 'duration',
'previous_event': 'previousEvent',
'causalties': 'causalties',
'is_part_of_military_conflict': 'isPartOfMilitaryConflict',
'next_event': 'nextEvent',
'combatant': 'combatant',
'id': 'id',
'following_event': 'followingEvent',
'place': 'place',
'start_date': 'startDate'
}
def __init__(self, strength=None, number_of_people_attending=None, end_date=None, opponents=None, description=None, caused_by=None, label=None, type=None, casualties=None, participant=None, result=None, duration=None, previous_event=None, causalties=None, is_part_of_military_conflict=None, next_event=None, combatant=None, id=None, following_event=None, place=None, start_date=None, local_vars_configuration=None): # noqa: E501
"""MilitaryConflict - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._strength = None
self._number_of_people_attending = None
self._end_date = None
self._opponents = None
self._description = None
self._caused_by = None
self._label = None
self._type = None
self._casualties = None
self._participant = None
self._result = None
self._duration = None
self._previous_event = None
self._causalties = None
self._is_part_of_military_conflict = None
self._next_event = None
self._combatant = None
self._id = None
self._following_event = None
self._place = None
self._start_date = None
self.discriminator = None
self.strength = strength
self.number_of_people_attending = number_of_people_attending
self.end_date = end_date
self.opponents = opponents
self.description = description
self.caused_by = caused_by
self.label = label
self.type = type
self.casualties = casualties
self.participant = participant
self.result = result
self.duration = duration
self.previous_event = previous_event
self.causalties = causalties
self.is_part_of_military_conflict = is_part_of_military_conflict
self.next_event = next_event
self.combatant = combatant
if id is not None:
self.id = id
self.following_event = following_event
self.place = place
self.start_date = start_date
@property
def strength(self):
"""Gets the strength of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The strength of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._strength
@strength.setter
def strength(self, strength):
"""Sets the strength of this MilitaryConflict.
Description not available # noqa: E501
:param strength: The strength of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._strength = strength
@property
def number_of_people_attending(self):
"""Gets the number_of_people_attending of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The number_of_people_attending of this MilitaryConflict. # noqa: E501
:rtype: list[int]
"""
return self._number_of_people_attending
@number_of_people_attending.setter
def number_of_people_attending(self, number_of_people_attending):
"""Sets the number_of_people_attending of this MilitaryConflict.
Description not available # noqa: E501
:param number_of_people_attending: The number_of_people_attending of this MilitaryConflict. # noqa: E501
:type: list[int]
"""
self._number_of_people_attending = number_of_people_attending
@property
def end_date(self):
"""Gets the end_date of this MilitaryConflict. # noqa: E501
The end date of the event. # noqa: E501
:return: The end_date of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this MilitaryConflict.
The end date of the event. # noqa: E501
:param end_date: The end_date of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._end_date = end_date
@property
def opponents(self):
"""Gets the opponents of this MilitaryConflict. # noqa: E501
\"opponent in a military conflict, an organisation, country, or group of countries. \" # noqa: E501
:return: The opponents of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._opponents
@opponents.setter
def opponents(self, opponents):
"""Sets the opponents of this MilitaryConflict.
\"opponent in a military conflict, an organisation, country, or group of countries. \" # noqa: E501
:param opponents: The opponents of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._opponents = opponents
@property
def description(self):
"""Gets the description of this MilitaryConflict. # noqa: E501
small description # noqa: E501
:return: The description of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this MilitaryConflict.
small description # noqa: E501
:param description: The description of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._description = description
@property
def caused_by(self):
"""Gets the caused_by of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The caused_by of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._caused_by
@caused_by.setter
def caused_by(self, caused_by):
"""Sets the caused_by of this MilitaryConflict.
Description not available # noqa: E501
:param caused_by: The caused_by of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._caused_by = caused_by
@property
def label(self):
"""Gets the label of this MilitaryConflict. # noqa: E501
short description of the resource # noqa: E501
:return: The label of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this MilitaryConflict.
short description of the resource # noqa: E501
:param label: The label of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._label = label
@property
def type(self):
"""Gets the type of this MilitaryConflict. # noqa: E501
type of the resource # noqa: E501
:return: The type of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MilitaryConflict.
type of the resource # noqa: E501
:param type: The type of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._type = type
@property
def casualties(self):
"""Gets the casualties of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The casualties of this MilitaryConflict. # noqa: E501
:rtype: list[int]
"""
return self._casualties
@casualties.setter
def casualties(self, casualties):
"""Sets the casualties of this MilitaryConflict.
Description not available # noqa: E501
:param casualties: The casualties of this MilitaryConflict. # noqa: E501
:type: list[int]
"""
self._casualties = casualties
@property
def participant(self):
"""Gets the participant of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The participant of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._participant
@participant.setter
def participant(self, participant):
"""Sets the participant of this MilitaryConflict.
Description not available # noqa: E501
:param participant: The participant of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._participant = participant
@property
def result(self):
"""Gets the result of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The result of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this MilitaryConflict.
Description not available # noqa: E501
:param result: The result of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._result = result
@property
def duration(self):
"""Gets the duration of this MilitaryConflict. # noqa: E501
The duration of the item (movie, audio recording, event, etc.) in ISO 8601 date format # noqa: E501
:return: The duration of this MilitaryConflict. # noqa: E501
:rtype: list[float]
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this MilitaryConflict.
The duration of the item (movie, audio recording, event, etc.) in ISO 8601 date format # noqa: E501
:param duration: The duration of this MilitaryConflict. # noqa: E501
:type: list[float]
"""
self._duration = duration
@property
def previous_event(self):
"""Gets the previous_event of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The previous_event of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._previous_event
@previous_event.setter
def previous_event(self, previous_event):
"""Sets the previous_event of this MilitaryConflict.
Description not available # noqa: E501
:param previous_event: The previous_event of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._previous_event = previous_event
@property
def causalties(self):
"""Gets the causalties of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The causalties of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._causalties
@causalties.setter
def causalties(self, causalties):
"""Sets the causalties of this MilitaryConflict.
Description not available # noqa: E501
:param causalties: The causalties of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._causalties = causalties
@property
def is_part_of_military_conflict(self):
"""Gets the is_part_of_military_conflict of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The is_part_of_military_conflict of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._is_part_of_military_conflict
@is_part_of_military_conflict.setter
def is_part_of_military_conflict(self, is_part_of_military_conflict):
"""Sets the is_part_of_military_conflict of this MilitaryConflict.
Description not available # noqa: E501
:param is_part_of_military_conflict: The is_part_of_military_conflict of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._is_part_of_military_conflict = is_part_of_military_conflict
@property
def next_event(self):
"""Gets the next_event of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The next_event of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._next_event
@next_event.setter
def next_event(self, next_event):
"""Sets the next_event of this MilitaryConflict.
Description not available # noqa: E501
:param next_event: The next_event of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._next_event = next_event
@property
def combatant(self):
"""Gets the combatant of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The combatant of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._combatant
@combatant.setter
def combatant(self, combatant):
"""Sets the combatant of this MilitaryConflict.
Description not available # noqa: E501
:param combatant: The combatant of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._combatant = combatant
@property
def id(self):
"""Gets the id of this MilitaryConflict. # noqa: E501
identifier # noqa: E501
:return: The id of this MilitaryConflict. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MilitaryConflict.
identifier # noqa: E501
:param id: The id of this MilitaryConflict. # noqa: E501
:type: str
"""
self._id = id
@property
def following_event(self):
"""Gets the following_event of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The following_event of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._following_event
@following_event.setter
def following_event(self, following_event):
"""Sets the following_event of this MilitaryConflict.
Description not available # noqa: E501
:param following_event: The following_event of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._following_event = following_event
@property
def place(self):
"""Gets the place of this MilitaryConflict. # noqa: E501
Description not available # noqa: E501
:return: The place of this MilitaryConflict. # noqa: E501
:rtype: list[object]
"""
return self._place
@place.setter
def place(self, place):
"""Sets the place of this MilitaryConflict.
Description not available # noqa: E501
:param place: The place of this MilitaryConflict. # noqa: E501
:type: list[object]
"""
self._place = place
@property
def start_date(self):
"""Gets the start_date of this MilitaryConflict. # noqa: E501
The start date of the event. # noqa: E501
:return: The start_date of this MilitaryConflict. # noqa: E501
:rtype: list[str]
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this MilitaryConflict.
The start date of the event. # noqa: E501
:param start_date: The start_date of this MilitaryConflict. # noqa: E501
:type: list[str]
"""
self._start_date = start_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MilitaryConflict):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MilitaryConflict):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
5ef05dd99cd88c1d6387005f2eb546494a1bd520 | fdc2fbb1b9e75a6ce5adacec29aae7482c999135 | /_api/public_api/services/routes.py | f90cad4b74402610f779b7f832b7fd98866cebad | [
"MIT"
] | permissive | bellyfat/membership_and_affiliate_api | b37411a1244fc7d6bf721b6d36ec87b57845169f | 41fb9f5a0c37c1ac5636122c61e98ddaf9c569ff | refs/heads/master | 2023-07-12T17:07:28.399407 | 2021-08-24T13:04:02 | 2021-08-24T13:04:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | """
**Public facing Services API**
"""
__author__ = "mobius-crypt"
__email__ = "[email protected]"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
from typing import Optional
from flask import Blueprint, request, current_app
from config.exceptions import if_bad_request_raise, UnAuthenticatedError, error_codes
from security.api_authenticator import handle_api_auth
from views.services import ServicesView
services_public_api_bp = Blueprint('services_public_api', __name__)
@services_public_api_bp.route('/api/v1/public/service/<string:org_id>/<string:service_id>', methods=["GET"])
@handle_api_auth
def get_services(org_id: str, service_id: str) -> tuple:
"""
**public_services_api**
returns a service governed by organization_id and service_id
:param org_id:
:param service_id:
:return:
"""
service_view: ServicesView = ServicesView()
return service_view.get_service(service_id=service_id, organization_id=org_id)
@services_public_api_bp.route('/api/v1/public/services/<string:org_id>', methods=["GET"])
@handle_api_auth
def get_all_services(org_id: str) -> tuple:
"""
**public_services_api**
returns a service governed by organization_id and service_id
:param org_id:
:return:
"""
service_view: ServicesView = ServicesView()
return service_view.return_services(organization_id=org_id)
| [
"[email protected]"
] | |
ffcf8c9e3bd602c62149a42dac33d47fc5c7fa0a | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_closedset_braccent/config_iVector_400_fold1.py | 34cd51f887e4c284adc8d7e44663401d7056f6b5 | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista ([email protected])
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './results/closedset_braccent/iVector/400/fold_1/temp/'
result_directory = './results/closedset_braccent/iVector/400/fold_1/results/'
sub_directory = 'subdirectory'
database = 'database_iVector_400_fold1.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 400, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False, use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
parallel = 40
verbose = 2
| [
"[email protected]"
] | |
da3449f427e0b24db486c59bdd24e486619c9e1f | 117f066c80f3863ebef74463292bca6444f9758a | /data_pulling/tax/example.py | 4fdcefef7f117c22146461f85385ac5b612cdb24 | [] | no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 1,854 | py | import pandas as pd
import numpy.random as nr
import numpy as np
class PandasPiecewiseLinear():
# dodgy, thing to do piecewise opt
# this is not useful really, you would need to create some n-d simplex thing ... probably a package that does this
def __init__(self, x, y):
""" no extrap """
self.data = pd.Series(y, index=x)
assert np.diff(self.data.index.values).min() > 0
def _reindexed_data(self, x):
return self.data.reindex(x).interpolate(method='linear')
def __mul__(self, other):
data = self.data * other
return PandasPiecewiseLinear(data.index.values, data.values)
def __add__(self, other):
a = self.data.index.values
b = other.data.index.values
assert a.min() == b.min()
assert a.max() == b.max()
x = np.unique(np.hstack([a, b]))
x.sort()
out = self._reindexed_data(x) + other._reindexed_data(x)
return PandasPiecewiseLinear(out.index.values, out.values)
def __call__(self, x):
return si.interp1d(self.data.index.values, self.data.values)(x)
def __sub__(self, other):
return self.__add__(other * -1)
def __repr__(self):
print('PandasPiecewiseLinear')
return self.data.__repr__()
def argmax(self):
return self.data.idxmax()
# test
# n = 5
# xa = sorted([0] + nr.rand(n).tolist() + [1])
# xb = sorted([0] + nr.rand(n).tolist() + [1])
# a = PandasPiecewiseLinear(xa, list(nr.randn(n + 2)))
# b = PandasPiecewiseLinear(xb, list(nr.randn(n + 2)))
# c = a + b
# c = a - b
# print(c)
import do
F = PandasPiecewiseLinear(do.F.x, do.F.y)
F_ni = PandasPiecewiseLinear(do.F_ni.x, do.F_ni.y)
G = PandasPiecewiseLinear(do.G.x, do.G.y)
F_ttl = F + F_ni
x0 = PandasPiecewiseLinear([0, do._max_x], [1, 1])
x1 = PandasPiecewiseLinear([0, do._max_x], [0, do._max_x])
| [
"[email protected]"
] | |
e75b8889a6c0498c6a7e72f280cba8ce56c72660 | 3249577773cf18e5c09ea36de62477ddb43b662b | /Python/django/user_login/apps/dojo_ninjas/views.py | 6f0f71058c7279e806c4f9481c8dec2b16a697ee | [] | no_license | HollinRoberts/code | 5394abe2a7c42bbbe83d8f64a99c50a52f05792b | 8026522ab169c4174037fdf1b271de60b75d79bf | refs/heads/master | 2021-01-01T16:12:11.674680 | 2017-10-18T21:08:10 | 2017-10-18T21:08:10 | 97,786,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
def index(request):
print 'here'
return render(request,'dojo_ninjas/index.html') | [
"[email protected]"
] | |
41d999a5e04ca98fc1bea1f05e638bc5a92839e2 | b891f38eb12eeafdbcec9deee2320acfaac3a7ad | /0x0A-python-inheritance/100-my_int.py | 641a3669fbc5bd16550b3e3d1c87db16add73a55 | [] | no_license | davixcky/holbertonschool-higher_level_programming | bb112af3e18994a46584ac3e78385e46c3d918f6 | fe4cd0e95ee976b93bd47c85c2bc810049f568fa | refs/heads/master | 2023-01-11T00:41:03.145968 | 2020-09-22T22:55:53 | 2020-09-22T22:55:53 | 259,390,611 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/python3
'''Module for advanced'''
class MyInt(int):
'''Rebel class'''
def __eq__(self, other):
'''Override == operator'''
return not (self is not other)
def __ne__(self, other):
'''Override != operator'''
return (self is not other)
| [
"[email protected]"
] | |
cade239b0ece789edff3420e5fd5b30a5452ddee | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/01_netCDF_extraction/erafive902TG/134-tideGauge.py | 5cfea7896c09776590870458447a906f23f3c666 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,595 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
ERA5 netCDF extraction script
@author: Michael Tadesse
"""
import time as tt
import os
import pandas as pd
from d_define_grid import Coordinate, findPixels, findindx
from c_read_netcdf import readnetcdf
from f_era5_subsetV2 import subsetter
def extract_data(delta= 1):
"""
This is the master function that calls subsequent functions
to extract uwnd, vwnd, slp for the specified
tide gauges
delta: distance (in degrees) from the tide gauge
"""
print('Delta = {}'.format(delta), '\n')
#defining the folders for predictors
nc_path = {'slp' : "/lustre/fs0/home/mtadesse/era_five/slp",\
"wnd_u": "/lustre/fs0/home/mtadesse/era_five/wnd_u",\
'wnd_v' : "/lustre/fs0/home/mtadesse/era_five/wnd_v"}
surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
csv_path = "/lustre/fs0/home/mtadesse/erafive_localized"
#cd to the obs_surge dir to get TG information
os.chdir(surge_path)
tg_list = os.listdir()
#################################
#looping through the predictor folders
#################################
for pf in nc_path.keys():
print(pf, '\n')
os.chdir(nc_path[pf])
####################################
#looping through the years of the chosen predictor
####################################
for py in os.listdir():
os.chdir(nc_path[pf]) #back to the predictor folder
print(py, '\n')
#get netcdf components - give predicor name and predictor file
nc_file = readnetcdf(pf, py)
lon, lat, time, pred = nc_file[0], nc_file[1], nc_file[2], \
nc_file[3]
x = 134
y = 135
#looping through individual tide gauges
for t in range(x, y):
#the name of the tide gauge - for saving purposes
# tg = tg_list[t].split('.mat.mat.csv')[0]
tg = tg_list[t]
#extract lon and lat data from surge csv file
print("tide gauge", tg, '\n')
os.chdir(surge_path)
if os.stat(tg).st_size == 0:
print('\n', "This tide gauge has no surge data!", '\n')
continue
surge = pd.read_csv(tg, header = None)
#surge_with_date = add_date(surge)
#define tide gauge coordinate(lon, lat)
tg_cord = Coordinate(float(surge.iloc[1,4]), float(surge.iloc[1,5]))
print(tg_cord)
#find closest grid points and their indices
close_grids = findPixels(tg_cord, delta, lon, lat)
ind_grids = findindx(close_grids, lon, lat)
ind_grids.columns = ['lon', 'lat']
#loop through preds#
#subset predictor on selected grid size
print("subsetting \n")
pred_new = subsetter(pred, ind_grids, time)
#create directories to save pred_new
os.chdir(csv_path)
#tide gauge directory
tg_name = tg.split('.csv')[0]
try:
os.makedirs(tg_name)
os.chdir(tg_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg_name)
#predictor directory
pred_name = pf
try:
os.makedirs(pred_name)
os.chdir(pred_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(pred_name)
#time for saving file
print("saving as csv")
yr_name = py.split('_')[-1]
save_name = '_'.join([tg_name, pred_name, yr_name])\
+ ".csv"
pred_new.to_csv(save_name)
#return to the predictor directory
os.chdir(nc_path[pf])
#run script
extract_data(delta= 1) | [
"[email protected]"
] | |
b31979f988725cf694bfbad19ec793def2e31147 | 88e8e28b58092d5ba051582930c156872b9565a5 | /unews/unews/items.py | ecc7fba0c3ac1e5fd2ade006f035a54de44947d0 | [] | no_license | dorahero/crawlers | b8a4a1c2592e817b365d56a87bee021d29598810 | 88e134fdd2493330622848f931638aabd6c906fe | refs/heads/master | 2023-02-19T07:54:54.945144 | 2021-01-23T09:13:42 | 2021-01-23T09:13:42 | 276,884,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class UnewsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
text = scrapy.Field()
time = scrapy.Field()
pass
| [
"[email protected]"
] | |
ed00045f461eab3dcc7ea8a25b7d0eabb5c47a29 | 0a1356b97465cc1d5c3f661f61b3b8c51fb05d46 | /android_binding/.buildozer/android/platform/build-armeabi-v7a/build/python-installs/cross_platform_calc/kivy/uix/behaviors/knspace.py | ef65fa5fbf43f9500cd6bed73fd637b4b478df04 | [
"MIT"
] | permissive | Rohan-cod/cross_platform_calc | 00360f971e4da68dd36d6836c9ddbb157f6b77d5 | 5785a5e8150d174019b330c812e7eb012cc4dd79 | refs/heads/master | 2022-12-22T10:29:05.317051 | 2021-06-05T10:52:44 | 2021-06-05T10:52:44 | 237,465,912 | 2 | 1 | MIT | 2022-12-09T05:18:55 | 2020-01-31T16:07:31 | C | UTF-8 | Python | false | false | 20,096 | py | '''
Kivy Namespaces
===============
.. versionadded:: 1.9.1
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
The :class:`KNSpaceBehavior` `mixin <https://en.wikipedia.org/wiki/Mixin>`_
class provides namespace functionality for Kivy objects. It allows kivy objects
to be named and then accessed using namespaces.
:class:`KNSpace` instances are the namespaces that store the named objects
in Kivy :class:`~kivy.properties.ObjectProperty` instances.
In addition, when inheriting from :class:`KNSpaceBehavior`, if the derived
object is named, the name will automatically be added to the associated
namespace and will point to a :attr:`~kivy.uix.widget.proxy_ref` of the
derived object.
Basic examples
--------------
By default, there's only a single namespace: the :attr:`knspace` namespace. The
simplest example is adding a widget to the namespace:
.. code-block:: python
from kivy.uix.behaviors.knspace import knspace
widget = Widget()
knspace.my_widget = widget
This adds a kivy :class:`~kivy.properties.ObjectProperty` with `rebind=True`
and `allownone=True` to the :attr:`knspace` namespace with a property name
`my_widget`. And the property now also points to this widget.
This can be done automatically with:
.. code-block:: python
class MyWidget(KNSpaceBehavior, Widget):
pass
widget = MyWidget(knsname='my_widget')
Or in kv:
.. code-block:: kv
<MyWidget@KNSpaceBehavior+Widget>
MyWidget:
knsname: 'my_widget'
Now, `knspace.my_widget` will point to that widget.
When one creates a second widget with the same name, the namespace will
also change to point to the new widget. E.g.:
.. code-block:: python
widget = MyWidget(knsname='my_widget')
# knspace.my_widget now points to widget
widget2 = MyWidget(knsname='my_widget')
# knspace.my_widget now points to widget2
Setting the namespace
---------------------
One can also create ones own namespace rather than using the default
:attr:`knspace` by directly setting :attr:`KNSpaceBehavior.knspace`:
.. code-block:: python
class MyWidget(KNSpaceBehavior, Widget):
pass
widget = MyWidget(knsname='my_widget')
my_new_namespace = KNSpace()
widget.knspace = my_new_namespace
Initially, `my_widget` is added to the default namespace, but when the widget's
namespace is changed to `my_new_namespace`, the reference to `my_widget` is
moved to that namespace. We could have also of course first set the namespace
to `my_new_namespace` and then have named the widget `my_widget`, thereby
avoiding the initial assignment to the default namespace.
Similarly, in kv:
.. code-block:: kv
<MyWidget@KNSpaceBehavior+Widget>
MyWidget:
knspace: KNSpace()
knsname: 'my_widget'
Inheriting the namespace
------------------------
In the previous example, we directly set the namespace we wished to use.
In the following example, we inherit it from the parent, so we only have to set
it once:
.. code-block:: kv
<MyWidget@KNSpaceBehavior+Widget>
<MyLabel@KNSpaceBehavior+Label>
<MyComplexWidget@MyWidget>:
knsname: 'my_complex'
MyLabel:
knsname: 'label1'
MyLabel:
knsname: 'label2'
Then, we do:
.. code-block:: python
widget = MyComplexWidget()
new_knspace = KNSpace()
widget.knspace = new_knspace
The rule is that if no knspace has been assigned to a widget, it looks for a
namespace in its parent and parent's parent and so on until it find one to
use. If none are found, it uses the default :attr:`knspace`.
When `MyComplexWidget` is created, it still used the default namespace.
However, when we assigned the root widget its new namespace, all its
children switched to using that new namespace as well. So `new_knspace` now
contains `label1` and `label2` as well as `my_complex`.
If we had first done:
.. code-block:: python
widget = MyComplexWidget()
new_knspace = KNSpace()
knspace.label1.knspace = knspace
widget.knspace = new_knspace
Then `label1` would remain stored in the default :attr:`knspace` since it was
directly set, but `label2` and `my_complex` would still be added to the new
namespace.
One can customize the attribute used to search the parent tree by changing
:attr:`KNSpaceBehavior.knspace_key`. If the desired knspace is not reachable
through a widgets parent tree, e.g. in a popup that is not a widget's child,
:attr:`KNSpaceBehavior.knspace_key` can be used to establish a different
search order.
Accessing the namespace
-----------------------
As seen in the previous example, if not directly assigned, the namespace is
found by searching the parent tree. Consequently, if a namespace was assigned
further up the parent tree, all its children and below could access that
namespace through their :attr:`KNSpaceBehavior.knspace` property.
This allows the creation of multiple widgets with identically given names
if each root widget instance is assigned a new namespace. For example:
.. code-block:: kv
<MyComplexWidget@KNSpaceBehavior+Widget>:
Label:
text: root.knspace.pretty.text if root.knspace.pretty else ''
<MyPrettyWidget@KNSpaceBehavior+TextInput>:
knsname: 'pretty'
text: 'Hello'
<MyCompositeWidget@KNSpaceBehavior+BoxLayout>:
MyComplexWidget
MyPrettyWidget
Now, when we do:
.. code-block:: python
knspace1, knspace2 = KNSpace(), KNSpace()
composite1 = MyCompositeWidget()
composite1.knspace = knspace1
composite2 = MyCompositeWidget()
composite2.knspace = knspace2
knspace1.pretty = "Here's the ladder, now fix the roof!"
knspace2.pretty = "Get that raccoon off me!"
Because each of the `MyCompositeWidget` instances have a different namespace
their children also use different namespaces. Consequently, the
pretty and complex widgets of each instance will have different text.
Further, because both the namespace :class:`~kivy.properties.ObjectProperty`
references, and :attr:`KNSpaceBehavior.knspace` have `rebind=True`, the
text of the `MyComplexWidget` label is rebound to match the text of
`MyPrettyWidget` when either the root's namespace changes or when the
`root.knspace.pretty` property changes, as expected.
Forking a namespace
-------------------
Forking a namespace provides the opportunity to create a new namespace
from a parent namespace so that the forked namespace will contain everything
in the origin namespace, but the origin namespace will not have access to
anything added to the forked namespace.
For example:
.. code-block:: python
child = knspace.fork()
grandchild = child.fork()
child.label = Label()
grandchild.button = Button()
Now label is accessible by both child and grandchild, but not by knspace. And
button is only accessible by the grandchild but not by the child or by knspace.
Finally, doing `grandchild.label = Label()` will leave `grandchild.label`
and `child.label` pointing to different labels.
A motivating example is the example from above:
.. code-block:: kv
<MyComplexWidget@KNSpaceBehavior+Widget>:
Label:
text: root.knspace.pretty.text if root.knspace.pretty else ''
<MyPrettyWidget@KNSpaceBehavior+TextInput>:
knsname: 'pretty'
text: 'Hello'
<MyCompositeWidget@KNSpaceBehavior+BoxLayout>:
knspace: 'fork'
MyComplexWidget
MyPrettyWidget
Notice the addition of `knspace: 'fork'`. This is identical to doing
`knspace: self.knspace.fork()`. However, doing that would lead to infinite
recursion as that kv rule would be executed recursively because `self.knspace`
will keep on changing. However, allowing `knspace: 'fork'` cirumvents that.
See :attr:`KNSpaceBehavior.knspace`.
Now, having forked, we just need to do:
.. code-block:: python
composite1 = MyCompositeWidget()
composite2 = MyCompositeWidget()
composite1.knspace.pretty = "Here's the ladder, now fix the roof!"
composite2.knspace.pretty = "Get that raccoon off me!"
Since by forking we automatically created a unique namespace for each
`MyCompositeWidget` instance.
'''
__all__ = ('KNSpace', 'KNSpaceBehavior', 'knspace')
from kivy.event import EventDispatcher
from kivy.properties import StringProperty, ObjectProperty, AliasProperty
knspace = None
'''The default :class:`KNSpace` namespace. See :attr:`KNSpaceBehavior.knspace`
for more details.
'''
class KNSpace(EventDispatcher):
    '''Each :class:`KNSpace` instance is a namespace that stores the named Kivy
    objects associated with this namespace. Each named object is
    stored as the value of a Kivy :class:`~kivy.properties.ObjectProperty` of
    this instance whose property name is the object's given name. Both `rebind`
    and `allownone` are set to `True` for the property.
    See :attr:`KNSpaceBehavior.knspace` for details on how a namespace is
    associated with a named object.
    When storing an object in the namespace, the object's `proxy_ref` is
    stored if the object has such an attribute.
    :Parameters:
        `parent`: (internal) A :class:`KNSpace` instance or None.
            If specified, it's a parent namespace, in which case, the current
            namespace will have in its namespace all its named objects
            as well as the named objects of its parent and parent's parent
            etc. See :meth:`fork` for more details.
    '''
    # The parent namespace instance (:class:`KNSpace`) or None; lookups that
    # miss (or resolve to None) locally fall through to the parent chain.
    parent = None
    # Set of property names for which ``apply_property`` has already been
    # called on *this* instance; guards against applying the same property twice.
    __has_applied = None
    # When True, store the object itself; otherwise store its ``proxy_ref``
    # (when present) to avoid keeping widgets alive through the namespace.
    keep_ref = False
    def __init__(self, parent=None, keep_ref=False, **kwargs):
        self.keep_ref = keep_ref
        super(KNSpace, self).__init__(**kwargs)
        self.parent = parent
        # Properties declared at class level are already applied.
        self.__has_applied = set(self.properties().keys())
    def __setattr__(self, name, value):
        # Intercept every assignment: unknown names become ObjectProperties
        # created on the fly so kv bindings on `knspace.name` work.
        prop = super(KNSpace, self).property(name, quiet=True)
        has_applied = self.__has_applied
        if prop is None:
            # No Kivy property with this name.
            if hasattr(self, name):
                # Plain python attribute (e.g. `parent`): set it directly.
                super(KNSpace, self).__setattr__(name, value)
            else:
                # Brand-new name: create and apply a rebindable property.
                self.apply_property(
                    **{name:
                       ObjectProperty(None, rebind=True, allownone=True)}
                )
                if not self.keep_ref:
                    value = getattr(value, 'proxy_ref', value)
                has_applied.add(name)
                super(KNSpace, self).__setattr__(name, value)
        elif name not in has_applied:
            # Property exists on the class but was never applied to this
            # instance (e.g. created on a forked sibling) — apply it first.
            self.apply_property(**{name: prop})
            has_applied.add(name)
            if not self.keep_ref:
                value = getattr(value, 'proxy_ref', value)
            super(KNSpace, self).__setattr__(name, value)
        else:
            if not self.keep_ref:
                value = getattr(value, 'proxy_ref', value)
            super(KNSpace, self).__setattr__(name, value)
    def __getattribute__(self, name):
        # Resolve instance ``__dict__`` entries normally; for everything else
        # fall back to the parent namespace when the local value is missing
        # or None, which is what makes forked namespaces inherit names.
        if name in super(KNSpace, self).__getattribute__('__dict__'):
            return super(KNSpace, self).__getattribute__(name)
        try:
            value = super(KNSpace, self).__getattribute__(name)
        except AttributeError:
            parent = super(KNSpace, self).__getattribute__('parent')
            if parent is None:
                raise AttributeError(name)
            # Delegate unknown names to the parent chain.
            return getattr(parent, name)
        if value is not None:
            return value
        parent = super(KNSpace, self).__getattribute__('parent')
        if parent is None:
            return None
        try:
            return getattr(parent, name)
        except AttributeError:
            # Parent chain doesn't have it either; treat as unset.
            return None
    def property(self, name, quiet=False):
        # Overridden so kv lang can bind to names that don't exist yet:
        # asking for an unknown property creates it on the fly.
        prop = super(KNSpace, self).property(name, quiet=True)
        if prop is not None:
            return prop
        prop = ObjectProperty(None, rebind=True, allownone=True)
        self.apply_property(**{name: prop})
        self.__has_applied.add(name)
        return prop
    def fork(self):
        '''Returns a new :class:`KNSpace` instance which will have access to
        all the named objects in the current namespace but will also have a
        namespace of its own that is unique to it.
        For example:
        .. code-block:: python
            forked_knspace1 = knspace.fork()
            forked_knspace2 = knspace.fork()
        Now, any names added to `knspace` will be accessible by the
        `forked_knspace1` and `forked_knspace2` namespaces by the normal means.
        However, any names added to `forked_knspace1` will not be accessible
        from `knspace` or `forked_knspace2`. Similar for `forked_knspace2`.
        '''
        return KNSpace(parent=self)
class KNSpaceBehavior(object):
    '''Inheriting from this class allows naming of the inherited objects, which
    are then added to the associated namespace :attr:`knspace` and accessible
    through it.
    Please see the :mod:`knspace behaviors module <kivy.uix.behaviors.knspace>`
    documentation for more information.
    '''
    _knspace = ObjectProperty(None, allownone=True)
    _knsname = StringProperty('')
    # Namespace found the last time the parent tree was walked.
    __last_knspace = None
    # List of (observed_object, property_name, bind_uid) tuples recording the
    # bindings made while walking the parent tree, so they can be undone.
    __callbacks = None
    def __init__(self, knspace=None, **kwargs):
        self.knspace = knspace
        super(KNSpaceBehavior, self).__init__(**kwargs)
    def __knspace_clear_callbacks(self, *largs):
        # Called when anything along the watched parent chain changes:
        # unbind everything, recompute the namespace and re-register our
        # name under the new namespace (unregistering from the old one).
        for obj, name, uid in self.__callbacks:
            obj.unbind_uid(name, uid)
        last = self.__last_knspace
        self.__last_knspace = self.__callbacks = None
        assert self._knspace is None
        assert last
        new = self.__set_parent_knspace()
        if new is last:
            return
        self.property('_knspace').dispatch(self)
        name = self.knsname
        if not name:
            return
        if getattr(last, name) == self:
            setattr(last, name, None)
        if new:
            setattr(new, name, self)
        else:
            raise ValueError('Object has name "{}", but no namespace'.
                             format(name))
    def __set_parent_knspace(self):
        # Walk up the chain of objects named by ``knspace_key`` until one
        # provides a `knspace`, binding callbacks along the way so we notice
        # when the chain changes. Falls back to the global default namespace.
        callbacks = self.__callbacks = []
        fbind = self.fbind
        append = callbacks.append
        parent_key = self.knspace_key
        clear = self.__knspace_clear_callbacks
        append((self, 'knspace_key', fbind('knspace_key', clear)))
        if not parent_key:
            self.__last_knspace = knspace
            return knspace
        append((self, parent_key, fbind(parent_key, clear)))
        parent = getattr(self, parent_key, None)
        # Distinct sentinel to detect a missing `knspace` attribute. The
        # original used the int literal 0 with an `is not 0` identity test,
        # which is a SyntaxWarning on CPython >= 3.8 and relies on
        # implementation-defined small-int caching.
        missing = object()
        while parent is not None:
            fbind = parent.fbind
            parent_knspace = getattr(parent, 'knspace', missing)
            if parent_knspace is not missing:
                append((parent, 'knspace', fbind('knspace', clear)))
                self.__last_knspace = parent_knspace
                return parent_knspace
            append((parent, parent_key, fbind(parent_key, clear)))
            new_parent = getattr(parent, parent_key, None)
            if new_parent is parent:
                # Self-referencing parent; stop to avoid an infinite loop.
                break
            parent = new_parent
        self.__last_knspace = knspace
        return knspace
    def _get_knspace(self):
        _knspace = self._knspace
        if _knspace is not None:
            return _knspace
        if self.__callbacks is not None:
            # Parent tree already walked and bound; use the cached result.
            return self.__last_knspace
        # We only get here if we never accessed our knspace.
        return self.__set_parent_knspace()
    def _set_knspace(self, value):
        if value is self._knspace:
            return
        knspace = self._knspace or self.__last_knspace
        name = self.knsname
        if name and knspace and getattr(knspace, name) == self:
            setattr(knspace, name, None)  # reset old namespace
        if value == 'fork':
            if not knspace:
                knspace = self.knspace  # get parents in case we haven't before
            if knspace:
                value = knspace.fork()
            else:
                raise ValueError('Cannot fork with no namespace')
        for obj, prop_name, uid in self.__callbacks or []:
            obj.unbind_uid(prop_name, uid)
        self.__last_knspace = self.__callbacks = None
        if name:
            if value is None:  # if None, first update the recursive knspace
                knspace = self.__set_parent_knspace()
                if knspace:
                    setattr(knspace, name, self)
                self._knspace = None  # cause a kv trigger
            else:
                setattr(value, name, self)
                knspace = self._knspace = value
            if not knspace:
                raise ValueError('Object has name "{}", but no namespace'.
                                 format(name))
        else:
            if value is None:
                self.__set_parent_knspace()  # update before trigger below
            self._knspace = value
    knspace = AliasProperty(
        _get_knspace, _set_knspace, bind=('_knspace', ), cache=False,
        rebind=True, allownone=True)
    '''The namespace instance, :class:`KNSpace`, associated with this widget.
    The :attr:`knspace` namespace stores this widget when naming this widget
    with :attr:`knsname`.
    If the namespace has been set with a :class:`KNSpace` instance, e.g. with
    `self.knspace = KNSpace()`, then that instance is returned (setting with
    `None` doesn't count). Otherwise, if :attr:`knspace_key` is not None, we
    look for a namespace to use in the object that is stored in the property
    named :attr:`knspace_key`, of this instance. I.e.
    `object = getattr(self, self.knspace_key)`.
    If that object has a knspace property, then we return its value. Otherwise,
    we go further up, e.g. with `getattr(object, self.knspace_key)` and look
    for its `knspace` property.
    Finally, if we reach a value of `None`, or :attr:`knspace_key` was `None`,
    the default :attr:`~kivy.uix.behaviors.knspace.knspace` namespace is
    returned.
    If :attr:`knspace` is set to the string `'fork'`, the current namespace
    in :attr:`knspace` will be forked with :meth:`KNSpace.fork` and the
    resulting namespace will be assigned to this instance's :attr:`knspace`.
    See the module examples for a motivating example.
    Both `rebind` and `allownone` are `True`.
    '''
    knspace_key = StringProperty('parent', allownone=True)
    '''The name of the property of this instance, to use to search upwards for
    a namespace to use by this instance. Defaults to `'parent'` so that we'll
    search the parent tree. See :attr:`knspace`.
    When `None`, we won't search the parent tree for the namespace.
    `allownone` is `True`.
    '''
    def _get_knsname(self):
        return self._knsname
    def _set_knsname(self, value):
        # Unregister the old name from the namespace, then register the new.
        old_name = self._knsname
        knspace = self.knspace
        if old_name and knspace and getattr(knspace, old_name) == self:
            setattr(knspace, old_name, None)
        self._knsname = value
        if value:
            if knspace:
                setattr(knspace, value, self)
            else:
                raise ValueError('Object has name "{}", but no namespace'.
                                 format(value))
    knsname = AliasProperty(
        _get_knsname, _set_knsname, bind=('_knsname', ), cache=False)
    '''The name given to this instance. If named, the name will be added to the
    associated :attr:`knspace` namespace, which will then point to the
    `proxy_ref` of this instance.
    When named, one can access this object by e.g. self.knspace.name, where
    `name` is the given name of this instance. See :attr:`knspace` and the
    module description for more details.
    '''
knspace = KNSpace()
| [
"[email protected]"
] | |
f821168daacde5daead66cadb6450cd2019b90f6 | c6e7e1a32ca22c7e8fcb7f8c62ca5478e1e34344 | /venv/bin/pip | 1040e724235617c89eba1fc9b902082e7ca57c4b | [] | no_license | swordwielder/RESTapi | 13b2dca65a2f49f5a2fbd85309a698019c5ebc87 | 5541bad50608e848ee5c2ff05687d6fa7392f90a | refs/heads/master | 2020-07-23T18:53:30.376658 | 2020-03-06T18:31:29 | 2020-03-06T18:31:29 | 207,674,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | #!/home/sword/PycharmProjects/pyworkspace/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script wrapper for the venv's pip.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so argv[0] is plain "pip".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve pip's registered console_scripts entry point and run it,
        # propagating its return value as the process exit code.
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"[email protected]"
] | ||
23c71b0d50b91315b570e7451f6325c9285a753a | 021dcf39f7cfb303ff427d7344026004f9d4cfdd | /bookit/geo/models/area.py | a95118209730353550ebf7ebe4ccac7d64d64fed | [
"MIT"
] | permissive | kamranhossain/bookit | dfaca266b93e0ee8a50e88a2a7702a6f5ece35f1 | 4189a0ed620d7a595de2c113bb3a2d435d66d5f0 | refs/heads/master | 2021-05-11T23:36:00.630917 | 2017-08-16T20:30:33 | 2017-08-16T20:30:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.db import models
class Area(models.Model):
    """A geographic area identified only by its display name."""

    # Human-readable area name, capped at 140 characters.
    name = models.CharField(max_length=140)

    def __str__(self):
        # Shown wherever Django renders this model (admin, shell, ...).
        return self.name
| [
"[email protected]"
] | |
d29d5c2be6de0c5ffb14a35773221396e731327d | 627cca9406c31ce30c493ff7502f79eb4c57eee3 | /xcha/wallet/wallet_user_store.py | 599b6716d9fb71d4e7865a65ef4cc60d9f06933f | [
"Apache-2.0"
] | permissive | blockchiansea/xcha-blockchain | 40c6d36813f671e94316a522904238f495f39f6b | 7de0ba89056236e30069aef12fe25843f6093bcf | refs/heads/master | 2023-07-26T02:36:57.654196 | 2021-09-06T06:04:21 | 2021-09-06T06:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,788 | py | from typing import List, Optional
import aiosqlite
from xcha.util.db_wrapper import DBWrapper
from xcha.util.ints import uint32
from xcha.wallet.util.wallet_types import WalletType
from xcha.wallet.wallet_info import WalletInfo
class WalletUserStore:
    """
    WalletUserStore keeps track of all user created wallets and necessary smart-contract data
    """

    db_connection: aiosqlite.Connection
    cache_size: uint32
    db_wrapper: DBWrapper

    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Create the store, ensure the schema and indexes exist, and seed
        the default wallet via :meth:`init_wallet`."""
        self = cls()
        self.db_wrapper = db_wrapper
        self.db_connection = db_wrapper.db
        await self.db_connection.execute("pragma journal_mode=wal")
        await self.db_connection.execute("pragma synchronous=2")
        await self.db_connection.execute(
            (
                "CREATE TABLE IF NOT EXISTS users_wallets("
                "id INTEGER PRIMARY KEY AUTOINCREMENT,"
                " name text,"
                " wallet_type int,"
                " data text)"
            )
        )
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on users_wallets(name)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS type on users_wallets(wallet_type)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS data on users_wallets(data)")
        await self.db_connection.commit()
        await self.init_wallet()
        return self

    async def init_wallet(self):
        """Create the standard wallet if the table is completely empty."""
        all_wallets = await self.get_all_wallet_info_entries()
        if len(all_wallets) == 0:
            await self.create_wallet("Chia Wallet", WalletType.STANDARD_WALLET, "")

    async def _clear_database(self):
        """Delete every row (test/maintenance helper)."""
        cursor = await self.db_connection.execute("DELETE FROM users_wallets")
        await cursor.close()
        await self.db_connection.commit()

    async def create_wallet(
        self, name: str, wallet_type: int, data: str, id: Optional[int] = None, in_transaction=False
    ) -> Optional[WalletInfo]:
        """Insert a wallet row and return the stored :class:`WalletInfo`.

        When ``id`` is None sqlite assigns the next AUTOINCREMENT id. The
        ``in_transaction`` flag means the caller already holds the db lock
        and will commit itself.
        """
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "INSERT INTO users_wallets VALUES(?, ?, ?, ?)",
                (id, name, wallet_type, data),
            )
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()
        return await self.get_last_wallet()

    async def delete_wallet(self, id: int, in_transaction: bool):
        """Delete the wallet row with the given id."""
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            # Parameterized query instead of the original f-string-built SQL,
            # matching the style of the other statements in this class.
            cursor = await self.db_connection.execute("DELETE FROM users_wallets where id=?", (id,))
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()

    async def update_wallet(self, wallet_info: WalletInfo, in_transaction):
        """Insert or replace the row for ``wallet_info`` (keyed on its id)."""
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "INSERT or REPLACE INTO users_wallets VALUES(?, ?, ?, ?)",
                (
                    wallet_info.id,
                    wallet_info.name,
                    wallet_info.type,
                    wallet_info.data,
                ),
            )
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()

    async def get_last_wallet(self) -> Optional[WalletInfo]:
        """Return the wallet with the highest id, or None if there are none."""
        cursor = await self.db_connection.execute("SELECT MAX(id) FROM users_wallets;")
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        return await self.get_wallet_by_id(row[0])

    async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
        """
        Return a set containing all wallets
        """
        cursor = await self.db_connection.execute("SELECT * from users_wallets")
        rows = await cursor.fetchall()
        await cursor.close()
        result = []
        for row in rows:
            result.append(WalletInfo(row[0], row[1], row[2], row[3]))
        return result

    async def get_wallet_by_id(self, id: int) -> Optional[WalletInfo]:
        """
        Return a wallet by id
        """
        cursor = await self.db_connection.execute("SELECT * from users_wallets WHERE id=?", (id,))
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        return WalletInfo(row[0], row[1], row[2], row[3])
| [
"[email protected]"
] | |
5dc79f13f7a92a35dd46e4a129b04ec494cea9fc | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_max_length_4_xsd/__init__.py | 2ea09c36077f1939d69de901a715d7c02f4faea0 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 263 | py | from output.models.nist_data.atomic.id.schema_instance.nistschema_sv_iv_atomic_id_max_length_4_xsd.nistschema_sv_iv_atomic_id_max_length_4 import (
NistschemaSvIvAtomicIdMaxLength4,
Out,
)
__all__ = [
"NistschemaSvIvAtomicIdMaxLength4",
"Out",
]
| [
"[email protected]"
] | |
ca228e5593dea749fc2dba4b27d14b41224d62c8 | 8f64bee79b8051a052b1ab601da3d3e09418ec78 | /demo.py | e286eed396875818f4ca53d0a1b86d14c75ee7c2 | [] | no_license | sirodoht/xml-diff | f6f14df641b45c249cdacf2f27209c3c202c02a2 | a67f43030548ba8b91a55631bd69335618a5b92d | refs/heads/master | 2020-04-10T12:56:17.952458 | 2018-03-08T00:27:11 | 2018-03-08T00:27:11 | 124,275,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from diff import files_diff
files_diff('datafiles/left_demo.xml', 'datafiles/right_demo.xml', True)
| [
"[email protected]"
] | |
ace1317e4d350f5245822cf11849f62bbcfe6a10 | ae9f0a71576cf2d9f46f684ae412c741261c9ded | /tests/plugins/shortcircuit.py | 8403b68ec141b5afb308aa44d94754063b583f46 | [
"MIT"
] | permissive | sugarchain-project/lightning | 95adf492b2f5d3b4c20170b17a7e556b9b3abbd1 | c73e21e099ca2f5ed02afae316f477e8a1fd1280 | refs/heads/master | 2020-08-05T17:02:38.936348 | 2019-10-04T08:56:23 | 2019-10-04T08:56:23 | 212,624,853 | 2 | 2 | NOASSERTION | 2019-10-03T16:20:19 | 2019-10-03T16:20:18 | null | UTF-8 | Python | false | false | 231 | py | #!/usr/bin/env python3
from lightning import Plugin
plugin = Plugin()
@plugin.hook("htlc_accepted")
def on_htlc_accepted(onion, htlc, plugin, **kwargs):
    # Test plugin: immediately resolve every incoming HTLC with an all-zero
    # payment preimage instead of forwarding/holding it ("short circuit").
    return {"result": "resolve", "payment_key": "00" * 32}
plugin.run()
| [
"[email protected]"
] | |
d8f90ee6ddf75cf52f82aca971d5e66cc8a69bad | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/T/tlevine/landbank_branches.py | e7046f1be1fbea8d12176b46f65cc7ee7138faf7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,898 | py | from scraperwiki.sqlite import save
from scraperwiki import swimport
keyify=swimport('keyify').keyify
from lxml.html import fromstring
from urllib2 import urlopen
URL='http://www.landbank.co.za/contact/branches.php'
from time import time
strip_address = swimport('strip_address').strip_address
DATE=time()
def main():
    """Scrape every region block and its branches, saving both tables."""
    for block_id, block in enumerate(get_blocks(), start=1):
        record = block.data()
        record['blockId'] = block_id
        record['date_scraped'] = DATE
        save([], record, 'blocks')
        for branch in block.branches():
            row = branch.data()
            row['blockId'] = block_id
            row['date_scraped'] = DATE
            save([], row, 'branches')
def get_blocks():
    """Parse the branches page and wrap each region <div> in a Block."""
    doc = fromstring(urlopen(URL).read())
    return [Block(node) for node in doc.xpath('//div[div[@class="float_info_left"]]')]
class Block:
    """One regional grouping of branches on the Land Bank contact page."""

    def __init__(self, block):
        self.block = block

    def __str__(self):
        return self.header()[0]

    def header(self):
        # The two preceding <strong> tags hold the block title and contact person.
        title, person = self.block.xpath('preceding-sibling::strong[position()<=2]/text()')
        return title, person

    def region(self):
        # Nearest preceding region heading.
        return self.block.xpath('preceding-sibling::div[div[@class="darker"]]/div/h3/text()')[-1]

    def branch_names(self):
        return self.block.xpath('descendant::strong/text()')

    def data(self):
        title, person = self.header()
        return {
            "blockName": title,
            "blockPerson": person,
            "region": self.region(),
        }

    def branches(self):
        wrapped = []
        for branch_name in self.branch_names():
            nodes = self.block.xpath('descendant::p[strong/text()="%s"]' % branch_name)
            assert len(nodes) == 1
            wrapped.append(Branch(nodes[0]))
        return wrapped
class Branch:
    # Wraps the <p> element for one branch: free-text address lines followed
    # by one phone number per <b> label ("Tel", "Fax", ...).
    def __init__(self,p):
        self.p=p
    def __str__(self):
        return self.name()
    def name(self):
        # The single <strong> child holds the branch name.
        nodes=self.p.xpath('strong/text()')
        assert 1==len(nodes)
        return nodes[0]
    def address(self):
        # All bare text nodes, including the trailing phone lines.
        return '\n'.join(self.p.xpath('text()'))
    def phonecount(self):
        # One <b> label per phone number, so this counts trailing phone lines.
        return len(self.b_text())
    def address_sans_phone(self):
        # Everything except the phone lines at the tail.
        return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()])
    def postcode(self):
        # Last text line before the phone numbers.
        return self.p.xpath('text()')[-self.phonecount()-1]
    def town(self):
        # Second-to-last text line before the phone numbers.
        return self.p.xpath('text()')[-self.phonecount()-2]
    def street_address(self):
        return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()-2])
    def b_text(self):
        # Phone labels, e.g. "Tel", "Fax".
        return self.p.xpath('b/text()')
    def phones(self):
        # Pair each label with its matching trailing text node.
        numbers=self.p.xpath('text()')[-self.phonecount():]
        return zip(self.b_text(),numbers)
    def data(self):
        # Flatten into the row saved to the `branches` table.
        d=dict([ (keyify(phone[0]),phone[1]) for phone in self.phones() ])
        d.update({
            "branchName":self.name()
            , "address_raw":self.address()
            , "town":strip_address(self.town())
            , "address":strip_address(self.address_sans_phone())
            , "street-address":strip_address(self.street_address())
            , "postcode":strip_address(self.postcode())
        })
        return d
main()from scraperwiki.sqlite import save
from scraperwiki import swimport
keyify=swimport('keyify').keyify
from lxml.html import fromstring
from urllib2 import urlopen
URL='http://www.landbank.co.za/contact/branches.php'
from time import time
strip_address = swimport('strip_address').strip_address
DATE=time()
def main():
    """Scrape every region block and its branches, saving both tables."""
    for block_id, block in enumerate(get_blocks(), start=1):
        record = block.data()
        record['blockId'] = block_id
        record['date_scraped'] = DATE
        save([], record, 'blocks')
        for branch in block.branches():
            row = branch.data()
            row['blockId'] = block_id
            row['date_scraped'] = DATE
            save([], row, 'branches')
def get_blocks():
    """Parse the branches page and wrap each region <div> in a Block."""
    doc = fromstring(urlopen(URL).read())
    return [Block(node) for node in doc.xpath('//div[div[@class="float_info_left"]]')]
class Block:
    """One regional grouping of branches on the Land Bank contact page."""

    def __init__(self, block):
        self.block = block

    def __str__(self):
        return self.header()[0]

    def header(self):
        # The two preceding <strong> tags hold the block title and contact person.
        title, person = self.block.xpath('preceding-sibling::strong[position()<=2]/text()')
        return title, person

    def region(self):
        # Nearest preceding region heading.
        return self.block.xpath('preceding-sibling::div[div[@class="darker"]]/div/h3/text()')[-1]

    def branch_names(self):
        return self.block.xpath('descendant::strong/text()')

    def data(self):
        title, person = self.header()
        return {
            "blockName": title,
            "blockPerson": person,
            "region": self.region(),
        }

    def branches(self):
        wrapped = []
        for branch_name in self.branch_names():
            nodes = self.block.xpath('descendant::p[strong/text()="%s"]' % branch_name)
            assert len(nodes) == 1
            wrapped.append(Branch(nodes[0]))
        return wrapped
class Branch:
    # Wraps the <p> element for one branch: free-text address lines followed
    # by one phone number per <b> label ("Tel", "Fax", ...).
    def __init__(self,p):
        self.p=p
    def __str__(self):
        return self.name()
    def name(self):
        # The single <strong> child holds the branch name.
        nodes=self.p.xpath('strong/text()')
        assert 1==len(nodes)
        return nodes[0]
    def address(self):
        # All bare text nodes, including the trailing phone lines.
        return '\n'.join(self.p.xpath('text()'))
    def phonecount(self):
        # One <b> label per phone number, so this counts trailing phone lines.
        return len(self.b_text())
    def address_sans_phone(self):
        # Everything except the phone lines at the tail.
        return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()])
    def postcode(self):
        # Last text line before the phone numbers.
        return self.p.xpath('text()')[-self.phonecount()-1]
    def town(self):
        # Second-to-last text line before the phone numbers.
        return self.p.xpath('text()')[-self.phonecount()-2]
    def street_address(self):
        return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()-2])
    def b_text(self):
        # Phone labels, e.g. "Tel", "Fax".
        return self.p.xpath('b/text()')
    def phones(self):
        # Pair each label with its matching trailing text node.
        numbers=self.p.xpath('text()')[-self.phonecount():]
        return zip(self.b_text(),numbers)
    def data(self):
        # Flatten into the row saved to the `branches` table.
        d=dict([ (keyify(phone[0]),phone[1]) for phone in self.phones() ])
        d.update({
            "branchName":self.name()
            , "address_raw":self.address()
            , "town":strip_address(self.town())
            , "address":strip_address(self.address_sans_phone())
            , "street-address":strip_address(self.street_address())
            , "postcode":strip_address(self.postcode())
        })
        return d
main() | [
"[email protected]"
] | |
04e72e6c7258fd53dbcd5d95b249789f8fd864f3 | c5b738612c1ecbce0583a327032db1e6c339de0b | /bv-av互转工具/bv_av.py | f9638358b8b57e57698495513ae8afc32d628271 | [
"BSD-2-Clause"
] | permissive | helloworldSB/Gear-s-toolBox-archive | c766d27e45a9eb9a99dfa360ac68a0ac1c93d8c7 | 40bc05cbb0060ecc3d9f941276c9caccbe1b8d82 | refs/heads/master | 2023-01-01T00:38:31.211587 | 2020-10-16T12:43:25 | 2020-10-16T12:43:25 | 306,789,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | class av_bv_cls:
def __init__(self):
self.table='fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'
self.tr={}
for i in range(58):
self.tr[self.table[i]]=i
self.s=[11,10,3,8,4,6]
self.xor=177451812
self.add=8728348608
def dec(self,x):
r=0
for i in range(6):
r+=self.tr[x[self.s[i]]]*58**i
return (r-self.add)^self.xor
def enc(self,x):
x=(x^self.xor)+self.add
r=list('BV1 4 1 7 ')
for i in range(6):
r[self.s[i]]=self.table[x//58**i%58]
return ''.join(r)
from urllib import request,parse
from json import loads#,dumps
while True:
ctrl_num = input(r'''
您希望采用哪种API?(输入数字)
1.官方API 2.离线算法
按 CTRL+C 结束
''')
if ctrl_num == '1' or ctrl_num == '2':
video_num = input('''
请输入视频号
记得带上av或BV前缀
''')
if video_num[:2] == 'av' or video_num[:2] == 'AV':flag = False
elif video_num[:2] == 'BV' or video_num[:2] == 'bv':flag = True
if ctrl_num == '1':
URL = r'http://api.bilibili.com/x/web-interface/archive/stat?'
if flag:URL += r'bvid='
else:URL += r'aid='
URL += video_num[2:]
req = request.Request(URL)
req.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36")
with request.urlopen(req) as f:
temp1 = f.read()
temp1 = loads(temp1)
if temp1['code'] != 0:
print('转换错误!错误代码:',temp1['code'],'错误值:',temp1['message'])
continue
print('\n转换成功!')
temp2 = temp1['data']
print('av号:',temp2['aid'])
print('bv号:',temp2['bvid'])
print('投币数:',temp2['coin'])
print('共被浏览过',temp2['view'],'次')
print('共有',temp2['reply'],'个评论')
print('点赞数:',temp2['like'])
print('收藏数:',temp2['favorite'])
print('分享数:',temp2['share'])
print(r'授权方式(1代表原创,2代表搬运):',temp2['copyright'])
print(r'历史排名:',temp2['his_rank'])
print(r'白嫖数:',temp2['view'] - temp2['coin'] - temp2['like'] - temp2['favorite'])
print('\n',r'注:白嫖数=观看数-投币人数-点赞人数-收藏人数')
elif ctrl_num == '2':
if flag:print('av号:av',av_bv_cls().dec(video_num),sep='')
else:print('bv号:',av_bv_cls().enc(int(video_num[2:])))
| [
"[email protected]"
] | |
2f73122c5e78d3cf60b87788fd8a8f65b26e7611 | 9625c5665611a5a1e92fa8fbe230ede1154d5a49 | /apps/messenger/samples/responses.py | 78adf0c0357fc80fb5579ac9caeff04340542dc2 | [] | no_license | Alfredynho/Sistema-Venta-de-Motos | 94a6ffcc45409faaea44f89389f89b6b1bfe0905 | 136b6d7c7cbcf4b5432212ae588d47a27fdcb348 | refs/heads/master | 2021-05-15T00:46:55.811827 | 2017-09-10T17:58:58 | 2017-09-10T17:58:58 | 103,049,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,392 | py | from django.conf import settings
from apps.messenger.components.attachments import TemplateAttachment, ImageAttachment, VideoAttachment
from apps.messenger.components.buttons import PostbackButton, URLButton
from apps.messenger.components.elements import ListElement, Element
from apps.messenger.components.messages import Message
from apps.messenger.components.replies import QuickReplies, TextReply, LocationReply
from apps.messenger.components.requests import MessageRequest
from apps.messenger.components.templates import ButtonTemplate, ListTemplate, GenericTemplate
# from apps.delivery.models import Promotion, Order
import logging
try:
import coloredlogs
coloredlogs.install()
except Exception as e:
pass
DEFAULT_VIDEO = "https://www.dropbox.com/s/v5k3jqjpkhccfg7/developer.mp4?dl=1"
TOKEN_REQUEST = "TOKEN_REQUEST"
HELP_RESOURCES = [
{
"name": TOKEN_REQUEST,
"url": "https://www.dropbox.com/s/v5k3jqjpkhccfg7/developer.mp4?dl=1"
}
]
if settings.DEBUG:
SERVER_URL = "https://484db321.ngrok.io"
else:
SERVER_URL = "https://chuspita.net"
LOGIN_URL = SERVER_URL+ "/messenger/login/"
class HelpMedia(object):
    @staticmethod
    def get(sender, resource):
        """Return a MessageRequest carrying the help video for *resource*.

        Falls back to DEFAULT_VIDEO when the resource name is unknown.
        """
        # HELP_RESOURCES is a list of {"name", "url"} dicts, so the entry must
        # be looked up by its "name" key. The original tested
        # `resource in HELP_RESOURCES`, which compares the string against the
        # dicts themselves and never matches (and `HELP_RESOURCES[resource]`
        # would have raised TypeError had it matched).
        url = next(
            (entry["url"] for entry in HELP_RESOURCES if entry.get("name") == resource),
            DEFAULT_VIDEO,
        )
        message = Message(
            attachment=VideoAttachment(
                url=url
            )
        )
        return MessageRequest(sender, message)
class Components(object):
@staticmethod
def typing(requests, sender):
requests.append(MessageRequest(sender, sender_action='mark_seen'))
requests.append(MessageRequest(sender, sender_action='typing_on'))
@staticmethod
def make_answer(sender, answer):
responses = list()
responses.append(Components.typing(responses, sender))
message = Message(text=answer)
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_welcome(sender):
responses = list()
responses.append(Components.typing(responses, sender))
message = Message(text="Bienvenido Terricola")
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_distance(sender):
responses = list()
responses.append(Components.typing(responses, sender))
message = Message(text="Estas a 5km/10min caminando de distancia de la sucursal")
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_start(sender):
# Mensaje
responses = list()
message = Message(text="Ya no mas filas en el banco")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
template = ListTemplate(
style="compact",
elements=[
ListElement(
title="GENERAR UN TICKET",
image_url="https://s14.postimg.org/u9ohxgqsd/obtener.png",
subtitle="Ahorra tiempo con tickets electrónicos",
buttons=[
PostbackButton(
title="OBTENER TICKET",
payload="GET_TICKET"
)
],
),
ListElement(
title="MIS TICKETS",
image_url="https://s14.postimg.org/ea5udwuql/mistickets.png",
subtitle="Administra los tickets pendientes que tienes",
buttons=[
PostbackButton(
title="VER MIS TICKETS",
payload="MY_TICKETS"
)
],
)
]
)
attachment = TemplateAttachment(template=template)
message = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
# >>>>>>>>>>>>>>>>>>>>>>>>>> START RECHARGE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
@staticmethod
def make_init(sender):
responses = list()
responses.append(Components.typing(responses, sender))
message = Message(text="Bienvenido Terricola")
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_number_phones(sender):
print("pase x aqui")
telephone_numbers = None
responses = list()
elements = []
if telephone_numbers.count() > 0:
for telephone_number in telephone_numbers:
text = TextReply(
title=telephone_number.number,
payload="VIEW_BRANCHES",
image_url='http://www.teslasrl.net/imagenes/trabajos/ENTEL.jpg'
)
elements.append(text)
numbers = QuickReplies(
replies=elements
)
message = Message(
text="Estos son tus numeros ... ",
quick_replies=numbers
)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
else:
message = Message(text="No tienes numeros registrados :(")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def recharge_amount(sender):
responses = list()
replies = QuickReplies(
replies=[
TextReply(
title="10",
payload="VIEW_BRANCHES",
),
TextReply(
title="20",
payload="VIEW_BRANCHES",
),
TextReply(
title="30",
payload="VIEW_BRANCHES",
),
TextReply(
title="50",
payload="VIEW_BRANCHES",
),
TextReply(
title="100",
payload="VIEW_BRANCHES",
)
]
)
message = Message(
text="Cuanto Deseas Recargar?",
quick_replies=replies
)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def show_cards(sender):
# Mensaje
responses = list()
message = Message(text="Ya no mas filas en el banco")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
template = ListTemplate(
style="compact",
elements=[
ListElement(
title="GENERAR UN TICKET",
image_url="https://s14.postimg.org/u9ohxgqsd/obtener.png",
subtitle="Ahorra tiempo con tickets electrónicos",
buttons=[
PostbackButton(
title="OBTENER TICKET",
payload="GET_TICKET"
)
],
),
ListElement(
title="MIS TICKETS",
image_url="https://s14.postimg.org/ea5udwuql/mistickets.png",
subtitle="Administra los tickets pendientes que tienes",
buttons=[
PostbackButton(
title="VER MIS TICKETS",
payload="MY_TICKETS"
)
],
)
]
)
attachment = TemplateAttachment(template=template)
message = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_recharge(sender):
responses = list()
responses.append(Components.typing(responses, sender))
template = GenericTemplate(
elements=[
Element(
title="Se Recargo con exito 50 Bs al 73053045",
subtitle="Gracias por usar el servicio",
buttons=[
URLButton(
title="INICIO",
url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
),
URLButton(
title="HACER OTRA RECARGA",
url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
)
]
)
]
)
attachment = TemplateAttachment(template=template)
component = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, component))
return responses
    @staticmethod
    def insufficient_balance(sender):
        """Send an "insufficient balance" warning card with retry/cancel
        buttons. Returns the list of response actions."""
        responses = list()
        responses.append(Components.typing(responses, sender))
        template = GenericTemplate(
            elements=[
                Element(
                    title="Saldo insuficiente para recargar",
                    subtitle="Advertencia",
                    buttons=[
                        URLButton(
                            title="OTRA RECARGA",
                            url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
                        ),
                        URLButton(
                            title="CANCELAR",
                            url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
                        )
                    ]
                )
            ]
        )
        attachment = TemplateAttachment(template=template)
        component = Message(attachment=attachment)
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, component))
        return responses
# >>>>>>>>>>>>>>>>>>>>>>>>>> END RECHARGE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    @staticmethod
    def make_position_request(sender):
        """Ask |sender| to share their location via quick replies (location
        share, or a text shortcut to list ATMs). Returns the response actions."""
        # Message and quick-reply component
        responses = list()
        replies = QuickReplies(
            replies=[
                LocationReply(),
                TextReply(
                    title="VER CAJEROS",
                    payload="VIEW_BRANCHES",
                )
            ]
        )
        message = Message(
            text="Por favor envianos tu ubicación para determinar que sucursales estan cerca de ti",
            quick_replies=replies
        )
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, message))
        return responses
@staticmethod
def get_map_screen(lat, lng):
return "https://maps.googleapis.com/maps/api/staticmap?zoom=15&size=500x250&markers=color:red%7Clabel:S%7C" + str(
lat) \
+ "," + str(lng) + "&key=AIzaSyBwHPJ6_aVyk9QNkzMIPRoC22QFnvnHjME"
@staticmethod
def get_map_url(lat, lng):
return "https://maps.google.com/maps?q=" + str(lat) + "," + str(lng)
@staticmethod
def get_position(number):
geo = str(number.position).split(",")
return geo[0], geo[1]
@staticmethod
def make_branches(sender):
"""
Envia la lista de Suscripciones de un usuario o la peticion para registrar una suscripción
"""
branches = None
responses = list()
elements = []
if branches.count() > 0:
# Mensaje
message = Message(text="Esta es la lista de Sucursales Cerca :)", )
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
# Componente
for branch in branches:
lat, lng = Components.get_position(branch)
element = Element(
title=branch.name,
item_url=Components.get_map_url(lat, lng),
image_url=Components.get_map_screen(lat, lng),
subtitle=branch.address,
buttons=[
PostbackButton(
title="VER",
payload="%(payload)s:%(id)s" % {
"payload": "SELECT_BRANCH",
"id": branch.id
}
)
]
)
elements.append(element)
template = GenericTemplate(
elements=elements
)
attachment = TemplateAttachment(template=template)
component = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, component))
else:
message = Message(text="No hay ninguna sucursales registradas :(")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_atms(sender):
"""
Envia la lista de Suscripciones de un usuario o la peticion para registrar una suscripción
"""
branches = None
responses = list()
elements = []
if branches.count() > 0:
# Mensaje
message = Message(text="Esta es la lista de Cajeros cerca :)", )
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
# Componente
for branch in branches:
lat, lng = Components.get_position(branch)
element = Element(
title=branch.name,
item_url=Components.get_map_url(lat, lng),
image_url=Components.get_map_screen(lat, lng),
subtitle=branch.address,
buttons=[
PostbackButton(
title="VER",
payload="%(payload)s:%(id)s" % {
"payload": "SELECT_BRANCH",
"id": branch.id
}
)
]
)
elements.append(element)
template = GenericTemplate(
elements=elements
)
attachment = TemplateAttachment(template=template)
component = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, component))
else:
message = Message(text="No hay cajeros registrados :(")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
    @staticmethod
    def make_change(sender):
        """Send a (hard-coded demo) USD exchange-rate card linking to the
        central bank's indicator page. Returns the list of response actions."""
        responses = list()
        responses.append(Components.typing(responses, sender))
        template = GenericTemplate(
            elements=[
                Element(
                    title="El Dolar Cotiza a 6,929 Bs.",
                    subtitle="1 USD = 6,929 Bs.",
                    default_action=URLButton(
                        url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
                    ),
                    buttons=[
                        URLButton(
                            title="VER OTRAS MONEDAS",
                            url="https://www.bcb.gob.bo/librerias/indicadores/otras/ultimo.php"
                        )
                    ]
                )
            ]
        )
        attachment = TemplateAttachment(template=template)
        component = Message(attachment=attachment)
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, component))
        return responses
    @staticmethod
    def make_schedules(sender):
        """Offer two (hard-coded demo) advisor appointment slots as a compact
        list with AGENDAR postback buttons. Returns the response actions."""
        # Intro message
        responses = list()
        message = Message(text="¿Deseas agendar una cita con un asesor?, (y) Estas son las fechas disponibles")
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, message))
        template = ListTemplate(
            style="compact",
            elements=[
                ListElement(
                    title="21 de Diciembre del 2016",
                    image_url="https://s14.postimg.org/hn996fblp/date.jpg",
                    subtitle="15:00 Hrs",
                    buttons=[
                        PostbackButton(
                            title="AGENDAR",
                            payload="AGENDA_1"
                        )
                    ],
                ),
                ListElement(
                    title="15 de Enero del 2017",
                    image_url="https://s14.postimg.org/hn996fblp/date.jpg",
                    subtitle="19:00 Hrs.",
                    buttons=[
                        PostbackButton(
                            title="AGENDAR",
                            payload="AGENDA_2"
                        )
                    ],
                )
            ]
        )
        attachment = TemplateAttachment(template=template)
        message = Message(attachment=attachment)
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, message))
        return responses
@staticmethod
def make_ticket(sender, mode=None):
# Mensaje
# TODO cambiar las imagenes por tickets generados.
responses = list()
message = Message(text="Este es tu ticket, gracias por confiar en Chuspita")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
if mode == "CASH":
url = "https://s14.postimg.org/j8qdab43h/caja.png"
elif mode == "PLATFORM":
url = "https://s14.postimg.org/olf7ofrzx/plataforma.png"
else:
url = "https://s14.postimg.org/olf7ofrzx/plataforma.png"
message = Message(
attachment=ImageAttachment(
url=url
)
)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
@staticmethod
def make_own_tickets(sender):
# tickets = Ticket.objects.all()
tickets = list()
responses = list()
elements = []
if tickets.count() > 0:
# Mensaje
message = Message(text="Estos son tus tickets", )
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
# Componente
for ticket in tickets:
lat, lng = Components.get_position(ticket.branch)
if ticket.type == "CASH":
tty = "CAJA"
else:
tty = "PLATAFORMA"
element = Element(
title="%s: %s" % (tty, ticket.code),
subtitle="%s: %s" % (ticket.branch.bank.name, ticket.branch.name),
default_action=URLButton(
url=Components.get_map_url(lat, lng)
),
buttons=[
PostbackButton(
title="CANCELAR",
payload="%(payload)s:%(id)s" % {
"payload": "CANCEL_TICKET",
"id": ticket.id
}
),
PostbackButton(
title="RENOVAR",
payload="%(payload)s:%(id)s" % {
"payload": "REGENERATE_TICKET",
"id": ticket.id
}
)
]
)
elements.append(element)
template = GenericTemplate(
elements=elements
)
attachment = TemplateAttachment(template=template)
component = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, component))
else:
message = Message(text="No tienes tickets")
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
return responses
    @staticmethod
    def make_cancel_ticket(sender):
        """Send a ticket-cancelled confirmation message to |sender|.
        Returns the list of response actions."""
        responses = list()
        message = Message(text="Se ha cancelado el ticket que solicitaste :'(", )
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, message))
        return responses
    @staticmethod
    def make_regenerate_ticket(sender):
        """Ask |sender| to share their location so a better-suited ticket can
        be generated. Returns the list of response actions."""
        responses = list()
        message = Message(text="Enviame tu ubicación para generar el ticket que mas te convenga ;)")
        responses.append(Components.typing(responses, sender))
        responses.append(MessageRequest(sender, message))
        return responses
@staticmethod
def make_promotions(sender):
"""
Envia la lista de promociones
"""
promotions = None
responses = list()
elements = []
# Mensaje
message = Message(text="Esta es la lista de promociones esta semana :)", )
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, message))
# Componente
for promotion in promotions:
element = Element(
title=promotion.name,
item_url=promotion.url,
image_url=SERVER_URL + promotion.cover.url,
subtitle=promotion.description,
buttons=[
URLButton(
title="VER",
url=promotion.url
)
]
)
elements.append(element)
template = GenericTemplate(
elements=elements
)
attachment = TemplateAttachment(template=template)
component = Message(attachment=attachment)
responses.append(Components.typing(responses, sender))
responses.append(MessageRequest(sender, component))
return responses
| [
"[email protected]"
] | |
c3c32b313e00e643cc818413d71b1bfdc3db915c | eee3a183136bdebed599249604b63f0b0f02ba71 | /pyrates/ui.py | 095ed9079bf11379368972be213dff233665f534 | [] | no_license | gutomaia/pyrates | 3971d332a0977a8d130c0836e86c3368a8531a8b | a81ef57c368e1341a0e09b548a94f27801f89546 | refs/heads/master | 2021-06-29T12:53:56.033594 | 2017-09-20T19:00:45 | 2017-09-20T19:00:45 | 104,124,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,567 | py | import wx, sys, os, pygame
### PYGAME IN WX ###
# A simple test of embedding Pygame in a wxPython frame
#
# By David Barker (aka Animatinator), 14/07/2010
# Patch for cross-platform support by Sean McKean, 16/07/2010
# Patch to fix redrawing issue by David Barker, 20/07/2010
# Second window demo added by David Barker, 21/07/2010
class PygameDisplay(wx.Window):
    """wx.Window that hosts a Pygame drawing surface: a wx.Timer drives
    redraws at self.fps, the frame is rendered to an off-screen
    pygame.Surface and blitted to the window via a wx bitmap.
    NOTE(review): uses wx "classic" (Python 2 era) APIs such as
    GetSizeTuple/ImageFromData/BitmapFromImage."""
    def __init__(self, parent, ID):
        wx.Window.__init__(self, parent, ID)
        self.parent = parent
        self.hwnd = self.GetHandle()
        self.size = self.GetSizeTuple()
        # When True, the off-screen surface is recreated on the next redraw.
        self.size_dirty = True
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_TIMER, self.Update, self.timer)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.fps = 60.0
        # Timer period in milliseconds derived from the target frame rate.
        self.timespacing = 1000.0 / self.fps
        self.timer.Start(self.timespacing, False)
        self.linespacing = 5
    def Update(self, event):
        # Timer tick: render one frame.
        self.Redraw()
    def Redraw(self):
        """Render to the off-screen surface and blit it to the window."""
        if self.size_dirty:
            self.screen = pygame.Surface(self.size, 0, 32)
            self.size_dirty = False
        self.pygame_redraw(self.timer.GetInterval())
        s = pygame.image.tostring(self.screen, 'RGB') # Convert the surface to an RGB string
        img = wx.ImageFromData(self.size[0], self.size[1], s) # Load this string into a wx image
        bmp = wx.BitmapFromImage(img) # Get the image in bitmap form
        dc = wx.ClientDC(self) # Device context for drawing the bitmap
        dc.DrawBitmap(bmp, 0, 0, False) # Blit the bitmap image to the display
        del dc
    def pygame_redraw(self, deltaTime):
        """Demo drawing: a fan of anti-aliased white lines on black."""
        self.screen.fill((0,0,0))
        cur = 0
        w, h = self.screen.get_size()
        while cur <= h:
            pygame.draw.aaline(self.screen, (255, 255, 255), (0, h - cur), (cur, 0))
            cur += self.linespacing
    def OnPaint(self, event):
        self.Redraw()
        event.Skip() # Make sure the parent frame gets told to redraw as well
    def OnSize(self, event):
        # Remember the new size; the surface itself is rebuilt lazily.
        self.size = self.GetSizeTuple()
        self.size_dirty = True
    def Kill(self, event):
        # Make sure Pygame can't be asked to redraw /before/ quitting by unbinding all methods which
        # call the Redraw() method
        # (Otherwise wx seems to call Draw between quitting Pygame and destroying the frame)
        # This may or may not be necessary now that Pygame is just drawing to surfaces
        self.Unbind(event = wx.EVT_PAINT, handler = self.OnPaint)
        self.Unbind(event = wx.EVT_TIMER, handler = self.Update, source = self.timer)
# Menu id for the Help->About item (arbitrary app-unique value).
ID_ABOUT = 12753
class Frame(wx.Frame):
    """Main application frame: menubar, toolbar, statusbar, a DisplayScene
    (game view) stacked above a SourceEditor, and a wx.Timer ticking at the
    display's fps."""
    def init_menubar(self):
        """Build the File/Help menubar and bind the menu handler."""
        self.menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        newitem = wx.MenuItem(fileMenu, wx.ID_NEW, text='New', kind = wx.ITEM_NORMAL)
        fileMenu.AppendItem(newitem)
        fileMenu.AppendSeparator()
        # NOTE(review): local name 'quit' shadows the builtin (harmless here).
        quit = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+Q')
        fileMenu.AppendItem(quit)
        helpMenu = wx.Menu()
        aboutItem = wx.MenuItem(helpMenu, ID_ABOUT, text='About', kind = wx.ITEM_NORMAL)
        helpMenu.AppendItem(aboutItem)
        self.menubar.Append(fileMenu, '&File')
        self.menubar.Append(helpMenu, '&Help')
        self.SetMenuBar(self.menubar)
        self.Bind(wx.EVT_MENU, self.menuhandler)
    def menuhandler(self, event):
        # Only Quit is handled; other menu ids are ignored.
        menu_id = event.GetId()
        if menu_id == wx.ID_EXIT:
            self.Kill(event)
    def init_toolbar(self):
        """Build the toolbar with a single Run button."""
        # self.toolbar = self.CreateToolBar(wx.TB_TEXT, wx.TB_NOICONS, -1)
        self.toolbar = self.CreateToolBar()
        run = self.toolbar.AddLabelTool(wx.ID_ANY, 'Run', wx.Bitmap('assets/icons/run.png'))
        self.Bind(wx.EVT_TOOL, self.run_command, run)
        self.toolbar.Realize()
    def run_command(self, event):
        # Feed the editor's source text into the active scene.
        source = self.editor.GetText()
        self.display.active_scene.input_code(source)
    def init_statusbar(self):
        """Build a three-field status bar with placeholder text."""
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-3, -4, -2])
        self.statusbar.SetStatusText("pyRATES", 0)
        self.statusbar.SetStatusText("Look, it's a nifty status bar!!!", 1)
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, -1, size = (600, 600))
        self.SetTitle("Pyrates")
        self.init_menubar()
        self.init_toolbar()
        self.init_statusbar()
        # Imported here (not at module top) -- presumably to avoid a circular
        # import with the game engine; confirm before moving it.
        from gameengine import DisplayScene
        self.display = DisplayScene(self, -1)
        # self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_CLOSE, self.Kill)
        self.curframe = 0
        self.timer = wx.Timer(self)
        # self.Bind(wx.EVT_SCROLL, self.OnScroll)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_TIMER, self.Update, self.timer)
        self.timer.Start((1000.0 / self.display.fps))
        from editor import SourceEditor
        self.editor = SourceEditor(self)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.display, 1, flag = wx.EXPAND)
        self.sizer.Add(self.editor, 1, flag = wx.EXPAND)
        self.SetAutoLayout(True)
        self.SetSizer(self.sizer)
        self.Layout()
    def Kill(self, event):
        # Shut down the display first so it stops redrawing, then destroy.
        self.display.Kill(event)
        self.Destroy()
    def OnSize(self, event):
        self.Layout()
    def Update(self, event):
        pass
        # self.statusbar.SetStatusText("Frame %i" % self.curframe, 2)
    def OnScroll(self, event):
        # NOTE(review): dead code -- the EVT_SCROLL bind above is commented out
        # and self.slider is never created; this would raise AttributeError if
        # it were ever invoked.
        self.display.linespacing = self.slider.GetValue()
| [
"[email protected]"
] | |
5279d2be8c763659dcb6380332b42896fbc7aadf | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/band/BandConstance.py | 3084b1fafdd7057904fb44feb8192cad6b7ec899 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | outcome_ok = 0
# Result codes for band (party) invitation attempts.
outcome_declined = 9
outcome_not_online = 10
outcome_already_invited = 11
outcome_already_in_Band = 12
outcome_full = 13
outcome_recently_invited = 14
# Maximum number of members a band may hold.
MAX_BAND_MEMBERS = 12
# Event names for band lifecycle / membership notifications. Each constant's
# value is the event-string broadcast at runtime -- do not change the values.
BandMakeEvent = 'BandMakeEvent'
BandAddEvent = 'BandAddEvent'
BandSetCaptainEvent = 'BandSetCaptainEvent'
BandRemoveEvent = 'BandRemoveEvent'
BandOnlineEvent = 'BandOnlineEvent'
BandOfflineEvent = 'BandOfflineEvent'
BandDetailsEvent = 'BandDetailsEvent'
BandRejectInviteEvent = 'BandRejectInviteEvent'
BandRetractInviteEvent = 'BandRetractInviteEvent'
BandInvitationEvent = 'BandInvitationEvent'
# NOTE(review): the value 'BandIinvitationResponce' is misspelled but is
# runtime data other code may listen for -- left as-is deliberately.
BandInvitationResponceEvent = 'BandIinvitationResponce'
BandRejoinEvent = 'BandRejoinEvent'
# Per-member attribute-change events.
BandMemberNameChange = 'BandMemberNameChange'
BandMemberHpChange = 'BandMemberHpChange'
BandMemberMaxHpChange = 'BandMemberMaxHpChange'
BandMemberShipChange = 'BandMemberShipChange'
BandMemberSinceChange = 'BandMemberSinceChange'
BandMemberOnlineChange = 'BandMemberOnlineChange'
BandMemberPVPChange = 'BandMemberPVPChange'
BandMemberParlorChange = 'BandMemberParlorChange'
BandMemberStatusChange = 'BandMemberStatusChange'
BandMemberManagerChange = 'BandMemberManagerChange'
# NOTE(review): BandMemberNameChange is assigned twice (same value); this
# second assignment is redundant.
BandMemberNameChange = 'BandMemberNameChange'
BandMembershipChange = 'BandMembershipChange'
| [
"[email protected]"
] | |
c85814fada2df5966221b0945bc1ba5ac1480924 | 095521582f598b65b76f222d8c1acbcaca0c24bf | /output_raw/output_input_Lx1Ly1.py | 6fc5163f524ebec0cbe9a033d06a5b0d688ee15f | [
"MIT"
] | permissive | ryuikaneko/itps_contraction | cf07e41d32e93c10db6ebeb1c4f5246b238e737b | 10816fb6c90d77f5a3b2f804ab22573d1d676eb4 | refs/heads/master | 2020-08-28T23:05:00.262183 | 2020-08-03T01:04:22 | 2020-08-03T01:04:22 | 217,847,703 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | def Contract_scalar_1x1(\
    t0_2,t1_2,t2_2,\
    t0_1,t1_1,t2_1,\
    t0_0,t1_0,t2_0,\
    o1_1\
    ):
    ##############################
    # ./input/input_Lx1Ly1.dat
    ##############################
    # (o1_1*(t1_1.conj()*((t2_1*(t2_0*t1_0))*(t1_1*((t0_0*t0_1)*(t0_2*(t2_2*t1_2)))))))
    # cpu_cost= 6.04e+10 memory= 4.0004e+08
    # final_bond_order ()
    ##############################
    # Machine-generated tensor-network contraction for a 1x1 iPEPS unit cell:
    # the nine t*_* arguments are the boundary/environment tensors of a 3x3
    # patch, o1_1 is the center operator, and the nested np.tensordot calls
    # realize the contraction order shown above (chosen by the generator to
    # minimize cpu_cost/memory). Returns a scalar (fully contracted network).
    # NOTE(review): generated code -- do not hand-edit the axis lists; regenerate
    # from the .dat input if the network changes.
    return np.tensordot(
        o1_1, np.tensordot(
            t1_1.conj(), np.tensordot(
                np.tensordot(
                    t2_1, np.tensordot(
                        t2_0, t1_0, ([1], [0])
                    ), ([1], [0])
                ), np.tensordot(
                    t1_1, np.tensordot(
                        np.tensordot(
                            t0_0, t0_1, ([1], [0])
                        ), np.tensordot(
                            t0_2, np.tensordot(
                                t2_2, t1_2, ([0], [1])
                            ), ([1], [1])
                        ), ([1], [0])
                    ), ([0, 1], [1, 4])
                ), ([0, 1, 3, 4], [5, 0, 3, 1])
            ), ([0, 1, 2, 3], [3, 4, 0, 1])
        ), ([0, 1], [1, 0])
    )
| [
"[email protected]"
] | |
86f698f7d8d5c7cfd608eaafe79d63a8b0eb18e9 | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_all_A_OA/1563. Shortest path to the destination.py | 48bee8bb3ef8e656816609bf09cf2f8d0f824c98 | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | class Solution:
"""
@param targetMap:
@return: nothing
"""
def shortestPath(self, targetMap):
n = len(targetMap)
m = len(targetMap[0])
from queue import Queue
q = Queue()
q.put((0, 0))
visited = [[False for _ in range(m)] for __ in range(n)]
visited[0][0] = True
steps = -1
while not q.empty():
steps += 1
for _ in range(q.qsize()):
x, y = q.get()
if targetMap[x][y] == 2:
return steps
for move in [[0, 1], [1, 0], [-1, 0], [0, -1]]:
x_ = x + move[0]
y_ = y + move[1]
if x_ < 0 or x_ >= n or y_ < 0 or y_ >= m or targetMap[x_][y_] == 1 or visited[x_][y_]:
continue
q.put((x_, y_))
visited[x_][y_] = True
return -1 | [
"[email protected]"
] | |
ee81b899c6541035f84172ed6bdc9122b5c2ad05 | 58ca1aedfd2c2c43ce3f71e7877f92c51d41adf8 | /filter_boost.py | 7eb7dd7c85ddbaf8c0158ea3ec0773068f690eb8 | [] | no_license | seaun163/DeepSLAM | 00d88ee00367987cb4b7a57db3b0bedafeeb4e68 | a038772bd7de897fb8253214813bfab09e31d62f | refs/heads/master | 2021-01-25T08:19:28.198277 | 2016-10-18T19:11:32 | 2016-10-18T19:11:32 | 93,752,917 | 1 | 0 | null | 2017-06-08T13:32:24 | 2017-06-08T13:32:24 | null | UTF-8 | Python | false | false | 4,391 | py | import numpy as np
import h5py
import scipy.sparse
import scipy.io
from constants import *
import ipdb
import os
import pickle
# frame length, which also dictates the delay being frame capture and feedback
# because of forward_fit
# which isn't even in the report...
flen = DEE
flen_2 = 3
dt = EPSILON
st = 0.75 #kind of equivalent to sigma
"""
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
    color_scheme='Linux', call_pdb=1)
"""
res_dict = {}
ground_truth = scipy.io.loadmat('GroundTruth_Eynsham_40meters.mat')['ground_truth']
# For each confusion matrix in ./good: extract best matches, smooth/filter
# them, then score precision/recall against the ground-truth sparse matrix.
# NOTE(review): the magic indices 4789/9575 presumably split the dataset into
# the two traverses of the Eynsham loop (first pass = reference, second pass
# = query) -- confirm against the dataset documentation.
for fname in os.listdir("good"):
    ### Get matches from confusion matrix ###
    # load the confusion matrix
    dname = "dataset"
    print("opening file %s" %fname)
    h5f = h5py.File("good/"+fname, 'r')
    conf_matrix = h5f[dname][:]
    h5f.close()
    print("procesing layer")
    # grab the testing matrix from the confusion matrix
    test_matrix = conf_matrix[0:4789, 4789:9575]
    # the min score is the best match
    b = np.argmin(test_matrix, axis=0)
    # Percentage of top matches used in the vibration calculation, allows the occasional outlier
    inlier_fraction = 5/6.0
    matches = np.zeros(int(b.size - flen + flen_2))
    stable_count = 0
    # WHY NOT FILTER AROUND? Change to get same results but neater?
    for i in range(0, b.size - flen):
        match_index = int(i + flen_2)
        # Check that the match being considered is continous with those around it
        vibrations = np.abs( np.diff(b[i:i + flen]) )
        sorted_vib = np.sort(vibrations)
        max_diff = np.max(sorted_vib[ 0 : int(np.round(inlier_fraction * flen)) ])
        stable = max_diff <= dt
        # linear regression to get slope of fit
        pt = np.polyfit( np.arange(0, flen), b[i:i + flen], 1)
        # This is the slope, because highest powers first
        velocity = pt[0]
        # forward match with a tolerance of -1 and +1
        # absolute value to check going forwards or backwards
        forward_match = np.abs(velocity - 1) < st or np.abs(velocity + 1) < st
        if stable and forward_match:
            # smooth the value based off of those around it
            matches[match_index] = pt[1] + pt[0] * 0.5 * flen
            for j in range(1, flen_2 + 1):
                back_chk = match_index - j
                front_chk = match_index + j
                # fill in the zero (default) values if possible
                if matches[back_chk] == 0:
                    matches[back_chk] = b[back_chk]
                # fill in base values for future vals
                if front_chk < 4783:
                    matches[front_chk] = b[front_chk]
    ### Compare to ground truth ###
    print("zeros")
    print(np.where(matches == 0)[0].size)
    print("comparing to ground truth")
    start_first = 1
    end_first = 4788
    len_first = end_first - start_first + 1
    start_second = 4789
    end_second = 9574
    len_second = end_second - start_second + 1
    half_matrix = 4785
    ground_matrix = np.zeros((len_second, len_first))
    tp_num = 0
    tp_value = []
    fp_num = 0
    fp_value = []
    # Build a dense 0/1 ground-truth matrix restricted to first-traverse
    # columns. ground_truth rows appear to be scipy sparse (they have
    # .toarray()) -- confirm.
    for ground_idx in range(start_second, end_second):
        value_ground = ground_truth[ground_idx, :]
        value_fit = value_ground.toarray().flatten().nonzero()[0]
        # only store those in first round
        value_fit2 = value_fit[ np.where(value_fit < end_first)[0].astype(int) ]
        value_fit3 = value_fit2 - start_first + 1
        value_fit4 = value_fit3[ np.where(value_fit3 > 0)[0].astype(int) ]
        matrix_idx = ground_idx - start_second + 1
        ground_matrix[matrix_idx, value_fit4] = 1
    # Count true/false positives among non-zero (accepted) matches.
    for truth_idx in range(0, matches.size):
        ground_row = ground_truth[truth_idx+end_first, :]
        ground_row_idx = ground_row.toarray().flatten().nonzero()[0]
        if matches[truth_idx] != 0:
            truth_va = np.round(matches[truth_idx])
            if np.any(ground_row_idx == np.round(truth_va)):
                tp_num = tp_num + 1
                tp_value = [tp_value, truth_idx]
            else:
                fp_num = fp_num + 1
                fp_value = [fp_value, truth_idx]
    # NOTE(review): raises ZeroDivisionError if no matches were accepted
    # (tp_num + fp_num == 0).
    precision = tp_num / float(tp_num + fp_num)
    print(precision)
    recall = tp_num / float(b.size)
    print(recall)
    res_dict[fname] = (precision, recall)
# NOTE(review): the file handle opened inline here is never closed explicitly;
# a `with open(...)` block would be safer.
pickle.dump(res_dict, open("filter_res.p", "wb"))
| [
"[email protected]"
] | |
dff1b0851ea07a81abe823ad1eca3f3fa6f6f05b | 569cd39c950be39cac44bd6a70da4c0242d6a39d | /3333333.py | 28b1f833c8d45fa6d8dc4c03d5dac85fe0479479 | [] | no_license | bhavyatamil55/PYTHON | cda8c1f93c1d4f173c4ae09479816e730c1d04a2 | 36c0dd7b65f3d22d90a0e356e571edb235d0068f | refs/heads/master | 2020-06-09T17:25:56.802539 | 2019-07-09T10:02:11 | 2019-07-09T10:02:11 | 193,476,836 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | b=int(input())
# Read the space-separated integers (the count read on the previous line,
# `b`, is never used) and print the smallest one.
s=list(map(int,input().split()))
print(min(s))
| [
"[email protected]"
] | |
9358246c129bb1d8b1e564a81ba419196d73a04a | 30f6633a24d799fddd51672c528e4baee649d8cd | /6.01/designLab09/simulator/core/search/search.py | 11d2ff11f60cd14eada34c78bd6c767fc197f676 | [] | no_license | Rajpratik71/mit-courses | e12c864435a1af2c8b7034af956fd2f53d559cfc | 86a06a3192e17230a05c5c7beeed5699df73be22 | refs/heads/master | 2023-06-22T21:05:37.240985 | 2023-01-26T06:44:49 | 2023-01-26T06:44:49 | 192,182,074 | 0 | 2 | null | 2023-04-05T04:00:47 | 2019-06-16T11:15:24 | TeX | UTF-8 | Python | false | false | 3,299 | py | """
Search infrastructure.
Credit to Chapter 7 of MIT 6.01 notes
(http://mit.edu/6.01/www/handouts/readings.pdf).
"""
__author__ = '[email protected] (Michael Mekonnen)'
from constants import PRINT_FAIL_REASON
from core.data_structures.priority_queue import Priority_Queue
class Search_Node:
  """
  Representation for a node in the search graph. Clients of the search
  infrastructure should use subclasses of Search_Node implementing the
  get_children method.
  """
  def __init__(self, state, parent=None, cost=0):
    """
    |state|: state of the search node, dependent on the application.
    |parent|: parent node to this node, None if this node is the root.
    |cost|: cost to reach from the root node to this node.
    """
    self.state = state
    self.parent = parent
    self.cost = cost
  def get_children(self):
    """
    Should return a list of the Search_Nodes that are reachable from this node.
    """
    raise NotImplementedError('subclasses should implement this')
  def get_path(self):
    """
    Returns a list of the states of the nodes from the root to this node.
    """
    # Walk up the parent chain collecting states, then reverse once at the
    # end. (The previous implementation prepended with |[state] + path|,
    # which copies the list on every step: O(n^2) overall vs O(n) here.)
    path = []
    current = self
    while current is not None:
      path.append(current.state)
      current = current.parent
    path.reverse()
    return path
def a_star(start_node, goal_test, heuristic=lambda state: 0, best_first=False,
    progress=lambda state, cost: None, max_states_to_expand=None, verbose=True):
  """
  Runs an A* search starting at |start_node| until a node that satisfies the
  |goal_test| is found. |goal_test| should be a function that takes in a
  state of a node and returns True if the desired goal has been satisfied.
  |heuristic| is a map from node states to estimates of distance to the
  goal, should be admissible to produce optimal value, and can result in
  considerable speed-up! (See Chapter 7 of MIT 6.01 course notes for more.)
  Returns the node whose state satisfies the |goal_test|, or None if no such
  node is found. Also returns the total number of nodes expanded.
  For progress checks, every time a node is popped out of the priority queue,
  this method calls |progress| with the state and cost of the node that
  was just popped.
  So that a search problem does not take too long without success, may give a
  |max_states_to_expand| after which the search stops and returns None.
  """
  # Trivial case: the start already satisfies the goal.
  if goal_test(start_node.state):
    return start_node, 0
  agenda = Priority_Queue()
  # Priority = path cost + heuristic. With best_first=True the
  # (not best_first) factor zeroes the path cost, giving greedy best-first.
  agenda.push(start_node, (not best_first) * start_node.cost +
      heuristic(start_node.state))
  expanded = set()
  while agenda:
    parent, cost = agenda.pop()
    progress(parent.state, cost)
    # Skip states already expanded (a cheaper copy was processed earlier).
    if parent.state not in expanded:
      if goal_test(parent.state):
        return parent, len(expanded)
      expanded.add(parent.state)
      for child in parent.get_children():
        if child.state not in expanded:
          agenda.push(child, (not best_first) * child.cost +
              heuristic(child.state))
      # Safety valve: bail out if the search has grown too large.
      if max_states_to_expand and len(expanded) > max_states_to_expand:
        if PRINT_FAIL_REASON:
          if verbose:
            print 'exceeded number of states to expand'
        return None, len(expanded)
  # Agenda emptied without reaching the goal.
  if PRINT_FAIL_REASON:
    if verbose:
      print 'exhausted search space'
  return None, len(expanded)
| [
"[email protected]"
] | |
70be3975013551c5c951c1f9da8444e2c6273397 | 9eb48a3b8d5b1127012579a818ad349d21df2414 | /Django2.6/djusers2/djusers2/urls.py | cb92ccbd44dd9b5cf5eef70172579be1432e0fdf | [] | no_license | rahulsayon/Django1-to-Django-1.8 | b864deb46529c29a6cd424e3c9f1e99baa8942af | 2f4ae9343b631ff53caa50c8a822c7b4718c5512 | refs/heads/master | 2022-12-11T17:02:24.183982 | 2020-09-06T15:31:02 | 2020-09-06T15:31:02 | 293,305,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | """djusers2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from accounts.views import register , user_login ,user_logout, activate_user_view
# URL routing table. Django resolves URLs top-down to the first match, so the
# duplicate path('logout/', user_logout) entry that used to follow the first
# one was unreachable dead code and has been removed.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('register/', register),
    path('login/', user_login),
    path('logout/', user_logout),
    path('activate/<slug:code>', activate_user_view),
]
| [
"[email protected]"
] | |
808034c7ccf8082a00e739cd27d0b9f1e4d28040 | c4af06a090818ea05b3e6c11866406b4a5d3378a | /diary/tests/test_views.py | d75d629eb6e2caf9d9a6ef65d1557b70f5eb02f1 | [] | no_license | shige-horiuchi/private_diary | c88adff27bf4208ca7451bff841e5f300ac64d0a | e8b3cb63129c73d6c98f530ef543c19b02a5e79c | refs/heads/master | 2021-01-03T19:03:29.599143 | 2020-02-21T06:51:37 | 2020-02-21T06:51:37 | 240,201,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,717 | py | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse_lazy
from ..models import Diary
class LoggedInTestCase(TestCase):
    """Custom TestCase base that performs the setup shared by all test
    classes: create a test user and log the test client in."""
    def setUp(self):
        """Per-test setup executed before each test method."""
        # Password for the test user (placeholder value).
        self.password = '<ログインパスワード>'
        # Create the test user used by each test method and keep it on the
        # instance so subclasses can reference it.
        self.test_user = get_user_model().objects.create_user(
            username='<ログインユーザー名>',
            email='<ログインユーザーのメールアドレス>',
            password=self.password)
        # Log the test client in as that user.
        self.client.login(email=self.test_user.email, password=self.password)
class TestDiaryCreateView(LoggedInTestCase):
    """Tests for DiaryCreateView."""
    def test_create_diary_success(self):
        """Verify that creating a diary entry succeeds."""
        # POST parameters.
        params = {'title': 'テストタイトル',
                  'content': '本文',
                  'photo1': '',
                  'photo2': '',
                  'photo3': ''}
        # Execute the create view (POST).
        response = self.client.post(reverse_lazy('diary:diary_create'), params)
        # Verify the redirect to the diary list page.
        self.assertRedirects(response, reverse_lazy('diary:diary_list'))
        # Verify the diary record was stored in the DB.
        self.assertEqual(Diary.objects.filter(title='テストタイトル').count(), 1)
    def test_create_diary_failure(self):
        """Verify that creating a diary entry fails without required fields."""
        # Execute the create view (POST) with no parameters.
        response = self.client.post(reverse_lazy('diary:diary_create'))
        # The required 'title' field is missing, so a form error is expected.
        # (The expected message is Django's localized "this field is required"
        # text; it is runtime data and must stay exactly as written.)
        self.assertFormError(response, 'form', 'title', 'このフィールドは必須です。')
class TestDiaryUpdateView(LoggedInTestCase):
    """Tests for DiaryUpdateView."""
    def test_update_diary_success(self):
        """Verify that editing a diary entry succeeds."""
        # Create the diary record to edit.
        diary = Diary.objects.create(user=self.test_user, title='タイトル編集前')
        # POST parameters (new title).
        params = {'title': 'タイトル編集後'}
        # Execute the update view (POST).
        response = self.client.post(reverse_lazy('diary:diary_update', kwargs={'pk': diary.pk}), params)
        # Verify the redirect to the diary detail page.
        self.assertRedirects(response, reverse_lazy('diary:diary_detail', kwargs={'pk': diary.pk}))
        # Verify the record was updated in the DB.
        self.assertEqual(Diary.objects.get(pk=diary.pk).title, 'タイトル編集後')
    def test_update_diary_failure(self):
        """Verify that editing a nonexistent diary entry fails."""
        # Execute the update view (POST) against a pk that does not exist.
        response = self.client.post(reverse_lazy('diary:diary_update', kwargs={'pk': 999}))
        # Editing a missing record should yield 404.
        self.assertEqual(response.status_code, 404)
class TestDiaryDeleteView(LoggedInTestCase):
    """Tests for DiaryDeleteView."""
    def test_delete_diary_success(self):
        """Verify that deleting a diary entry succeeds."""
        # Create the diary record to delete.
        diary = Diary.objects.create(user=self.test_user, title='タイトル')
        # Execute the delete view (POST).
        response = self.client.post(reverse_lazy('diary:diary_delete', kwargs={'pk': diary.pk}))
        # Verify the redirect to the diary list page.
        self.assertRedirects(response, reverse_lazy('diary:diary_list'))
        # Verify the record was removed from the DB.
        self.assertEqual(Diary.objects.filter(pk=diary.pk).count(), 0)
    def test_delete_diary_failure(self):
        """Verify that deleting a nonexistent diary entry fails."""
        # Execute the delete view (POST) against a pk that does not exist.
        response = self.client.post(reverse_lazy('diary:diary_delete', kwargs={'pk': 999}))
        # Deleting a missing record should yield 404.
        self.assertEqual(response.status_code, 404)
| [
"[email protected]"
] | |
2210cba8d3da1e4a07b474131e7a1c9266cffc5a | 35522da66f15ee51a251b008b39d3457e70cf7de | /web/nut/models/NUTInput.py | cc23dbe18e3c2bb3285d3337197789cb5d7fa33c | [] | no_license | yeleman/nut | 34d74e72d137903285f3938f3165cefb45afb7ea | 5e68ae23df6c4d77a055dfbe85ae37a9fcdc4cd0 | refs/heads/master | 2016-09-06T03:19:55.898651 | 2012-02-06T18:30:25 | 2012-02-06T18:30:25 | 2,733,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/env python
# encoding=utf_8
# maintainer: rgaudin
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
class NUTInput(models.Model):
    """Input reference item for the 'nut' app, keyed by a short slug."""

    class Meta:
        app_label = 'nut'
        # Translated labels shown in the Django admin.
        verbose_name = _(u"Input")
        verbose_name_plural = _(u"Inputs")

    # Human-readable label for this input.
    name = models.CharField(_(u"Name"), max_length=50)
    # Short identifier; doubles as the primary key (no auto id column).
    slug = models.SlugField(_(u"Slug"), max_length=15, primary_key=True)

    def __unicode__(self):
        # Python 2-era string representation used by Django for display.
        return self.name
| [
"[email protected]"
] | |
8d48f45e0e6026af661aef287d578466e41d8245 | d22db204c665d16847447551cedc07756d357eb2 | /hydrus/client/gui/ClientGUICore.py | 11dfa96ae748ebcb4c2a0789eebcbcacc9172d7b | [
"WTFPL"
] | permissive | Suika/hydrus | 9d5070d47c328b7054a9699de310ce580e563528 | 4b2b15e152e4bed900aa972c7d4b27f7bf242f29 | refs/heads/master | 2023-05-28T00:32:50.364999 | 2023-05-10T20:22:34 | 2023-05-10T20:22:34 | 237,063,790 | 1 | 2 | NOASSERTION | 2022-10-29T22:36:54 | 2020-01-29T19:23:21 | Python | UTF-8 | Python | false | false | 2,153 | py | from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.client.gui import ClientGUIMenus
class GUICore(QC.QObject):
    """Process-wide singleton QObject that tracks whether a menu is open
    and provides a helper for popping up context menus."""

    my_instance = None

    def __init__(self):
        QC.QObject.__init__(self)

        # True while a menubar or popup menu is showing.
        self._menu_open = False

        GUICore.my_instance = self

    @staticmethod
    def instance() -> 'GUICore':
        """Return the singleton, raising if it has not been constructed yet."""
        if GUICore.my_instance is None:
            raise Exception('GUICore is not yet initialised!')

        return GUICore.my_instance

    def MenubarMenuIsOpen(self):
        self._menu_open = True

    def MenubarMenuIsClosed(self):
        self._menu_open = False

    def MenuIsOpen(self):
        return self._menu_open

    def PopupMenu(self, window: QW.QWidget, menu: QW.QMenu):
        """Show *menu* at the cursor, tracking open state, then destroy it."""
        if HC.PLATFORM_MACOS and window.window().isModal():
            # Big Sur workaround: the menu shows over a modal dialog but the
            # mouse cannot interact with it, so optionally fall back to a
            # debug dialog and bail out.
            from hydrus.core import HydrusGlobals as HG

            if HG.client_controller.new_options.GetBoolean('do_macos_debug_dialog_menus'):
                from hydrus.client.gui import ClientGUICoreMenuDebug

                ClientGUICoreMenuDebug.ShowMenuDialog(window, menu)

            ClientGUIMenus.DestroyMenu(menu)

            return

        if not menu.isEmpty():
            self._menu_open = True

            # Popping up at the current cursor position (rather than e.g.
            # window.mapToGlobal(QC.QPoint(0, 0))) feels better in practice.
            menu.exec_(QG.QCursor.pos())

            self._menu_open = False

        ClientGUIMenus.DestroyMenu(menu)


core = GUICore.instance
| [
"[email protected]"
] | |
d7a93b3db19f615ebcdd0ac1d10a919df7fbe912 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03946/s474179666.py | 715113807ff685cf063ff9f19abb2a900c2d1f50 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | n,t=map(int,input().split())
def solve(prices):
    """Scan *prices* right-to-left, tracking the maximum over the suffix.

    For each position i, the candidate profit is max(prices[i+1:]) - prices[i].
    Collect every position i achieving the best profit (the "buy" side) and
    every position holding the suffix maximum when that profit was matched
    (the "sell" side). Returns the smaller of the two counts; 0 if the list
    has fewer than two elements or no non-negative profit is found.
    """
    best_diff = 0
    suffix_max = 0
    suffix_max_positions = []
    buy_positions = set()
    sell_positions = set()

    # Walk from the second-to-last element down to index 0, so that
    # suffix_max is always the maximum of prices[i+1:].
    for i in range(len(prices) - 2, -1, -1):
        right = prices[i + 1]
        if suffix_max < right:
            suffix_max = right
            suffix_max_positions = [i + 1]
        elif suffix_max == right:
            suffix_max_positions.append(i + 1)

        diff = suffix_max - prices[i]
        if diff > best_diff:
            # Strictly better profit: restart both candidate sets.
            best_diff = diff
            buy_positions = {i}
            sell_positions = set(suffix_max_positions)
        elif diff == best_diff:
            # Tie: accumulate additional candidates on both sides.
            buy_positions.add(i)
            sell_positions |= set(suffix_max_positions)

    return min(len(buy_positions), len(sell_positions))


def main():
    # The first input line (n and t) is consumed by the preceding statement;
    # this reads the price list and prints the answer, as before.
    prices = list(map(int, input().split()))
    print(solve(prices))


if __name__ == "__main__":
    main()
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.