Dataset columns (name: type, observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, 5 to 283 characters
- content_id: string, length 40
- detected_licenses: sequence, 0 to 41 items
- license_type: string, 2 classes
- repo_name: string, 7 to 96 characters
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 58 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 12.7k to 662M, may be null
- star_events_count: int64, 0 to 35.5k
- fork_events_count: int64, 0 to 20.6k
- gha_license_id: string, 11 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 43 classes
- src_encoding: string, 9 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 7 to 5.88M
- extension: string, 30 classes
- content: string, 7 to 5.88M characters
- authors: sequence, length 1
- author: string, 0 to 73 characters

Each record below lists these fields in this order, separated by "|".
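As a convenience, a dataset with the schema above could be loaded and inspected with the datasets library; the dataset path used here is a placeholder, not the real repository name.

from datasets import load_dataset

ds = load_dataset("org/code-files-dataset", split="train")  # placeholder dataset name
print(ds.features["content"])                 # string column holding the raw file text
row = ds[0]
print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])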
5cf854bdd02a84baf5207c995481de3cd741ec6b | 076ee22413ccb5860885e5fbe50f3d2ff46b50b4 | /module/Err.py | 4b69be9633fbca079774bf553360e64012d42284 | [] | no_license | jojo-cn/chat-svr | bca99f8046a0c202936a106b88beb89cf89de485 | cfda32032cb5273e4cd97c843dcd54e188008161 | refs/heads/master | 2020-03-19T03:07:52.290534 | 2018-06-24T09:24:00 | 2018-06-24T09:24:00 | 135,698,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | """
Error codes
"""
class Err(object):
""" """
def __init__(self):
super(Err, self).__init__()
# User module handle error
USER_LOGON_FAILED = 'Username or password is wrong'
# User already logon
USER_ALREADY_LOGON = 'The user is already logon'
| [
"[email protected]"
] | |
c810b83e4d978275269dbf2edf81ba3749d40a39 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/general_remark_3.py | 37e4e5426aafa26b117cdb81d48dcfa92904cffc | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 4,367 | py | from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDate, XmlDateTime
from travelport.models.type_direction_3 import TypeDirection3
from travelport.models.type_element_status_4 import TypeElementStatus4
from travelport.models.type_product_3 import TypeProduct3
__NAMESPACE__ = "http://www.travelport.com/schema/common_v33_0"
@dataclass
class GeneralRemark3:
"""A textual remark container to hold any printable text.
(max 512 chars)
Parameters
----------
remark_data
Actual remarks data.
booking_traveler_ref
Reference to Booking Traveler.
key
category
A category to group and organize the various remarks. This is not
required, but it is recommended.
type_in_gds
supplier_type
The type of product this reservation is relative to
provider_reservation_info_ref
Provider reservation reference key.
provider_code
supplier_code
direction
Direction Incoming or Outgoing of the GeneralRemark.
create_date
The date and time that this GeneralRemark was created.
use_provider_native_mode
Will be true when terminal process required, else false
el_stat
This attribute is used to show the action results of an element.
Possible values are "A" (when elements have been added to the UR)
and "M" (when existing elements have been modified). Response only.
key_override
If a duplicate key is found where we are adding elements in some
cases like URAdd, then instead of erroring out set this attribute to
true.
"""
class Meta:
name = "GeneralRemark"
namespace = "http://www.travelport.com/schema/common_v33_0"
remark_data: None | str = field(
default=None,
metadata={
"name": "RemarkData",
"type": "Element",
"required": True,
}
)
booking_traveler_ref: list[str] = field(
default_factory=list,
metadata={
"name": "BookingTravelerRef",
"type": "Element",
"max_occurs": 999,
}
)
key: None | str = field(
default=None,
metadata={
"name": "Key",
"type": "Attribute",
}
)
category: None | str = field(
default=None,
metadata={
"name": "Category",
"type": "Attribute",
"max_length": 10,
}
)
type_in_gds: None | str = field(
default=None,
metadata={
"name": "TypeInGds",
"type": "Attribute",
"max_length": 30,
}
)
supplier_type: None | TypeProduct3 = field(
default=None,
metadata={
"name": "SupplierType",
"type": "Attribute",
}
)
provider_reservation_info_ref: None | str = field(
default=None,
metadata={
"name": "ProviderReservationInfoRef",
"type": "Attribute",
}
)
provider_code: None | str = field(
default=None,
metadata={
"name": "ProviderCode",
"type": "Attribute",
"min_length": 2,
"max_length": 5,
}
)
supplier_code: None | str = field(
default=None,
metadata={
"name": "SupplierCode",
"type": "Attribute",
"min_length": 2,
"max_length": 5,
}
)
direction: None | TypeDirection3 = field(
default=None,
metadata={
"name": "Direction",
"type": "Attribute",
}
)
create_date: None | XmlDateTime = field(
default=None,
metadata={
"name": "CreateDate",
"type": "Attribute",
}
)
use_provider_native_mode: bool = field(
default=False,
metadata={
"name": "UseProviderNativeMode",
"type": "Attribute",
}
)
el_stat: None | TypeElementStatus4 = field(
default=None,
metadata={
"name": "ElStat",
"type": "Attribute",
}
)
key_override: None | bool = field(
default=None,
metadata={
"name": "KeyOverride",
"type": "Attribute",
}
)
| [
"[email protected]"
] | |
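For illustration only, an object of the generated class above could be constructed like this (all field values are invented; the constraints follow the field metadata shown in the class):

from travelport.models.general_remark_3 import GeneralRemark3

remark = GeneralRemark3(
    remark_data="Seat change requested by traveler",
    booking_traveler_ref=["bt-1"],
    category="ITIN",      # Category allows at most 10 characters
    provider_code="1G",   # ProviderCode must be 2 to 5 characters
)
print(remark.remark_data, remark.category)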
6da402554a5677cc3feb6dd00f350a495e2d3355 | 2dc9ee4a8c39d00c255f52e8af2486e7c2891a98 | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_private_endpoint_connections_operations.py | 742624e8eb6a11c2dcc7c8daf69f8f34d2441314 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | JoshuaLai/azure-sdk-for-python | fd780c2ab145a35ec0bf9519c4d08c928081e79c | 07614796a332bcfeed35dddee9dbfc2f5487a39f | refs/heads/master | 2023-04-04T17:49:58.177790 | 2021-04-06T21:31:48 | 2021-04-06T21:31:48 | 348,842,434 | 0 | 0 | MIT | 2021-03-17T20:24:55 | 2021-03-17T20:24:54 | null | UTF-8 | Python | false | false | 23,467 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Gets a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
request: "_models.PrivateEndpointConnection",
**kwargs
) -> "_models.PrivateEndpointConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
request: "_models.PrivateEndpointConnection",
**kwargs
) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
"""Approve or reject a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param request: Request body of private endpoint connection to create.
:type request: ~azure.mgmt.synapse.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> Optional["_models.OperationResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('OperationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> AsyncLROPoller["_models.OperationResource"]:
"""Delete a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
workspace_name: str,
**kwargs
) -> AsyncIterable["_models.PrivateEndpointConnectionList"]:
"""Lists private endpoint connection in workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.PrivateEndpointConnectionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections'} # type: ignore
| [
"[email protected]"
] | |
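A rough usage sketch for the operations class above; the resource names are placeholders, and exposing this group as the client attribute private_endpoint_connections is an assumption based on the usual Azure SDK convention.

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.synapse.aio import SynapseManagementClient

async def list_connections():
    async with DefaultAzureCredential() as credential:
        async with SynapseManagementClient(credential, "<subscription-id>") as client:
            # list() returns an AsyncItemPaged, so it is consumed with async for
            async for connection in client.private_endpoint_connections.list(
                resource_group_name="my-rg", workspace_name="my-workspace"
            ):
                print(connection.name, connection.id)

asyncio.run(list_connections())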
1c2d8c0ec9171bfd84af10d6470c3cc58490aa3e | f4c1280bb0ad92ed2e17024b15890b2bcfe86c38 | /DWF-server/controller/task_scheduler.py | 8b0c32fe8810d9ec395a27b50ab7c1bc131d0b16 | [
"Apache-2.0"
] | permissive | aladics/DeepWaterFramework | 5b5d9f8cc64f05cecc796f1c52d50e9ed20987e8 | 30e9c0145300e24834a3d4a7dde25002c638dd5a | refs/heads/master | 2023-07-14T14:52:35.914260 | 2021-02-22T18:38:57 | 2021-02-22T18:38:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | from model import Priority
from controller import task_store as ts
class TaskScheduler:
def __init__(self):
self.running_experiments = []
self.running_immediate_experiments = []
def get_next(self):
return self._get_exp_id(self.running_immediate_experiments) or self._get_exp_id(self.running_experiments)
@staticmethod
def _get_exp_id(exp_list):
if not exp_list:
return None
res = exp_list[0]
exp_list.pop(0)
exp_list.append(res)
return res
def add_experiment(self, exp_id, priority):
if exp_id in self.running_experiments or exp_id in self.running_immediate_experiments:
return
if priority < Priority.IMMEDIATE:
self.running_experiments.append(exp_id)
for _ in range(0, priority * priority):
self.running_experiments.insert(0, exp_id)
else:
self.running_immediate_experiments.append(exp_id)
def remove_experiment(self, exp_id):
if exp_id not in self.running_experiments and exp_id not in self.running_immediate_experiments:
return
if exp_id in self.running_immediate_experiments:
self.running_immediate_experiments.remove(exp_id)
else:
while exp_id in self.running_experiments:
self.running_experiments.remove(exp_id)
def change_exp_priority(self, exp_id, priority):
self.remove_experiment(exp_id)
self.add_experiment(exp_id, priority)
def check_experiment(self, exp_id, priority):
exp_has_task, _ = ts.search_task_by_order(exp_id)
if exp_has_task:
self.add_experiment(exp_id, priority)
else:
self.remove_experiment(exp_id)
scheduler = TaskScheduler()
def init(exp_list):
for exp_id, exp in exp_list:
scheduler.check_experiment(exp_id, exp.priority)
| [
"[email protected]"
] | |
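A quick illustration of the weighting used above (assuming Priority.IMMEDIATE is larger than the priorities shown): an experiment with priority p below IMMEDIATE ends up with 1 + p*p slots in the round-robin list, so get_next() returns it proportionally more often.

sched = TaskScheduler()
sched.add_experiment("exp-low", 1)    # 1 + 1*1 = 2 slots
sched.add_experiment("exp-high", 2)   # 1 + 2*2 = 5 slots
picks = [sched.get_next() for _ in range(7)]
print(picks.count("exp-high"), picks.count("exp-low"))  # 5 2 over one full cycle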
f05ef3db35b450b438b8fb35fa7352dd1b07cac9 | 6dcc0eca403f58dc48bab0131c5343080d67e548 | /pushover2.py | 2fba86343518cd3a66f38d7f7e15db104035618b | [] | no_license | cosmicc/galaxymodules | fd81f9ddac485030f593d02797e986b1be36c86d | e35faecf9c6873b4210011fd6158229632f1b074 | refs/heads/master | 2020-03-14T00:28:48.777772 | 2018-05-08T02:38:53 | 2018-05-08T02:38:53 | 131,357,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | #!/usr/bin/python2
import logging
import urllib
import urllib2
from configparser import ConfigParser
import json
import logging
configfile = '/etc/galaxymediatools.cfg'
log = logging.getLogger(__name__)
config = ConfigParser()
config.read(configfile)
def pushover(app_key, title='', msg=''):
user_key = config.get('pushover', 'user_key')
    # Build the request settings in a separately named dict so the module-level
    # ConfigParser `config` is not shadowed (shadowing it would make the
    # config.get('pushover', 'user_key') call above raise UnboundLocalError).
    po_config = {
        'api': 'https://api.pushover.net/1/messages.json',
        'user': user_key,
        'token': app_key
    }
    data = urllib.urlencode({
        'user': po_config['user'],
        'token': po_config['token'],
        'title': title,
        'message': msg
    })
    try:
        req = urllib2.Request(po_config['api'], data)
response = urllib2.urlopen(req)
except urllib2.HTTPError:
log.error('Pushover notification failed. HTTPError')
return False
res = json.load(response)
if res['status'] == 1:
log.info('Pushover notification successfully sent')
else:
log.error('Pushover notification failed')
| [
"[email protected]"
] | |
1f177c883e449841bbf202d21e9fa569fa6e0707 | a478ef3c5ac85dc108859fcc4cf4902858a54fb8 | /ML/maclib/bin/wheel | fe2c8f6bb880a4e49f000f82d0e5ec47b76dec8c | [] | no_license | ramtiwary/ML_Programs | 57ab4328071837f3e56da7f4cb1fefb7693c0994 | ace46bf1d493283a3f0a7d15538646c7514a7d0e | refs/heads/master | 2020-06-02T20:34:24.975842 | 2019-06-13T06:24:06 | 2019-06-13T06:24:06 | 191,300,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | #!/home/admin1/Desktop/ML/maclib/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
026ae6da65a3565ed41343c71461ca05cff9ab4f | cf80c258d031c9098e933fd72b4182f06c09971f | /cnn.py | 79932b29edf7c9bca4c8cbbbefd58579d73e9545 | [] | no_license | ceyhunozkaptan/Convolutional-Neural-Network | 51041d447b3dcc43d4b1e00019640004a3697682 | ded11d803183a2e4d26d39f8ea6adae769f1fc54 | refs/heads/master | 2022-03-30T15:58:52.508113 | 2020-04-19T01:50:21 | 2020-04-19T01:50:21 | 256,891,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,972 | py | # %% import standard PyTorch modules
import torch
import torch.nn as nn
import torch.optim as optim
# import torchvision module to handle image manipulation
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("PyTorch is running on GPU!")
else:
device = torch.device("cpu")
print("PyTorch is running on CPU!")
# %% CNN MODELS
class CNN_model(nn.Module):
def __init__(self):
super().__init__()
# define layers
self.layer1_conv = nn.Sequential(
nn.BatchNorm2d(num_features=1), # Keeping estimates of mean/variance during training to normalize data during testing/evaluation [https://arxiv.org/pdf/1502.03167.pdf]
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2), #1@28x28 -> 32@28x28
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2), #32@28x28 -> 32@28x28
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), #32@28x28 -> 32@14x14
nn.Dropout(0.2)) # Overfitting is reduced by randomly omitting the feature detectors on every forward call [https://arxiv.org/abs/1207.0580]
self.layer2_conv = nn.Sequential(
nn.BatchNorm2d(num_features=32),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2), #32@14x14 -> 64@14x14
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5, padding=2), #64@14x14 -> 64@14x14
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), #64@14x14 -> 64@7x7
nn.Dropout(0.3))
self.layer3_fc = nn.Sequential(
nn.BatchNorm2d(num_features=64),
nn.Flatten(),
nn.Linear(in_features=64*7*7, out_features=1000),
nn.ReLU())
self.layer4_fc = nn.Sequential(
nn.Linear(in_features=1000, out_features=100),
nn.ReLU())
# don't need softmax here since we'll use cross-entropy as activation.
self.layer5_out = nn.Linear(in_features=100, out_features=10)
# define forward function
def forward(self, t):
t = self.layer1_conv(t)
t = self.layer2_conv(t)
t = self.layer3_fc(t)
t = self.layer4_fc(t)
t = self.layer5_out(t)
return t
# %% PARAMETERS & DATA
lr = 0.001
batch_size = 100
epochs = 20
val_set_ratio = 0.1
criterion = nn.CrossEntropyLoss()
# Using standard FashionMNIST dataset
train_set = torchvision.datasets.FashionMNIST(
root = './data/FashionMNIST',
train = True,
download = True,
transform = transforms.Compose([
transforms.ToTensor()
])
)
# train_set, val_set = torch.utils.data.random_split(train_set, [int((1-val_set_ratio)*len(train_set)), int(val_set_ratio*len(train_set))])
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
# val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=True)
test_set = torchvision.datasets.FashionMNIST(
root = './data/FashionMNIST',
train = False,
download = True,
transform = transforms.Compose([
transforms.ToTensor()
])
)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1000, shuffle=False)
# %% CNN TRAINING & VALIDATION
CNN = CNN_model()
CNN.to(device)
optimizer = optim.Adam(CNN.parameters(), lr=lr)
acc_cnn_train = np.zeros(epochs)
loss_cnn_train = np.zeros(epochs)
acc_cnn_val = np.zeros(epochs)
loss_cnn_val = np.zeros(epochs)
best_val_acc = 0
for epoch in range(epochs):
#Training Phase
CNN.train() # set the CNN to training mode
count = 0
correct = 0
total_loss = 0
prog_bar = tqdm(train_loader, desc="Epoch %d (Training the model) " %(epoch), leave=False, ncols = 100)
for images,labels in prog_bar:
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad() # Zero out the gradients in the optimizer
# forward + backward + optimize
preds = CNN(images) # This calls the forward() function
loss = criterion (preds, labels) # Calculate the loss
loss.backward() # Backpropagate the gradients
optimizer.step() # Update the parameters based on the backpropagation
total_loss += loss
count += labels.size(0)
correct += preds.argmax(dim=1).eq(labels).sum().item()
loss_cnn_train[epoch] = total_loss/len(train_loader)
acc_cnn_train[epoch] = (correct/count)*100.0
print('Epoch %d : Training Accuracy = %.3f%%, Loss = %.3f (%.2fit/s)' % (epoch, acc_cnn_train[epoch], loss_cnn_train[epoch], 1/prog_bar.avg_time) )
# Validation Phase
CNN.eval() # Set the CNN to test mode
with torch.no_grad(): # Disables the autograd functionality in the model
correct = 0
count = 0
total_loss = 0
for i, (images, labels) in enumerate(test_loader):
images, labels = images.to(device), labels.to(device)
preds = CNN(images)
loss = criterion (preds, labels)
total_loss += loss
count += labels.size(0)
correct += preds.argmax(dim=1).eq(labels).sum().item()
loss_cnn_val[epoch] = total_loss/len(test_loader)
acc_cnn_val[epoch] = (correct/count)*100.0
print('Validation Accuracy = %.3f%%, Loss = %.3f' % (acc_cnn_val[epoch], loss_cnn_val[epoch]))
if (best_val_acc < acc_cnn_val[epoch]):
best_val_acc = acc_cnn_val[epoch]
torch.save(CNN, 'best_model')
# %% CNN TESTING
CNN_best = torch.load('best_model')
CNN_best.to(device)
CNN_best.eval() # Set the CNN_best to test mode
acc_cnn_test_batch = np.zeros(len(test_loader))
loss_cnn_test_batch = np.zeros(len(test_loader))
with torch.no_grad(): # Disables the autograd functionality in the model
correct = 0
count = 0
for i, (images, labels) in enumerate(test_loader):
images, labels = images.to(device), labels.to(device)
preds = CNN_best(images)
loss = criterion (preds, labels)
count += labels.size(0)
correct += preds.argmax(dim=1).eq(labels).sum().item()
acc_cnn_test_batch[i] = preds.argmax(dim=1).eq(labels).sum().item()/labels.size(0)*100.0
loss_cnn_test_batch[i] = loss
print('Batch %d : Test Accuracy = %.3f%%, Loss = %.3f' % (i,acc_cnn_test_batch[i], loss_cnn_test_batch[i]))
print('Overall Accuracy = %.3f%%' % ((correct/count)*100.0))
acc_cnn_test = (correct/count)*100
# %% PlOTTING RESULTS
f_test = plt.figure(figsize=(8,5))
plt.plot(acc_cnn_test_batch)
plt.title("Test Accuracy on Different Batches (Overall = %.3f%%)" %(acc_cnn_test))
plt.ylabel("Accuracy (%)")
plt.xlabel('Batch Number')
plt.grid(True, linestyle=':')
plt.xticks(np.arange(0, len(test_loader)))
plt.autoscale(enable=True, axis='x', tight=True)
f_train = plt.figure(figsize=(8,5))
plt.plot(acc_cnn_train, label='Training Accuracy')
plt.plot(acc_cnn_val, label='Validation Accuracy')
plt.title("Training and Validation Accuracy")
plt.ylabel("Accuracy (%)")
plt.xlabel('Number of Epochs')
plt.grid(True, linestyle=':')
plt.xticks(np.arange(0, len(acc_cnn_train),step=2))
plt.autoscale(enable=True, axis='x', tight=True)
f_train_loss = plt.figure(figsize=(8,5))
plt.plot(loss_cnn_train, label='Training Loss')
plt.plot(loss_cnn_val, label='Validation Loss')
plt.title("Training and Validation Loss")
plt.ylabel("Cross Entropy Loss")
plt.xlabel('Number of Epochs')
plt.grid(True, linestyle=':')
plt.xticks(np.arange(0, len(acc_cnn_train),step=2))
plt.autoscale(enable=True, axis='x', tight=True)
plt.show()
| [
"[email protected]"
] | |
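As a quick sanity check of the layer shapes annotated in the comments above (1@28x28 -> 32@14x14 -> 64@7x7 -> 10 logits), a dummy batch can be pushed through the untrained CNN_model defined in this script:

import torch
net = CNN_model()
net.eval()
with torch.no_grad():
    logits = net(torch.zeros(1, 1, 28, 28))   # one grayscale 28x28 image
print(logits.shape)   # torch.Size([1, 10]), one score per Fashion-MNIST class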
066c3775bbd45fe401a7ff580b9ac70a1e898d56 | 1f27158da1b1c8af9a6f3cbb0964b16034e92c5b | /src/no_symptoms_classif.py | d70404cf6e129cdabcb298afd9ea9e30ecae69dd | [] | no_license | GaetanChambres/respiratory_classifier | d1791689102575505880f78d181e200a0a100a8d | c474e7ba533230747dd32c9c82675a6e8bb87ba1 | refs/heads/master | 2021-06-29T22:39:30.712593 | 2020-10-23T14:18:18 | 2020-10-23T14:18:18 | 178,048,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,254 | py | import pandas as pd
import numpy as np
import xgboost as xgb
import csv as csv
import sys
# from sklearn.model_selection import train_test_split
import sklearn.metrics as sklm
import warnings
warnings.filterwarnings("ignore")
def csv_nb_cols(fname,delimiter):
line = fname.readline()
data = line.split(delimiter)
nb_col = len(data)
return nb_col
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
arguments = sys.argv
dir = arguments[1]
# input_train = "./experiments/data/equal_split/ft_train.csv"
# info_train = "./experiments/data/equal_split/train.csv"
# input_train = "./experiments/data/lookalike/ft_train.csv"
# info_train = "./experiments/data/lookalike/train.csv"
# input_train = "./experiments/data/all/ft_train.csv"
# info_train = "./experiments/data/all/train.csv"
# input_train = "./experiments/data/equal_split/ft_test.csv"
# info_train = "./experiments/data/equal_split/test.csv"
# input_train = "./experiments/data/lookalike/ft_test.csv"
# info_train = "./experiments/data/lookalike/test.csv"
# input_train = "./experiments/data/all/ft_test.csv"
# info_train = "./experiments/data/all/test.csv"
# input_train = "./experiments/data/horse_v3/ft_test.csv"
# info_train = "./experiments/data/horse_v3/test.csv"
input_train = dir+"ft_test.csv"
info_train = dir+"test.csv"
# -----------------------------------------------------------------------------
# input_test = "./experiments/data/equal_split/ft_test.csv"
# info_test = "./experiments/data/equal_split/test.csv"
# input_test = "./experiments/data/lookalike/ft_test.csv"
# info_test = "./experiments/data/lookalike/test.csv"
# input_test = "./experiments/data/all/ft_test.csv"
# info_test = "./experiments/data/all/test.csv"
# input_test = "./experiments/data/equal_split/ft_train.csv"
# info_test = "./experiments/data/equal_split/train.csv"
# input_test = "./experiments/data/lookalike/ft_train.csv"
# info_test = "./experiments/data/lookalike/train.csv"
# input_test = "./experiments/data/all/ft_train.csv"
# info_test = "./experiments/data/all/train.csv"
# input_test = "./experiments/data/horse_v3/ft_train.csv"
# info_test = "./experiments/data/horse_v3/train.csv"
input_test = dir+"ft_train.csv"
info_test = dir+"train.csv"
with open(input_train) as f1:
nbcols_train = csv_nb_cols(f1,delimiter = ",")
# print(nbcols_train)
with open(input_test) as f2:
nbcols_test = csv_nb_cols(f2,delimiter = ",")
# print(nbcols_test)
print("-- Loading train files")
features_train = np.loadtxt(input_train, delimiter=",", skiprows = 0, usecols=range(1,nbcols_train))
# print(train_dataset)
labels_train = np.loadtxt(info_train,delimiter = ',',skiprows = 0, usecols=range(1,2))
# labels = train_info[:,0:1]
# labels count
unique, counts = np.unique(labels_train, return_counts=True)
print("Train class counter :"+str(dict(zip(unique, counts))))
print("-- Loading test files")
features_test = np.loadtxt(input_test, delimiter=",", skiprows = 0, usecols=range(1,nbcols_test))
# print(features_test)
labels_test = np.loadtxt(info_test,delimiter = ',', skiprows = 0, usecols=range(1,2))
# print(labels_test)
# labels count
unique, counts = np.unique(labels_test, return_counts=True)
print("Test class counter :"+str(dict(zip(unique, counts))))
tree = xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=100,
n_jobs=1, nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=True, subsample=1)
print("-- Training model")
tree.fit(features_train,labels_train)
# xgb.plot_importance(tree,max_num_features = 25)
from matplotlib import pyplot
# xgb.plot_tree(model)
# pyplot.show()
print()
preds = tree.predict(features_test)
predictions = [round(value) for value in preds]
print("-- Testing model")
from sklearn.metrics import accuracy_score
# print(preds)
accuracy = accuracy_score(labels_test, preds)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
from sklearn.metrics import classification_report
print(classification_report(labels_test, preds))
| [
"[email protected]"
] | |
6a1ca19e78e7528364bc0b93d19a51ccb649f761 | f4be3422b28dda8802ea75368d665d17b634b83f | /babo/__init__.py | 5d6700be06951195e3f22bed513d5b4a0e966b6e | [] | no_license | carpedm20/babo | 0bab8abee49058fb4c0c6ab629f174d8a85d50a7 | 1fae47214918b4a64fc305787fb6a4df07c49768 | refs/heads/master | 2020-04-15T16:12:10.274756 | 2014-08-15T12:00:33 | 2014-08-15T12:00:33 | 22,943,235 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # -*- coding: utf-8 -*-
"""
babo
~~~~
The world will have a generation of idiots.. by Albert Einstein
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
__copyright__ = 'Copyright 2014 by Taehoon Kim'
__version__ = '0.0.1'
__license__ = 'BSD'
__author__ = 'Taehoon Kim'
__author_email__ = '[email protected]'
__url__ = 'http://github.com/carpedm20/babo'
__all__ = [
]
| [
"[email protected]"
] | |
64627dc136bf1d70631a5615c7dcda6512f17bd4 | d66cdcdf898744d3b697dffae0030bf6ed861fc3 | /src/item_manager/urls.py | 8b8746735ce07c072b443e8a8a51b8eebc32eb39 | [] | no_license | lautarobarba/item-manager | 9ab1ad69a8d91ca6e4ad1d8dbb29fb1386bae342 | 034ce075c5e5e246fed8fa9baf71ccfbffafc0ee | refs/heads/main | 2023-03-15T03:36:25.027355 | 2021-03-05T22:18:11 | 2021-03-05T22:18:11 | 341,726,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | """item_manager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', include('app.urls')),
#path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
]
| [
"[email protected]"
] | |
7d2e47d7ad466c15c34f6e332fe816a049e752ba | 157ccf908516e0707d88a30aa7048cdb68180154 | /Experiment/PythonOperators.py | b8c11146645d46427a67836208658b787c143c4d | [] | no_license | BhuiyanMH/Python3 | bed45ba55a7d0b71eec9b59da24e1ad01410e4ac | ef1615fd46e8b9793467a427c61eea9d161b75e3 | refs/heads/master | 2021-08-24T10:31:04.166448 | 2017-12-09T06:54:46 | 2017-12-09T06:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #floor division
print(15//6)
#exponent
print(2**5)
#Bitwise not
print(~2)
#bitwise XOR
print(3^4)
#bitwise Left shift
print(8<<1)
#bitwise right shift
print(32>>1)
#Identity operator
#is -> true if operands refers to the same object
#is not -> True if operands does not refers to the same object
#list are always different
x = 5
y = 5
print(x is y)
print(x is not y)
#membership operator
#in -> True if value or variable is found in the sequence
#not in -> True if value/variable is not found in the sequence
x = "hello world"
y={1:'a', 2:'b', 3:'c'}
print('h' in x)
print('a' in y)#match the keys
| [
"[email protected]"
] | |
7fd1c69b2e824a1d6bd46e9f2c6ba098cffdf4a6 | 0335a5fb8a553f243e29e5aee704238612c6dd11 | /doc/Sphinx/_static/vecto_maxwellian_plasma_3d.py | 7806e010b0d1477519f8bbc41a7559c245f7148c | [] | no_license | ALaDyn/Smilei | a91b093da10ae4953e5a8d3397846952029a9979 | 81d858dd97ecbceb71dac804ee009763d50eb836 | refs/heads/master | 2023-07-20T16:56:01.085438 | 2023-07-05T10:13:57 | 2023-07-05T10:13:57 | 62,309,652 | 5 | 0 | null | 2016-06-30T12:41:29 | 2016-06-30T12:41:28 | null | UTF-8 | Python | false | false | 2,603 | py | # __________________________________________________________
#
# Maxwellian homogeneous plasma namelist for SMILEI
# This script is used for the vectorization study
# __________________________________________________________
import os
import numpy as np
c = 299792458
lambdar = 1e-6
wr = 2*np.pi*c/lambdar
Te = 100./511. # electron & ion temperature in me c^2
Ti = 10./511. # electron & ion temperature in me c^2
n0 = 1.
lambdap = 2*np.pi/n0
Lde = np.sqrt(Te) # Debye length in units of c/\omega_{pe}
cell_length = [0.5*Lde,0.5*Lde,0.5*Lde]
# timestep (0.95 x CFL)
dt = 0.95 * cell_length[0]/np.sqrt(3.)
# Number of particles per cells
particles_per_cell = 10
# Vectorization
vectorization = "adaptive"
# Small patches that can fit in cache
cells_per_patch = [8,8,8]
# 24 cores per sockets so at least 48 patches per node to fill all cores
# Depends on the architecture
patches_per_node = [5,8,8]
# Simulation time
simulation_time = 100*dt
# Particle initialization
position_initialization = 'random'
# Density profile
def n0_(x,y,z):
return n0
# Number of nodes
nodes = [1,1,1]
# Number of patches
patches = [nodes[i]*patches_per_node[i] for i in range(3)]
# grid length
grid_length = [cells_per_patch[i]*patches[i]*cell_length[i] for i in range(3)]
Main(
geometry = "3Dcartesian",
interpolation_order = 2,
timestep = dt,
simulation_time = simulation_time,
cell_length = cell_length,
grid_length = grid_length,
number_of_patches = patches,
EM_boundary_conditions = [ ["periodic"] ],
patch_arrangement = "linearized_XYZ",
)
Vectorization(
mode = vectorization,
)
Species(
name = "proton",
position_initialization = position_initialization,
momentum_initialization = "mj",
particles_per_cell = particles_per_cell,
c_part_max = 1.0,
mass = 1836.0,
charge = 1.0,
charge_density = n0,
mean_velocity = [0., 0.0, 0.0],
temperature = [Ti],
pusher = "boris",
boundary_conditions = [
["periodic", "periodic"],
["periodic", "periodic"],
["periodic", "periodic"],
],
)
Species(
name = "electron",
position_initialization = position_initialization,
momentum_initialization = "mj",
particles_per_cell = particles_per_cell,
c_part_max = 1.0,
mass = 1.0,
charge = -1.0,
charge_density = n0,
mean_velocity = [0., 0.0, 0.0],
temperature = [Te],
pusher = "boris",
boundary_conditions = [
["periodic", "periodic"],
["periodic", "periodic"],
["periodic", "periodic"],
],
)
| [
"[email protected]"
] | |
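A back-of-the-envelope check of the derived quantities in the namelist above (all in normalized units):

import numpy as np
Te  = 100. / 511.               # electron temperature in m_e c^2
Lde = np.sqrt(Te)               # Debye length, about 0.442 c/omega_pe
dx  = 0.5 * Lde                 # cell size, about 0.221
dt  = 0.95 * dx / np.sqrt(3.)   # about 0.121, i.e. 95% of the 3D CFL limit
print(Lde, dx, dt)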
e037a3f03bb035f9294a2db24cabd7bccc5d1501 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/demo/time.py | 56ab715a7f7ed7eb6c4c24c99973845bd00f6ad4 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 1,515 | py | """Demo platform that offers a fake time entity."""
from __future__ import annotations
from datetime import time
from homeassistant.components.time import TimeEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the demo time platform."""
async_add_entities([DemoTime("time", "Time", time(12, 0, 0), "mdi:clock", False)])
class DemoTime(TimeEntity):
"""Representation of a Demo time entity."""
_attr_has_entity_name = True
_attr_name = None
_attr_should_poll = False
def __init__(
self,
unique_id: str,
device_name: str,
state: time,
icon: str,
assumed_state: bool,
) -> None:
"""Initialize the Demo time entity."""
self._attr_assumed_state = assumed_state
self._attr_icon = icon
self._attr_native_value = state
self._attr_unique_id = unique_id
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, unique_id)}, name=device_name
)
async def async_set_value(self, value: time) -> None:
"""Update the time."""
self._attr_native_value = value
self.async_write_ha_state()
| [
"[email protected]"
] | |
bc46fa92cc0e5aa96905e3ca9d4de129a510c935 | d9759cc2d395670909fcfc6e077592d2a9f0f808 | /venv/bin/flask | dd5fc33e65fa343038bb09cecdad066acd8eee4d | [] | no_license | milosnikic/DashProject | 2aec93a6941a80803d9375111f6cbf6d46100a4b | 60b5eb3ef9ab44826e9564274036d06c75364d6b | refs/heads/master | 2022-12-11T12:08:44.263222 | 2019-06-05T15:39:58 | 2019-06-05T15:39:58 | 190,413,685 | 0 | 0 | null | 2022-12-08T05:13:06 | 2019-06-05T14:46:49 | Python | UTF-8 | Python | false | false | 274 | #!/home/milos/Desktop/python_projects/data_visualization_crowd/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
dcfd89b86c66a498d7c4394c360ef086735f3163 | 2d706b8bd69aa172277515e89ed01add51a604d5 | /Projeto3_camadas/server.py | 1101684c09ce0c9774a7fb77eeb11d942fa3f078 | [] | no_license | guilhermecarvalhoacc/Camada_fisica | a17ae73e5065c09745c3ca32277906a23ddf3228 | 863fced4bfecd04358bad6258dfee8212cfd5af2 | refs/heads/master | 2023-04-03T12:00:54.245965 | 2021-04-08T19:52:34 | 2021-04-08T19:52:34 | 344,915,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,165 | py | #####################################################
# Physical Layer of Computing (Camada Física da Computação)
#Carareto
#11/08/2020
#Application
####################################################
#this is the top, application layer of your UART serial-communication software.
#to follow the execution and spot errors, add prints throughout the code!
from math import ceil
from os import sendfile
from enlace import *
import time
import numpy as np
from funcoes import *
# you must uncomment and configure the serial port through which communication will happen
# to find out your port, run in the terminal:
# python -m serial.tools.list_ports
# on windows, the device manager shows the port
#use one of the 3 options to assign the port in use to the variable
serialName = "/dev/ttyACM1" # Ubuntu (variacao de)
imageW = './img/recebidaCopia.jpg'
def main():
try:
        #we declare an object of type enlace named "com". This is the layer below the application. Note that one parameter
        #needed to declare this object is the port name.
com2 = enlace(serialName)
certo = (9).to_bytes(2, byteorder='big')
print(f"ESSA É A RESPOSTA DO SERVIDOR FALANDO QUE O ENVIO DEU CERTO: {certo}")
errado = (7).to_bytes(2, byteorder='big')
print(f"ESSA É A RESPOSTA DO SERVIDOR FALANDO QUE O ENVIO DEU ERRADO: {errado}")
confirmacao = cria_head(zero_bytes_id,zero_bytes,zero_bytes,certo) + EOP_bytes
print(f"ESSE AKI EH A CONFIRMAÇÃO: {confirmacao}")
negacao = cria_head(zero_bytes_id,zero_bytes,zero_bytes,errado) + EOP_bytes
        # Activate communication. Starts the threads and the serial communication
com2.enable()
        #If we got this far, the communication was opened successfully. Print a message to report it.
print('A comunicação foi aberta com sucesso!')
recebe_handshake = com2.getData(14)[0]
print("Recebeu hankdshake")
print(f" ESSE EH O HANDSHAKE: {recebe_handshake}")
time.sleep(0.2)
com2.sendData(cria_handshake(is_handshake=True))
print("enviando resposta")
print(f"esse eh o hankdshake: {recebe_handshake}")
tipo_msg_handshake = recebe_handshake[8:10]
#if tipo_msg_handshake == (2).to_bytes(2, byteorder='big'):
# print("Handshake recebido com sucesso! Comunicação operacional")
comando = True
lista_payload = bytearray()
id_pacote = 1
while comando:
id_byte = (1).to_bytes(4, byteorder='big')
time.sleep(0.5)
recebe_head = com2.getData(10)[0]
#print(recebe_head)
recebe_id = recebe_head[0:4]
#print(f"esse eh o id:{recebe_id}")
recebe_num_pacotes = recebe_head[4:6]
#print(f"esse eh o num de pacotes: {recebe_num_pacotes}")
recebe_tamanho_payload = recebe_head[6:8]
recebe_tipo_msg = recebe_head[8:10]
tamanho_payload_int = int.from_bytes(recebe_tamanho_payload, byteorder='big')
#print(f"esse eh o tamanho do payload em inteiro:{tamanho_payload_int}")
recebe_payload = com2.getData(tamanho_payload_int)[0]
recebe_EOP = com2.getData(4)[0]
#print(f"esse eh o EOP: {recebe_EOP}")
lista_payload.extend(recebe_payload)
recebe_id_int = int.from_bytes(recebe_id, byteorder='big')
recebe_num_pacotes_int = int.from_bytes(recebe_num_pacotes, byteorder='big')
#print(f"Esse eh o id do pacote em int{recebe_id_int}")
#print(id_pacote)
if recebe_id_int != id_pacote:
com2.sendData(negacao) #msg de erro enviada para client solicitando reenvio.
print("id do pacote diferente")
else:
if len(recebe_payload) != tamanho_payload_int:
print("TAMANHO DO PAYLOAD DIFERENTE")
com2.sendData(negacao)
else:
if recebe_EOP != EOP_bytes:
print("EDOP DIFERENTE")
com2.sendData(negacao)
else:
com2.sendData(confirmacao)
print("ENVIANDO CONFIRMAÇÃO DE RECEBIMENTO PRA O CLIENT")
id_pacote += 1
if recebe_id_int == recebe_num_pacotes_int:
print("Ultimo pacote recebido!")
comando = False
print("saiu do loop")
print(lista_payload)
print('Salvando dados dos arquivos: ')
f = open(imageW, 'wb')
f.write(lista_payload)
# Encerra comunicação
print("-------------------------")
print("Comunicação encerrada")
print("-------------------------")
com2.disable()
except Exception as erro:
print("ops! :-\\")
print(erro)
com2.disable()
#main only runs when executed from the terminal ... if called from another module it does not run
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
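A small sketch of how the 10-byte header handled above is laid out; the helper cria_head and the EOP_bytes terminator come from funcoes.py, which is not included in this file.

def parse_head(head):
    packet_id   = int.from_bytes(head[0:4], 'big')    # packet id, 4 bytes
    n_packets   = int.from_bytes(head[4:6], 'big')    # total number of packets, 2 bytes
    payload_len = int.from_bytes(head[6:8], 'big')    # payload size, 2 bytes
    msg_type    = int.from_bytes(head[8:10], 'big')   # message type, 2 bytes
    return packet_id, n_packets, payload_len, msg_type

head = ((1).to_bytes(4, 'big') + (3).to_bytes(2, 'big')
        + (114).to_bytes(2, 'big') + (9).to_bytes(2, 'big'))
print(parse_head(head))   # (1, 3, 114, 9)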
1e897b3e4ce74d5cb83c9c03a4eaab8cb37cd714 | 3f71801940fc9717126c2ed0c21d68baaf854c94 | /modules/machine_learning/advanced_model/main.py | 43b93fbb75e72e04eea59233eee96d0d5c62592d | [] | no_license | QMIND-Team/Sabermetrics | 7cf548bf88b2a609d0f74d1207f7d49d80516246 | 9638a93bf36c936b43c0a546bec3ed5d1796862c | refs/heads/master | 2020-04-05T13:48:06.322694 | 2019-03-31T17:36:30 | 2019-03-31T17:36:30 | 156,910,170 | 3 | 3 | null | 2019-03-31T17:43:22 | 2018-11-09T19:44:11 | Jupyter Notebook | UTF-8 | Python | false | false | 2,468 | py |
import warnings
import modules.machine_learning.main_model.help_main as help_main
warnings.filterwarnings("ignore")
import modules.machine_learning.advanced_model.main_advanced_functions as maf
'''
Methods:
LR = Linear Regression
SVM = Support Vector Machine
XGB = XGBOOST
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
_____________________________________________CONTROL START_________________________________________
'''
#Starting features for the model (currently the best for LR)
#testedFeatures = ['G_y', 'GS', 'WAR_x', 'BB%','K/9', 'GB%', 'ERA','xFIP', 'LOB%'] #best combination for LR
testedFeatures = ['L']
requiredColumns = ["Season","Team",]
allFeatures = ['L', 'SV', 'G_x', 'GS', 'IP', 'SO', 'K/9',
'BB/9', 'HR/9', 'BABIP_x', 'LOB%', 'GB%', 'HR/FB', 'ERA', 'FIP', 'xFIP',
'WAR_x', 'G_y', 'PA', 'HR', 'R', 'RBI', 'SB', 'BB%', 'K%', 'ISO',
'BABIP_y', 'AVG', 'OBP', 'SLG', 'wOBA', 'wRC+', 'BsR', 'Off', 'Def',
'WAR_y']
targetFeature = "W"
method = "LR"
# --------------------------------------------Functions Start-----------------------------------------------
#Train
advancedModel = 1
if advancedModel ==1:
seasonToPredict = 2016
testRange = 4
end = seasonToPredict
start = end - (testRange*3)
showProcess = 1
#features = ['G_y', 'OBP', 'IP', 'WAR_x', 'SB', 'AVG', 'HR/FB', 'K%', 'LOB%', 'xFIP', 'BB/9', 'Def', 'ERA', 'SLG', 'BABIP_x', 'PA', 'SV', 'R', 'Off', 'WAR_y', 'HR/9']
features = ['G_y'] #quick model
'''
_________________________________________________CONTROL END_______________________________________
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------'''
requiredColumns.append(targetFeature)
df = help_main.getAll(start, end)
allFeatures = df.columns
toTestFeatures = help_main.getSortedFeatures(testedFeatures, requiredColumns, allFeatures)
if advancedModel ==1:
maf.advancedModel(features,df,end,start,showProcess,testRange,targetFeature,method,testRange)
| [
"[email protected]"
] | |
0b1ac565b5be40a5358caec120a9e5dace2871eb | e8bc60d7de7d70418fc13bf60c4d2d9905686fad | /node_modules/websocket/build/config.gypi | 577cfb0c38de567c8bb35f5e590aec03117998ef | [
"Apache-2.0"
] | permissive | meizi8475/lotteryapp | ecf220e2a105621e6554d65b18739bf87a372090 | 6eea6c7fbc70996712cd3041eff731e08ec9203b | refs/heads/master | 2021-01-01T16:00:22.989038 | 2012-12-27T02:52:40 | 2012-12-27T02:52:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "out/dist-osx/usr/local",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/wangyuxin/.node-gyp/0.8.14",
"copy_dev_lib": "true",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"parseable": "",
"userignorefile": "/Users/wangyuxin/.npmignore",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "null",
"long": "",
"ignore": "",
"npat": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"versions": "",
"message": "%s",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"coverage": "",
"pre": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/wangyuxin/.npmrc",
"init_module": "/Users/wangyuxin/.npm-init.js",
"npaturl": "http://npat.npmjs.org/",
"user": "",
"node_version": "v0.8.14",
"save": "",
"editor": "vi",
"tag": "latest",
"global": "",
"username": "",
"optional": "true",
"force": "",
"searchopts": "",
"depth": "null",
"searchsort": "name",
"rebuild_bundle": "true",
"yes": "",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "",
"searchexclude": "",
"cache": "/Users/wangyuxin/.npm",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.14",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"init_version": "0.0.0",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/7v/2n4vn90x027dgn3tb2v8wbqm0000gp/T/",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"[email protected]"
] | |
e64403cef75b958cb80399e2f3cbc03b75b6fc44 | ffdc77394c5b5532b243cf3c33bd584cbdc65cb7 | /mindspore/python/mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py | 480c1d4ad85d17196db9b50d3504b3bda9cc95b8 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | mindspore-ai/mindspore | ca7d5bb51a3451c2705ff2e583a740589d80393b | 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83 | refs/heads/master | 2023-07-29T09:17:11.051569 | 2023-07-17T13:14:15 | 2023-07-17T13:14:15 | 239,714,835 | 4,178 | 768 | Apache-2.0 | 2023-07-26T22:31:11 | 2020-02-11T08:43:48 | C++ | UTF-8 | Python | false | false | 1,309 | py | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AdjustContrastv2 op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
adjust_contrastv2_op_info = AiCPURegOp("AdjustContrastv2") \
.fusion_type("OPAQUE") \
.input(0, "images", "required") \
.input(1, "contrast_factor", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.F16_Default, DataType.F32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(adjust_contrastv2_op_info)
def _adjust_contrastv2_aicpu():
"""AdjustContrastv2 aicpu register"""
return
| [
"[email protected]"
] | |
6d814310509a8617c96fc6933c1b37729a338e20 | e86c331b0b4c3f0fc17cfb652a31b010a02d2154 | /smart_calculater.py | 9a5673e8e05c58356b1a0ebe19aef2499f8f89ea | [] | no_license | Divyansh2611/smart_calculator | 3fc58c4f521fe18ccd5a4d8e2eab84393f3c270d | d43bdab9cf72e98a811df097563cf1eedd68951e | refs/heads/main | 2023-07-08T07:42:30.422824 | 2021-07-26T18:10:15 | 2021-07-26T18:10:15 | 389,727,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | import tkinter
from tkinter import *
def fun():
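    # Parse a free-form request such as "add 4 5" or "please divide 10 by 2":
    # the text is split into words, the first two integers found become the
    # operands, and a keyword ('add', 'subtract', 'multiply', 'divide', 'mod')
    # selects the operation.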
    info = e1.get()
    a = info.split()
    # Collect every integer that appears in the request
    num = []
    for b in a:
        if b.isdigit():
            num.append(int(b))
    # Every operation below needs two operands; otherwise report an error
    if len(num) < 2:
        result['text'] = 'error'
        return
    if 'add' in a or 'addition' in a:
        result['text'] = num[0] + num[1]
    elif 'subtract' in a or 'subtraction' in a:
        result['text'] = num[0] - num[1]
    elif 'multiply' in a or 'multiplication' in a:
        result['text'] = num[0] * num[1]
    elif 'divide' in a or 'division' in a:
        result['text'] = num[0] / num[1]
    elif 'mod' in a or 'modulous' in a:
        result['text'] = num[0] % num[1]
    else:
        result['text'] = 'error'
win=Tk()
win.geometry('400x400')
win.title('smart bugger')
win.config(bg='black')
l1=Label(win,text='I am a Smart calculator',width=20,padx=3)
l1.place(x=123,y=30)
l2=Label(win,text='My name is Pagger',padx=3)
l2.place(x=145,y=70)
l3=Label(win,text='How can i help you',padx=3)
l3.place(x=145,y=150)
e1=Entry(win,width=30)
e1.place(x=110,y=180)
b1=Button(win,text='Just this',borderwidth=10,command=fun)
b1.place(x=170,y=210)
result=Label(win,width=20,height=3,bg='white')
result.place(x=130,y=260)
win.mainloop()
| [
"[email protected]"
] | |
5cf95e015512f4ee695276b29effe2912418a63f | 5417c78be442575f3dece55e162914c90c14cef7 | /test/dex_offsets.py | cb862314baad4813ece4fce0fce21a828851cfc5 | [
"MIT"
] | permissive | Casperinous/Lena | 1cbb286600c54f7a59c60ac6372cb8ca67d03980 | 202ed791ecf8e632b00fea8373d81df9b1f54d2c | refs/heads/master | 2021-03-27T11:51:52.468554 | 2018-06-11T02:40:38 | 2018-06-11T02:40:38 | 107,802,298 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import sys
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.bytecodes.dvm import TYPE_MAP_ITEM
sys.path.append('../')
from dex import Dex
FILENAME_INPUT = "../app-debug.apk"
print '[*] Getting map_list ....'
a = apk.APK(FILENAME_INPUT)
vm = dvm.DalvikVMFormat(a.get_dex())
print len(vm.get_all_fields())
print len(vm.map_list.get_item_type("TYPE_FIELD_ID_ITEM").get_obj()) | [
"[email protected]"
] | |
a1250d373944bf65cff70e384219809151ab23bf | fadf50987ab3aaefc993f00187d8a833457e9e97 | /torchstat/model_hook.py | 3ff8d3a0ebcaae409b34f6e8da4cdb375d8cf88d | [
"MIT"
] | permissive | Hulalazz/torchstat | 4cff14e2b272246d9fd7136b969eaab6165abfeb | b533d917ba8f2e0871a60c3ff73704e294b769eb | refs/heads/master | 2020-04-04T08:59:07.626893 | 2018-11-01T09:21:35 | 2018-11-01T09:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,980 | py | import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
from torchstat import compute_madd
from torchstat import compute_flops
class ModelHook(object):
def __init__(self, model, input_size):
assert isinstance(model, nn.Module)
assert isinstance(input_size, (list, tuple))
self._model = model
self._input_size = input_size
self._origin_call = dict() # sub module call hook
self._hook_model()
x = torch.rand(1, *self._input_size) # add module duration time
self._model.eval()
self._model(x)
@staticmethod
def _register_buffer(module):
assert isinstance(module, nn.Module)
if len(list(module.children())) > 0:
return
module.register_buffer('input_shape', torch.zeros(3).int())
module.register_buffer('output_shape', torch.zeros(3).int())
module.register_buffer('parameter_quantity', torch.zeros(1).int())
module.register_buffer('inference_memory', torch.zeros(1).long())
module.register_buffer('MAdd', torch.zeros(1).long())
module.register_buffer('duration', torch.zeros(1).float())
module.register_buffer('Flops', torch.zeros(1).long())
def _sub_module_call_hook(self):
def wrap_call(module, *input, **kwargs):
assert module.__class__ in self._origin_call
start = time.time()
output = self._origin_call[module.__class__](module, *input, **kwargs)
end = time.time()
module.duration = torch.from_numpy(
np.array([end - start], dtype=np.float32))
module.input_shape = torch.from_numpy(
np.array(input[0].size()[1:], dtype=np.int32))
module.output_shape = torch.from_numpy(
np.array(output.size()[1:], dtype=np.int32))
parameter_quantity = 0
# iterate through parameters and count num params
for name, p in module._parameters.items():
parameter_quantity += (0 if p is None else torch.numel(p.data))
module.parameter_quantity = torch.from_numpy(
np.array([parameter_quantity], dtype=np.long))
inference_memory = 1
for s in output.size()[1:]:
inference_memory *= s
# memory += parameters_number # exclude parameter memory
inference_memory = inference_memory * 4 / (1024 ** 2) # shown as MB unit
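            # (the factor of 4 assumes 4-byte float32 activations)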
module.inference_memory = torch.from_numpy(
np.array([inference_memory], dtype=np.float32))
if len(input) == 1:
madd = compute_madd(module, input[0], output)
flops = compute_flops(module, input[0], output)
elif len(input) > 1:
madd = compute_madd(module, input, output)
flops = compute_flops(module, input, output)
else: # error
madd = 0
flops = 0
module.MAdd = torch.from_numpy(
np.array([madd], dtype=np.int64))
module.Flops = torch.from_numpy(
np.array([flops], dtype=np.int64))
return output
for module in self._model.modules():
if len(list(module.children())) == 0 and module.__class__ not in self._origin_call:
self._origin_call[module.__class__] = module.__class__.__call__
module.__class__.__call__ = wrap_call
def _hook_model(self):
self._model.apply(self._register_buffer)
self._sub_module_call_hook()
@staticmethod
def _retrieve_leaf_modules(model):
leaf_modules = []
for name, m in model.named_modules():
if len(list(m.children())) == 0:
leaf_modules.append((name, m))
return leaf_modules
def retrieve_leaf_modules(self):
return OrderedDict(self._retrieve_leaf_modules(self._model))
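

# Minimal usage sketch (an illustrative addition, not part of the original module):
# wrap a small model, then read the per-layer buffers that the hooks populate.
# The architecture and input size below are arbitrary assumptions.
if __name__ == "__main__":
    demo_model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    demo_hook = ModelHook(demo_model, (3, 224, 224))
    for name, leaf in demo_hook.retrieve_leaf_modules().items():
        print(name, int(leaf.MAdd), int(leaf.Flops), float(leaf.duration))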
| [
"[email protected]"
] | |
5aac12f50a7d3c33b9c2797a1d90192b97b9ea24 | c78e61ccee6ac695d3f71f72fc3212fdd2c1d193 | /cfed_timestep_comp.py | d2ac35905b1a4a0d651d5ae895ee6e77bc35a7ae | [] | no_license | bbw7561135/phd_code | 28557e84228119dd204f9e16ca27d7c7cef81188 | ef06c317115f0744a7941796c4092e489923ef4e | refs/heads/master | 2021-06-13T02:35:08.475474 | 2017-02-26T21:12:17 | 2017-02-26T21:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,751 | py | #------------------------------------------------------------------------------#
# #
# This code is a Python script that reads in arrays of synchrotron intensity #
# produced at different times in the evolution of the simulation, and #
# calculates the normalised correlation functions, structure functions, #
# and quadrupole ratios of the synchrotron intensity maps, for different #
# lines of sight. Plots are then produced of the normalised correlation #
# functions, structure functions, quadrupole ratios. This code is intended to #
# be used with simulations produced by Christoph Federrath. #
# #
# Author: Chris Herron #
# Start Date: 20/1/2016 #
# #
#------------------------------------------------------------------------------#
# First import numpy for array handling, matplotlib for plotting, astropy.io
# for fits manipulation, scipy.stats for calculating statistical quantities
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy import stats
# Import the functions that calculate the structure and correlation functions
# using FFT, as well as the function that calculates the radially averaged
# structure or correlation functions. Also import the function that calculates
# multipoles of the 2D structure functions, and the function that calculates the
# magnitude and argument of the quadrupole ratio
from sf_fft import sf_fft
from cf_fft import cf_fft
from sfr import sfr
from calc_multipole_2D import calc_multipole_2D
from calc_quad_ratio import calc_quad_ratio
# Define a function that calculates the errors in statistics by breaking up
# synchrotron images into quarters, calculating statistics for each quarter, and
# then calculates the standard deviation of the statistics.
def calc_err_bootstrap(sync_map, log = False):
'''
Description
		This function divides the given image into quarters, and then
		calculates statistics for each quarter. The standard error of the mean
		of the calculated statistics is then returned, representing the error
		on each statistic.
Required Input
sync_map - The synchrotron intensity map. Should be a 2D Numpy array.
log - A boolean value. If True, then the moments are calculated for the
the logarithm of the PDF, and not the PDF itself
Output
mean_err - The error calculated for the mean of synchrotron intensity
stdev_err - The error calculated for the standard deviation of the
synchrotron intensity
skew_err - The error calculated for the skewness of synchrotron
intensity
kurt_err - The error calculated for the kurtosis of synchrotron
intensity
m_err - The error calculated for the structure function slope of the
synchrotron intensity
residual_err - The error calculated for the residual of the linear fit
to the structure function of synchrotron intensity
int_quad_err - The error calculated for the integrated quadrupole ratio
modulus of the synchrotron intensity
'''
# Create an array that will hold the quarters of the synchrotron images
quarter_arr = np.zeros((4,np.shape(sync_map)[0]/2,np.shape(sync_map)[1]/2))
# Add the quarters of the images into the array
quarter_arr[0], quarter_arr[1] = np.split(np.split(sync_map,2,axis=0)[0],2,axis=1)
quarter_arr[2], quarter_arr[3] = np.split(np.split(sync_map,2,axis=0)[1],2,axis=1)
# Create arrays that will hold the calculated statistics for each quarter
mean_val = np.zeros(np.shape(quarter_arr)[0])
stdev_val = np.zeros(np.shape(quarter_arr)[0])
skew_val = np.zeros(np.shape(quarter_arr)[0])
kurt_val = np.zeros(np.shape(quarter_arr)[0])
m_val = np.zeros(np.shape(quarter_arr)[0])
resid_val = np.zeros(np.shape(quarter_arr)[0])
int_quad_val = np.zeros(np.shape(quarter_arr)[0])
# Loop over the quarters, to calculate statistics for each one
for i in range(np.shape(quarter_arr)[0]):
# Extract the current image quarter from the array
image = quarter_arr[i]
# Flatten the image, so that we can calculate the skewness and kurtosis
flat_image = image.flatten()
# If we are calculating moments of the log PDFs, then calculate the
# logarithm of the flat image
if log == True:
# In this case we are calculating the moments of the log PDFs, so
# calculate the log PDFs
flat_image = np.log10(flat_image)
# Calculate the mean of the synchrotron intensity map
mean_val[i] = np.mean(flat_image, dtype=np.float64)
# Calculate the standard deviation of the synchrotron intensity map
stdev_val[i] = np.std(flat_image, dtype=np.float64)
# Calculate the biased skewness of the synchrotron intensity map
skew_val[i] = stats.skew(flat_image)
# Calculate the biased Fisher kurtosis of the synchrotron intensity
# maps
kurt_val[i] = stats.kurtosis(flat_image)
# Calculate the structure function (two-dimensional) of the synchrotron
# intensity map. Note that no_fluct = True is set, because we are not
# subtracting the mean from the synchrotron maps before calculating the
# structure function.
strfn = sf_fft(image, no_fluct = True)
# Radially average the calculated 2D structure function, using the
# specified number of bins.
rad_sf = sfr(strfn, num_bins, verbose = False)
# Extract the calculated radially averaged structure function
sf = rad_sf[1]
# Extract the radius values used to calculate this structure function.
sf_rad_arr = rad_sf[0]
# Calculate the spectral index of the structure function calculated for
# this value of gamma. Note that only the first third of the structure
# function is used in the calculation, as this is the part that is
# close to a straight line.
spec_ind_data = np.polyfit(np.log10(\
sf_rad_arr[5:14]),\
np.log10(sf[5:14]), 1, full = True)
# Extract the returned coefficients from the polynomial fit
coeff = spec_ind_data[0]
# Extract the sum of the residuals from the polynomial fit
resid_val[i] = spec_ind_data[1]
# Enter the value of m, the slope of the structure function minus 1,
# into the corresponding array
m_val[i] = coeff[0]-1.0
# Calculate the 2D structure function for this slice of the synchrotron
# intensity data cube. Note that no_fluct = True is set, because we are
# not subtracting the mean from the synchrotron maps before calculating
# the structure function. We are also calculating the normalised
# structure function, which only takes values between 0 and 2.
norm_strfn = sf_fft(image, no_fluct = True, normalise = True)
# Shift the 2D structure function so that the zero radial separation
# entry is in the centre of the image.
norm_strfn = np.fft.fftshift(norm_strfn)
# Calculate the magnitude and argument of the quadrupole ratio
quad_mod, quad_arg, quad_rad = calc_quad_ratio(norm_strfn, num_bins)
# Integrate the magnitude of the quadrupole / monopole ratio from
# one sixth of the way along the radial separation bins, until three
# quarters of the way along the radial separation bins. This integration
# is performed with respect to log separation (i.e. I am ignoring the
# fact that the points are equally separated in log space, to calculate
# the area under the quadrupole / monopole ratio plot when the x axis
# is scaled logarithmically). I normalise the value that is returned by
# dividing by the number of increments in log radial separation used in
# the calculation.
int_quad_val[i] = np.trapz(quad_mod[8:23], dx = 1.0)\
/ (22 - 8)
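		# Since dx = 1.0 and there are 14 increments between bins 8 and 22, dividing
		# the trapezoidal integral by (22 - 8) makes this the average value of the
		# quadrupole ratio modulus over those separation bins.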
# At this point, the statistics have been calculated for each quarter
# The next step is to calculate the standard error of the mean of each
# statistic
mean_err = np.std(mean_val) / np.sqrt(len(mean_val))
stdev_err = np.std(stdev_val) / np.sqrt(len(stdev_val))
skew_err = np.std(skew_val) / np.sqrt(len(skew_val))
kurt_err = np.std(kurt_val) / np.sqrt(len(kurt_val))
m_err = np.std(m_val) / np.sqrt(len(m_val))
residual_err = np.std(resid_val) / np.sqrt(len(resid_val))
int_quad_err = np.std(int_quad_val) / np.sqrt(len(int_quad_val))
# Now that all of the calculations have been performed, return the
# calculated errors
return mean_err,stdev_err,skew_err,kurt_err,m_err,residual_err, int_quad_err
# Set a variable to hold the number of bins to use in calculating the
# correlation functions
num_bins = 25
# Create a variable that controls whether the moments of the log PDFs are
# calculated
log = True
# Create a string for the directory that contains the simulated magnetic fields
# and synchrotron intensity maps to use.
simul_loc = '/Users/chrisherron/Documents/PhD/CFed_2016/'
# Create a string for the specific simulated data sets to use in calculations.
# The directories end in:
# 512sM5Bs5886_20 (Solenoidal turbulence, timestep 20)
# 512sM5Bs5886_25 (Solenoidal turbulence, timestep 25)
# 512sM5Bs5886_30 (Solenoidal turbulence, timestep 30)
# 512sM5Bs5886_35 (Solenoidal turbulence, timestep 35)
# 512sM5Bs5886_40 (Solenoidal turbulence, timestep 40)
# 512cM5Bs5886_20 (Compressive turbulence, timestep 20)
# 512cM5Bs5886_25 (Compressive turbulence, timestep 25)
# 512cM5Bs5886_30 (Compressive turbulence, timestep 30)
# 512cM5Bs5886_35 (Compressive turbulence, timestep 35)
# 512cM5Bs5886_40 (Compressive turbulence, timestep 40)
spec_locs = ['512sM5Bs5886_20/', '512sM5Bs5886_25/', '512sM5Bs5886_30/',\
'512sM5Bs5886_35/', '512sM5Bs5886_40/', '512cM5Bs5886_20/', '512cM5Bs5886_25/',\
'512cM5Bs5886_30/', '512cM5Bs5886_35/', '512cM5Bs5886_40/']
# Create an array of strings, where each string gives the legend label for
# a corresponding simulation
sim_labels = ['Sol 20', 'Sol 25', 'Sol 30', 'Sol 35', 'Sol 40',\
'Comp 20', 'Comp 25', 'Comp 30', 'Comp 35', 'Comp 40']
# Create a variable that holds the number of timesteps being used
num_timestep = 5
# Create a variable that controls whether the line of sight is assumed to be
# along the x, y or z axis of the data cube when constructing the synchrotron
# maps. This can include 'x', 'y', or 'z'. Synchrotron maps are produced for
# each line of sight included in the array
line_o_sight = ['x', 'y', 'z']
# Create a variable that specifies the gamma values that will be used to produce
# these synchrotron emission maps
gamma = 2.0
# Create a three dimensional array that will hold all of the information
# for the normalised correlation functions. The first index gives the simulation
# the second gives the line of sight, and the third axis goes along radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
norm_corr_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will just hold the radius values used
# to make all of the normalised correlation functions. The first axis represents
# the simulation used, the second represents the line of sight, and
# the third axis goes over radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
corr_rad_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will hold all of the information
# for the structure functions. The first index gives the simulation
# the second gives the line of sight, and the third axis goes along radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
sf_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will just hold the radius values used
# to make all of the structure functions. The first axis represents the
# simulation used, the second represents the line of sight, and
# the third axis goes over radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
sf_rad_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will hold all of the information
# for the magnitude of the quadrupole ratios. The first index gives the
# simulation the second gives the line of sight, and the third axis goes
# along radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
quad_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will just hold the radius values used
# to make all of the quadrupole ratios. The first axis represents the
# simulation used, the second represents the line of sight, and
# the third axis goes over radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
quad_rad_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will hold all of the information
# for the real part of the quadrupole ratios. The first index gives the
# simulation the second gives the line of sight, and the third axis goes
# along radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
quad_real_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create a three dimensional array that will hold all of the information
# for the imaginary part of the quadrupole ratios. The first index gives the
# simulation the second gives the line of sight, and the third axis goes
# along radius.
# The x, y and z axes are numbered with indices 0, 1 and 2 respectively
quad_imag_arr = np.zeros((len(spec_locs), 3, num_bins))
# Create an empty array, where each entry specifies the calculated mean of
# the synchrotron intensity image of the corresponding simulation for a
# particular value of gamma. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
mean_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the calculated standard
# deviation of the synchrotron intensity image of the corresponding simulation
# for a particular value of gamma. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
stdev_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the calculated skewness of
# the synchrotron intensity image of the corresponding simulation for a
# particular value of gamma. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
# NOTE: We will calculate the biased skewness
skew_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the calculated kurtosis of
# the synchrotron intensity image of the corresponding simulation for a
# particular value of gamma. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
# NOTE: We will calculate the biased Fisher kurtosis
kurt_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the calculated slope of
# the structure function of the synchrotron intensity image minus 1, of the
# corresponding simulation, for a particular value of gamma. The first index
# gives the simulation, and the second index gives the line of sight as (x,y,z).
m_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the residuals of the
# linear fit to the structure function, of the corresponding simulation, for a
# particular value of gamma. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
residual_arr = np.zeros((len(spec_locs),3))
# Create an empty array, where each entry specifies the calculated integral of
# the magnitude of the quadrupole / monopole ratio of the synchrotron intensity
# image, for the corresponding simulation, for a particular value of gamma.
# The first index gives the simulation, and the second index gives the line of
# sight as (x,y,z).
int_quad_arr = np.zeros((len(spec_locs),3))
# Create error arrays for each of the statistics. These errors are calculated
# by the standard deviation of the statistics calculated for sub-images of the
# synchrotron maps. The first index gives the simulation, and the
# second index gives the line of sight as (x,y,z).
mean_err_arr = np.zeros((len(spec_locs),3))
stdev_err_arr = np.zeros((len(spec_locs),3))
skew_err_arr = np.zeros((len(spec_locs),3))
kurt_err_arr = np.zeros((len(spec_locs),3))
m_err_arr = np.zeros((len(spec_locs),3))
residual_err_arr = np.zeros((len(spec_locs),3))
int_quad_err_arr = np.zeros((len(spec_locs),3))
# Loop over the different simulations that we are using to make the plot
for i in range(len(spec_locs)):
# Create a string for the full directory path to use in this calculation
data_loc = simul_loc + spec_locs[i]
# Loop over the lines of sight, to calculate the correlation function,
# structure function and quadrupole ratio for each line of sight
for j in range(3):
# Open the FITS file that contains the synchrotron intensity maps for this
# simulation
sync_fits = fits.open(data_loc + 'synint_{}_gam{}.fits'.format(line_o_sight[j],gamma))
# Extract the data for the simulated synchrotron intensities
sync_data = sync_fits[0].data
# Print a message to the screen to show that the data has been loaded
print 'Synchrotron intensity loaded successfully'
# Flatten the synchrotron intensity map
flat_sync = sync_data.flatten()
# If we are calculating the moments of the log PDFs, then calculate the
# logarithm of the synchrotron intensity values
if log == True:
# In this case we are calculating the moments of the log PDFs, so
# calculate the log of the synchrotron intensities
flat_sync = np.log10(flat_sync/ np.mean(flat_sync, dtype = np.float64))
# Calculate the mean of the synchrotron intensity map, and store the
# result in the corresponding array
mean_arr[i,j] = np.mean(flat_sync, dtype=np.float64)
# Calculate the standard deviation of the synchrotron intensity map, and
# store the result in the corresponding array
stdev_arr[i,j] = np.std(flat_sync, dtype=np.float64)
# Calculate the biased skewness of the synchrotron intensity map, and store
# the result in the corresponding array
skew_arr[i,j] = stats.skew(flat_sync)
# Calculate the biased Fisher kurtosis of the synchrotron intensity
# map, and store the result in the corresponding array
kurt_arr[i,j] = stats.kurtosis(flat_sync)
# Calculate the 2D correlation function for this slice of the synchrotron
# intensity data cube. Note that no_fluct = True is set, because we are
# not subtracting the mean from the synchrotron maps before calculating
# the correlation function
corr = cf_fft(sync_data, no_fluct = True)
# Radially average the calculated 2D correlation function, using the
# specified number of bins
rad_corr = sfr(corr, num_bins, verbose = False)
# Calculate the square of the mean of the synchrotron intensity values
sync_sq_mean = np.power( np.mean(sync_data, dtype = np.float64), 2.0 )
# Calculate the mean of the synchrotron intensity values squared
sync_mean_sq = np.mean( np.power(sync_data, 2.0), dtype = np.float64 )
# Calculate the normalised, radially averaged correlation function for
# this value of gamma
norm_rad_corr = (rad_corr[1] - sync_sq_mean) / (sync_mean_sq - sync_sq_mean)
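		# That is, NCF(r) = ( <I(x) I(x+r)> - <I>^2 ) / ( <I^2> - <I>^2 ), which is
		# 1 at zero separation and tends towards 0 at large separations.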
# Print a message to show that the correlation function of the
# synchrotron intensity has been calculated for this line of sight
print 'Correlation function of synchrotron intensity'\
+ ' calculated for {} LOS'.format(line_o_sight[j])
# Insert the calculated normalised, radially averaged correlation function
# into the matrix that stores all of the calculated correlation functions
norm_corr_arr[i,j] = norm_rad_corr
# Insert the radius values used to calculate this correlation function
# into the matrix that stores the radius values
corr_rad_arr[i,j] = rad_corr[0]
# Print a message to show that the correlation function has been calculated
print 'Normalised, radially averaged correlation function calculated for'\
+ ' {} LOS'.format(line_o_sight[j])
# Calculate the structure function (two-dimensional) of the synchrotron
# intensity map. Note that no_fluct = True is set, because we are not
# subtracting the mean from the synchrotron maps before calculating the
# structure function.
strfn = sf_fft(sync_data, no_fluct = True)
# Radially average the calculated 2D structure function, using the
# specified number of bins.
rad_sf = sfr(strfn, num_bins, verbose = False)
# Extract the calculated radially averaged structure function
sf_arr[i,j] = rad_sf[1]
# Extract the radius values used to calculate this structure function
sf_rad_arr[i,j] = rad_sf[0]
# Calculate the spectral index of the structure function calculated for
# this value of gamma.
spec_ind_data = np.polyfit(np.log10(\
sf_rad_arr[i,j,5:14]),\
np.log10(sf_arr[i,j,5:14]), 1, full = True)
# Extract the returned coefficients from the polynomial fit
coeff = spec_ind_data[0]
# Enter the value of m, the slope of the structure function minus 1,
# into the corresponding array
m_arr[i,j] = coeff[0]-1.0
# Enter the value of the residuals into the corresponding array
residual_arr[i,j] = spec_ind_data[1]
# Calculate the 2D structure function for this slice of the synchrotron
# intensity data cube. Note that no_fluct = True is set, because we are
# not subtracting the mean from the synchrotron maps before calculating
# the structure function. We are also calculating the normalised
# structure function, which only takes values between 0 and 2.
norm_strfn = sf_fft(sync_data, no_fluct = True, normalise = True)
# Shift the 2D structure function so that the zero radial separation
# entry is in the centre of the image.
norm_strfn = np.fft.fftshift(norm_strfn)
# Calculate the magnitude and argument of the quadrupole ratio
quad_mod, quad_arg, quad_rad = calc_quad_ratio(norm_strfn, num_bins)
# Add the calculated modulus of the quadrupole ratio to the final array
quad_arr[i,j] = quad_mod
# Add the radius values used to calculate the quadrupole ratio to the
# corresponding array
quad_rad_arr[i,j] = quad_rad
# Calculate the real part of the quadrupole ratio
quad_real_arr[i,j] = quad_mod * np.cos(quad_arg)
# Calculate the imaginary part of the quadrupole ratio
quad_imag_arr[i,j] = quad_mod * np.sin(quad_arg)
# Integrate the magnitude of the quadrupole / monopole ratio from one sixth
# of the way along the radial separation bins, until three quarters of the
# way along the radial separation bins. This integration is performed with
# respect to log separation (i.e. I am ignoring the fact that the
# points are equally separated in log space, to calculate the area under
# the quadrupole / monopole ratio plot when the x axis is scaled
# logarithmically). I normalise the value that is returned by dividing
# by the number of increments in log radial separation used in the
# calculation.
int_quad_arr[i,j] = np.trapz(quad_mod[8:23], dx = 1.0) / (22 - 8)
		# Create errors for each of the statistics. These errors are calculated
		# for every line of sight, from the spread of the statistics calculated
		# for sub-images of the synchrotron maps.
mean_err_arr[i,j], stdev_err_arr[i,j], skew_err_arr[i,j],\
kurt_err_arr[i,j], m_err_arr[i,j],\
residual_err_arr[i,j], int_quad_err_arr[i,j]\
= calc_err_bootstrap(sync_data, log = log)
# Close the fits files, to save memory
sync_fits.close()
# Print a message to show that the calculation has finished successfully
# for this simulation
print 'All statistics calculated for simulation {} LOS {}'.format(\
spec_locs[i], line_o_sight[j])
# When the code reaches this point, the normalised correlation functions,
# structure functions, and quadrupole ratios have been saved for every
# simulation, and every line of sight, so start making the final plots.
# Create an array of marker symbols, so that the plot for each line of sight has
# a different plot symbol
symbol_arr = ['o','^','s','*','D']
# ----------------- Plots of normalised correlation functions ------------------
# Here we want to produce one plot with six subplots. There should be two rows
# of subplots, with three subplots in each row. The left subplot will be the
# normalised correlation functions for a line of sight along the x axis, the
# centre plot will be for the y axis, and the right subplot will be the
# normalised correlation functions for the z axis. In each plot the timesteps
# of the solenoidal and compressive simulations will be compared, with
# solenoidal simulations on the top row, and compressive on the bottom.
# Create a figure to hold all of the subplots
fig = plt.figure(1, figsize=(10,6), dpi = 300)
# Create an axis for the first subplot to be produced, which is for the
# x line of sight, solenoidal simulations
ax1 = fig.add_subplot(231)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,0], norm_corr_arr[i,0], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,0])), \
np.zeros(np.shape(corr_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax1.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax1.get_xticklabels(), visible=False)
# Create an axis for the second subplot to be produced, which is for the
# y line of sight, solenoidal simulation. Make the y axis limits the same as
# for the x axis plot
ax2 = fig.add_subplot(232, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,1], norm_corr_arr[i,1], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,1])), \
np.zeros(np.shape(corr_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax2.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax2.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax2.get_yticklabels(), visible=False)
# Create an axis for the third subplot to be produced, which is for the
# z line of sight, solenoidal simulation. Make the y axis limits the same as for
# the x axis plot
ax3 = fig.add_subplot(233, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,2], norm_corr_arr[i,2], '-' + symbol_arr[i],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,2])), \
np.zeros(np.shape(corr_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax3.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax3.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax3.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(fontsize = 8, numpoints=1)
# Create an axis for the fourth subplot to be produced, which is for the
# x line of sight, compressive simulations
ax4 = fig.add_subplot(234, sharex = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,0], norm_corr_arr[i,0], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,0])), \
np.zeros(np.shape(corr_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax4.set_xscale('log')
# Create an axis for the fifth subplot to be produced, which is for the
# y line of sight, compressive simulation. Make the y axis limits the same as
# for the x axis plot
ax5 = fig.add_subplot(235, sharex = ax2, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,1], norm_corr_arr[i,1], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,1])), \
np.zeros(np.shape(corr_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax5.set_xscale('log')
# Make the y axis tick labels invisible
plt.setp( ax5.get_yticklabels(), visible=False)
# Create an axis for the sixth subplot to be produced, which is for the
# z line of sight, compressive simulation. Make the y axis limits the same as for
# the x axis plot
ax6 = fig.add_subplot(236, sharex = ax3, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the normalised correlation function for this simulation, for this
# line of sight
plt.plot(corr_rad_arr[i,2], norm_corr_arr[i,2], '-' + symbol_arr[i - num_timestep],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(corr_rad_arr[0,2])), \
np.zeros(np.shape(corr_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax6.set_xscale('log')
# Make the y axis tick labels invisible
plt.setp( ax6.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(fontsize = 8, numpoints=1)
# Add a label to the x-axis
plt.figtext(0.5, 0.0, 'Radial Separation [pixels]', ha = 'center', \
va = 'bottom', fontsize = 20)
# Add a label to the y-axis
plt.figtext(0.03, 0.5, 'NCF Sync Intensity', ha = 'left', \
va = 'center', fontsize = 20, rotation = 'vertical')
# Add some text to the figure, to label the left plot as figure a
plt.figtext(0.15, 0.94, 'a) Sol x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure b
plt.figtext(0.42, 0.94, 'b) Sol y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure c
plt.figtext(0.7, 0.94, 'c) Sol z-LOS', fontsize = 18)
# Add some text to the figure, to label the left plot as figure d
plt.figtext(0.15, 0.475, 'd) Comp x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure e
plt.figtext(0.42, 0.475, 'e) Comp y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure f
plt.figtext(0.7, 0.475, 'f) Comp z-LOS', fontsize = 18)
# Make sure that all of the labels are clearly visible in the plot
#plt.tight_layout()
# Save the figure using the given filename and format
plt.savefig(simul_loc + 'ncfs_all_sims_time_gam{}.eps'.format(gamma), format = 'eps')
# Close the figure so that it does not stay in memory
plt.close()
#--------------------------- Structure Functions -------------------------------
# Here we want to produce one plot with six subplots. There should be two rows
# of subplots, with three subplots in each row. The left subplot will be the
# structure functions for a line of sight along the x axis, the centre plot will
# be for the y axis, and the right subplot will be the structure functions for
# the z axis. In each plot the solenoidal and compressive simulations will be
# compared for different timesteps. The top row is for the solenoidal simulation
# and the bottom row is for the compressive simulation.
# Create a figure to hold all of the subplots
fig = plt.figure(1, figsize=(10,6), dpi = 300)
# Create an axis for the first subplot to be produced, which is for the
# x line of sight, solenoidal simulation
ax1 = fig.add_subplot(231)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,0], sf_arr[i,0], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,0])), \
np.zeros(np.shape(sf_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax1.set_xscale('log')
# Make the y axis of the plot logarithmic
ax1.set_yscale('log')
# Make the x axis tick labels invisible
plt.setp( ax1.get_xticklabels(), visible=False)
# Create an axis for the second subplot to be produced, which is for the
# y line of sight, solenoidal simulation. Make the y axis limits the same as for
# the x axis plot
ax2 = fig.add_subplot(232, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,1], sf_arr[i,1], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,1])), \
np.zeros(np.shape(sf_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax2.set_xscale('log')
# Make the y axis of the plot logarithmic
ax2.set_yscale('log')
# Make the x axis tick labels invisible
plt.setp( ax2.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax2.get_yticklabels(), visible=False)
# Create an axis for the third subplot to be produced, which is for the
# z line of sight, solenoidal simulation. Make the y axis limits the same as for
# the x axis plot
ax3 = fig.add_subplot(233, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,2], sf_arr[i,2], '-' + symbol_arr[i],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,2])), \
np.zeros(np.shape(sf_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax3.set_xscale('log')
# Make the y axis of the plot logarithmic
ax3.set_yscale('log')
# Make the x axis tick labels invisible
plt.setp( ax3.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax3.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(loc = 4, fontsize = 8, numpoints=1)
# Create an axis for the fourth subplot to be produced, which is for the
# x line of sight, compressive simulation
ax4 = fig.add_subplot(234, sharex = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,0], sf_arr[i,0], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,0])), \
np.zeros(np.shape(sf_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax4.set_xscale('log')
# Make the y axis of the plot logarithmic
ax4.set_yscale('log')
# Create an axis for the fifth subplot to be produced, which is for the
# y line of sight, compressive simulation. Make the y axis limits the same as for
# the x axis plot
ax5 = fig.add_subplot(235, sharex = ax2, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,1], sf_arr[i,1], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,1])), \
np.zeros(np.shape(sf_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax5.set_xscale('log')
# Make the y axis of the plot logarithmic
ax5.set_yscale('log')
# Make the y axis tick labels invisible
plt.setp( ax5.get_yticklabels(), visible=False)
# Create an axis for the sixth subplot to be produced, which is for the
# z line of sight, compressive simulation. Make the y axis limits the same as for
# the x axis plot
ax6 = fig.add_subplot(236, sharex = ax3, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the structure function for this simulation, for this
# line of sight
plt.plot(sf_rad_arr[i,2], sf_arr[i,2], '-' + symbol_arr[i - num_timestep],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(sf_rad_arr[0,2])), \
np.zeros(np.shape(sf_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax6.set_xscale('log')
# Make the y axis of the plot logarithmic
ax6.set_yscale('log')
# Make the y axis tick labels invisible
plt.setp( ax6.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(loc = 4, fontsize = 8, numpoints=1)
# Add a label to the x-axis
plt.figtext(0.5, 0.0, 'Radial Separation [pixels]', ha = 'center', \
va = 'bottom', fontsize = 20)
# Add a label to the y-axis
plt.figtext(0.03, 0.5, 'Structure Function Amplitude', ha = 'left', \
va = 'center', fontsize = 20, rotation = 'vertical')
# Add some text to the figure, to label the left plot as figure a
plt.figtext(0.15, 0.94, 'a) Sol x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure b
plt.figtext(0.42, 0.94, 'b) Sol y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure c
plt.figtext(0.7, 0.94, 'c) Sol z-LOS', fontsize = 18)
# Add some text to the figure, to label the left plot as figure d
plt.figtext(0.15, 0.475, 'd) Comp x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure e
plt.figtext(0.42, 0.475, 'e) Comp y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure f
plt.figtext(0.7, 0.475, 'f) Comp z-LOS', fontsize = 18)
# Make sure that all of the labels are clearly visible in the plot
#plt.tight_layout()
# Save the figure using the given filename and format
plt.savefig(simul_loc + 'sfs_all_sims_time_gam{}.eps'.format(gamma), format = 'eps')
# Close the figure so that it does not stay in memory
plt.close()
#----------------------------- Quadrupole Ratios -------------------------------
# Here we want to produce one plot with six subplots. There should be two rows
# of subplots, with three subplots in each row. The left subplot will be the
# quadrupole ratio modulus for a line of sight along the x axis, the centre plot
# will be for the y axis, and the right subplot will be the quadrupole ratio
# modulus for the z axis. In each plot the solenoidal and compressive
# simulations will be compared for different timesteps. The top row is for the
# solenoidal simulation, and the bottom row for the compressive simulation.
# Create a figure to hold all of the subplots
fig = plt.figure(1, figsize=(10,6), dpi = 300)
# Create an axis for the first subplot to be produced, which is for the
# x line of sight, solenoidal simulation
ax1 = fig.add_subplot(231)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,0], quad_arr[i,0], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,0])), \
np.zeros(np.shape(quad_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax1.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax1.get_xticklabels(), visible=False)
# Create an axis for the second subplot to be produced, which is for the
# y line of sight, solenoidal simulation. Make the y axis limits the same as for
# the x axis plot
ax2 = fig.add_subplot(232, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,1], quad_arr[i,1], '-' + symbol_arr[i])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,1])), \
np.zeros(np.shape(quad_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax2.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax2.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax2.get_yticklabels(), visible=False)
# Create an axis for the third subplot to be produced, which is for the
# z line of sight, solenoidal simulation. Make the y axis limits the same as for
# the x axis plot
ax3 = fig.add_subplot(233, sharey = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,2], quad_arr[i,2], '-' + symbol_arr[i],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,2])), \
np.zeros(np.shape(quad_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax3.set_xscale('log')
# Make the x axis tick labels invisible
plt.setp( ax3.get_xticklabels(), visible=False)
# Make the y axis tick labels invisible
plt.setp( ax3.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(fontsize = 8, numpoints=1)
# Create an axis for the fourth subplot to be produced, which is for the
# x line of sight, compressive simulation
ax4 = fig.add_subplot(234, sharex = ax1)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,0], quad_arr[i,0], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,0])), \
np.zeros(np.shape(quad_rad_arr[0,0])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax4.set_xscale('log')
# Create an axis for the fifth subplot to be produced, which is for the
# y line of sight, compressive simulation. Make the y axis limits the same as for
# the x axis plot
ax5 = fig.add_subplot(235, sharex = ax2, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,1], quad_arr[i,1], '-' + symbol_arr[i - num_timestep])
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,1])), \
np.zeros(np.shape(quad_rad_arr[0,1])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax5.set_xscale('log')
# Make the y axis tick labels invisible
plt.setp( ax5.get_yticklabels(), visible=False)
# Create an axis for the sixth subplot to be produced, which is for the
# z line of sight, compressive simulation. Make the y axis limits the same as for
# the x axis plot
ax6 = fig.add_subplot(236, sharex = ax3, sharey = ax4)
# Loop over the simulations to produce plots for each simulation
for i in range(num_timestep, 2*num_timestep):
# Plot the quadrupole ratio modulus for this simulation, for this
# line of sight
plt.plot(quad_rad_arr[i,2], quad_arr[i,2], '-' + symbol_arr[i - num_timestep],\
label = '{}'.format(sim_labels[i]))
# Plot a faded dashed line to represent the line y = 0
plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,2])), \
np.zeros(np.shape(quad_rad_arr[0,2])), 'k--', alpha = 0.5)
# Make the x axis of the plot logarithmic
ax6.set_xscale('log')
# Make the y axis tick labels invisible
plt.setp( ax6.get_yticklabels(), visible=False)
# Force the legend to appear on the plot
plt.legend(fontsize = 8, numpoints=1)
# Add a label to the x-axis
plt.figtext(0.5, 0.0, 'Radial Separation [pixels]', ha = 'center', \
va = 'bottom', fontsize = 20)
# Add a label to the y-axis
plt.figtext(0.03, 0.5, 'Quadrupole Ratio', ha = 'left', \
va = 'center', fontsize = 20, rotation = 'vertical')
# Add some text to the figure, to label the left plot as figure a
plt.figtext(0.15, 0.94, 'a) Sol x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure b
plt.figtext(0.42, 0.94, 'b) Sol y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure c
plt.figtext(0.7, 0.94, 'c) Sol z-LOS', fontsize = 18)
# Add some text to the figure, to label the left plot as figure d
plt.figtext(0.15, 0.475, 'd) Comp x-LOS', fontsize = 18)
# Add some text to the figure, to label the centre plot as figure e
plt.figtext(0.42, 0.475, 'e) Comp y-LOS', fontsize = 18)
# Add some text to the figure, to label the right plot as figure f
plt.figtext(0.7, 0.475, 'f) Comp z-LOS', fontsize = 18)
# Make sure that all of the labels are clearly visible in the plot
#plt.tight_layout()
# Save the figure using the given filename and format
plt.savefig(simul_loc + 'quad_ratio_all_sims_time_gam{}.eps'.format(gamma), format = 'eps')
# Close the figure so that it does not stay in memory
plt.close()
# #----------------------- Real and Imaginary Parts of Quad Ratio ----------------
# # Here we want to produce one plot with six subplots. There should be two rows
# # of subplots, with three subplots in each row. The top row will be the real
# # part of the quadrupole ratio, and the bottom row will be the imaginary part.
# # The left column will be for a line of sight along the x axis, the centre
# # column for a line of sight along the y axis, and the right column will be for
# # a line of sight along the z axis.
# # Create a figure to hold all of the subplots
# fig = plt.figure(1, figsize=(10,6), dpi = 300)
# # Create an axis for the first subplot to be produced, which is for the real
# # part of the quadrupole ratio for a line of sight along the x axis
# ax1 = fig.add_subplot(231)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,0], quad_real_arr[i,0], '-' + symbol_arr[i])
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,0])), \
# np.zeros(np.shape(quad_rad_arr[0,0])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax1.set_xscale('log')
# # Make the x axis tick labels invisible
# plt.setp( ax1.get_xticklabels(), visible=False)
# # Create an axis for the second subplot to be produced, which is for the real
# # part of the quadrupole ratio for a line of sight along the y axis. Make the y
# # axis limits the same as for the x axis plot
# ax2 = fig.add_subplot(232, sharey = ax1)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,1], quad_real_arr[i,1], '-' + symbol_arr[i])
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,1])), \
# np.zeros(np.shape(quad_rad_arr[0,1])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax2.set_xscale('log')
# # Make the x axis tick labels invisible
# plt.setp( ax2.get_xticklabels(), visible=False)
# # Make the y axis tick labels invisible
# plt.setp( ax2.get_yticklabels(), visible=False)
# # Create an axis for the third subplot to be produced, which is for the real
# # part of the quadrupole ratio for a line of sight along the z axis. Make the y
# # axis limits the same as for the x axis plot
# ax3 = fig.add_subplot(233, sharey = ax1)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,2], quad_real_arr[i,2], '-' + symbol_arr[i],\
# label = '{}'.format(sim_labels[i]))
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,2])), \
# np.zeros(np.shape(quad_rad_arr[0,2])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax3.set_xscale('log')
# # Make the x axis tick labels invisible
# plt.setp( ax3.get_xticklabels(), visible=False)
# # Make the y axis tick labels invisible
# plt.setp( ax3.get_yticklabels(), visible=False)
# # Force the legend to appear on the plot
# plt.legend(loc=4, fontsize = 9, numpoints=1)
# # Create an axis for the fourth subplot to be produced, which is for the
# # imaginary part of the quadrupole ratio for a line of sight along the x axis.
# # Make the x axis limits the same as for the first plot
# ax4 = fig.add_subplot(234, sharex = ax1, sharey = ax1)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,0], quad_imag_arr[i,0], '-' + symbol_arr[i])
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,0])), \
# np.zeros(np.shape(quad_rad_arr[0,0])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax4.set_xscale('log')
# # Create an axis for the fifth subplot to be produced, which is for the
# # imaginary part of the quadrupole ratio for a line of sigth along the y axis.
# # Make the x axis limits the same as for the second plot, and the y axis limits
# # the same as for the fourth plot
# ax5 = fig.add_subplot(235, sharex = ax2, sharey = ax4)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,1], quad_imag_arr[i,1], '-' + symbol_arr[i])
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,1])), \
# np.zeros(np.shape(quad_rad_arr[0,1])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax5.set_xscale('log')
# # Make the y axis tick labels invisible
# plt.setp( ax5.get_yticklabels(), visible=False)
# # Create an axis for the sixth subplot to be produced, which is for the
# # imaginary part of the quadrupole ratio for a line of sigth along the z axis.
# # Make the x axis limits the same as for the third plot, and the y axis limits
# # the same as for the fourth plot
# ax6 = fig.add_subplot(236, sharex = ax3, sharey = ax4)
# # Loop over the simulations to produce plots for each simulation
# for i in range(len(spec_locs)):
# # Plot the quadrupole ratio for this simulation, for this line of sight
# plt.plot(quad_rad_arr[i,2], quad_imag_arr[i,2], '-' + symbol_arr[i])
# # Plot a faded dashed line to represent the line y = 0
# plt.plot(np.linspace(0,1000,len(quad_rad_arr[0,2])), \
# np.zeros(np.shape(quad_rad_arr[0,2])), 'k--', alpha = 0.5)
# # Make the x axis of the plot logarithmic
# ax6.set_xscale('log')
# # Make the y axis tick labels invisible
# plt.setp( ax6.get_yticklabels(), visible=False)
# # Add a label to the x-axis
# plt.figtext(0.5, 0.0, 'Radial Separation [pixels]', ha = 'center', \
# va = 'bottom', fontsize = 20)
# # Add a label to the y-axis
# plt.figtext(0.03, 0.5, 'Quadrupole Ratio', ha = 'left', \
# va = 'center', fontsize = 20, rotation = 'vertical')
# # Add some text to the figure, to label the left plot as figure a
# plt.figtext(0.15, 0.94, 'a) x-LOS Real', fontsize = 18)
# # Add some text to the figure, to label the centre plot as figure b
# plt.figtext(0.42, 0.94, 'b) y-LOS Real', fontsize = 18)
# # Add some text to the figure, to label the right plot as figure c
# plt.figtext(0.7, 0.94, 'c) z-LOS Real', fontsize = 18)
# # Add some text to the figure, to label the left plot as figure d
# plt.figtext(0.15, 0.475, 'd) x-LOS Imag', fontsize = 18)
# # Add some text to the figure, to label the centre plot as figure e
# plt.figtext(0.42, 0.475, 'e) y-LOS Imag', fontsize = 18)
# # Add some text to the figure, to label the right plot as figure f
# plt.figtext(0.7, 0.475, 'f) z-LOS Imag', fontsize = 18)
# # Make sure that all of the labels are clearly visible in the plot
# #plt.tight_layout()
# # Save the figure using the given filename and format
# plt.savefig(simul_loc + 'real_imag_quad_all_sims_time_gam{}.eps'.format(gamma), format = 'eps')
# # Close the figure so that it does not stay in memory
# plt.close()
#-------------------------------------------------------------------------------
# Now that all of the statistics have been calculated, print them out to the
# screen. Loop over all of the lines of sight, and the different simulations,
# and print out results for the simulations
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the value of the mean for this line of sight
print "{} {} LOS Mean: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
mean_arr[i,j], mean_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the value of the standard deviation for this line of sight
print "{} {} LOS St Dev: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
stdev_arr[i,j], stdev_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the value of skewness for this line of sight
print "{} {} LOS Skewness: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
skew_arr[i,j], skew_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the value of kurtosis for this line of sight
print "{} {} LOS Kurtosis: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
kurt_arr[i,j], kurt_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the structure function slope for this line of sight
print "{} {} LOS SF Slope: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
m_arr[i,j], m_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the residuals for this line of sight
print "{} {} LOS Residuals: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
residual_arr[i,j], residual_err_arr[i,j])
for j in range(3):
# For this line of sight, loop over the simulations
for i in range(len(spec_locs)):
# Print out the value of the quadrupole ratio for this line of sight
print "{} {} LOS Quad Ratio: {} Error: {}".format(sim_labels[i], line_o_sight[j],\
int_quad_arr[i,j], int_quad_err_arr[i,j]) | [
"[email protected]"
] | |
59b02c1565c66a501284ae2b8e71274d82d42d8e | ce6cb09c21470d1981f1b459293d353407c8392e | /lib/jnpr/healthbot/swagger/models/command_rpc.py | 95cb417debd31ead46f6fe1b6170628878d617e1 | [
"Apache-2.0"
] | permissive | minefuto/healthbot-py-client | c4be4c9c3153ef64b37e5344bf84154e93e7b521 | bb81452c974456af44299aebf32a73abeda8a943 | refs/heads/master | 2022-12-04T07:47:04.722993 | 2020-05-13T14:04:07 | 2020-05-13T14:04:07 | 290,145,286 | 0 | 0 | Apache-2.0 | 2020-08-25T07:27:54 | 2020-08-25T07:27:53 | null | UTF-8 | Python | false | false | 8,149 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CommandRpc(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'args': 'dict(str, str)',
'filename': 'str',
'host': 'str',
'password': 'str',
'tablename': 'str',
'target': 'str',
'username': 'str'
}
attribute_map = {
'args': 'args',
'filename': 'filename',
'host': 'host',
'password': 'password',
'tablename': 'tablename',
'target': 'target',
'username': 'username'
}
def __init__(self, args=None, filename=None, host=None, password=None, tablename=None, target=None, username=None): # noqa: E501
"""CommandRpc - a model defined in Swagger""" # noqa: E501
self._args = None
self._filename = None
self._host = None
self._password = None
self._tablename = None
self._target = None
self._username = None
self.discriminator = None
if args is not None:
self.args = args
self.filename = filename
self.host = host
self.password = password
self.tablename = tablename
if target is not None:
self.target = target
self.username = username
@property
def args(self):
"""Gets the args of this CommandRpc. # noqa: E501
Optional key/value pair arguments to table # noqa: E501
:return: The args of this CommandRpc. # noqa: E501
:rtype: dict(str, str)
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this CommandRpc.
Optional key/value pair arguments to table # noqa: E501
:param args: The args of this CommandRpc. # noqa: E501
:type: dict(str, str)
"""
self._args = args
@property
def filename(self):
"""Gets the filename of this CommandRpc. # noqa: E501
Command-rpc table filename in which the table is defined # noqa: E501
:return: The filename of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""Sets the filename of this CommandRpc.
Command-rpc table filename in which the table is defined # noqa: E501
:param filename: The filename of this CommandRpc. # noqa: E501
:type: str
"""
if filename is None:
raise ValueError("Invalid value for `filename`, must not be `None`") # noqa: E501
self._filename = filename
@property
def host(self):
"""Gets the host of this CommandRpc. # noqa: E501
Host name or ip-address of the device in which command will be inspected # noqa: E501
:return: The host of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this CommandRpc.
Host name or ip-address of the device in which command will be inspected # noqa: E501
:param host: The host of this CommandRpc. # noqa: E501
:type: str
"""
if host is None:
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def password(self):
"""Gets the password of this CommandRpc. # noqa: E501
Password to connect to device # noqa: E501
:return: The password of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this CommandRpc.
Password to connect to device # noqa: E501
:param password: The password of this CommandRpc. # noqa: E501
:type: str
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def tablename(self):
"""Gets the tablename of this CommandRpc. # noqa: E501
Command-rpc table name # noqa: E501
:return: The tablename of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._tablename
@tablename.setter
def tablename(self, tablename):
"""Sets the tablename of this CommandRpc.
Command-rpc table name # noqa: E501
:param tablename: The tablename of this CommandRpc. # noqa: E501
:type: str
"""
if tablename is None:
raise ValueError("Invalid value for `tablename`, must not be `None`") # noqa: E501
self._tablename = tablename
@property
def target(self):
"""Gets the target of this CommandRpc. # noqa: E501
To run command on FPC, specifiy FPC target # noqa: E501
:return: The target of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this CommandRpc.
To run command on FPC, specifiy FPC target # noqa: E501
:param target: The target of this CommandRpc. # noqa: E501
:type: str
"""
self._target = target
@property
def username(self):
"""Gets the username of this CommandRpc. # noqa: E501
Username to connect to device # noqa: E501
:return: The username of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CommandRpc.
Username to connect to device # noqa: E501
:param username: The username of this CommandRpc. # noqa: E501
:type: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CommandRpc, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CommandRpc):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
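# Hedged usage sketch (attribute names come from this model; the host, credentials and
# table values below are made up for illustration only):
#   rpc = CommandRpc(filename="fpc-link-stats.yml", host="10.0.0.1", password="secret",
#                    tablename="FPCLinkStatTable", username="admin")
#   payload = rpc.to_dict()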
| [
"[email protected]"
] | |
0001b37bd0d1d6b08e473e5f1c41d7bc88ba50bd | 48a8430d19c4d8d6fdcecf1cb9875d74b5efce6a | /CycleGAN/data_loader.py | 7cf7d6a2fd0954c3313fa0ba7bc7a498ee9437a9 | [] | no_license | orange-eng/GAN | af00f469b763893b2e474f8adb83460164c843e0 | 7a7fafa4c6e9aac0da73791ca646b6503c39b24f | refs/heads/main | 2023-02-25T20:21:54.825164 | 2021-01-25T08:34:41 | 2021-01-25T08:34:41 | 324,327,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | import cv2
from glob import glob
# glob is a file-handling module that ships with Python; it finds files matching a pattern, much like the file search in Windows
# https://www.cnblogs.com/lovemo1314/archive/2011/04/26/2029556.html
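# A small illustration of the glob patterns used in this file (paths are examples only):
#   glob(apath + "/datasets/monet2photo/trainA/*")   # -> list of the files inside trainA
#   glob(apath + "/datasets/*/test*/*")              # wildcards may appear in any path segment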
import numpy as np
import os
import sys
apath = os.path.abspath(os.path.dirname(sys.argv[0]))
# Get all the file paths under this directory. Very convenient.
path = glob(apath+"/datasets/monet2photo/*")
print(path)
class DataLoader():
def __init__(self,dataset_name,img_res=(128,128)):
self.img_res = img_res
self.dataset_name = dataset_name
def load_data(self,domain,batch_size=1,is_testing = False):
data_type = "train%s"% domain if not is_testing else "test%s"% domain
path = glob(apath+"/datasets/%s/%s/*"%(self.dataset_name,data_type))
batch_images = np.random.choice(path,size=batch_size)
imgs = []
for img_path in batch_images:
img = self.imread(img_path)
            img = cv2.resize(img,self.img_res) # resize the image to 128*128*3
img = np.array(img)/127.5 - 1
cv2.imshow("img",img)
cv2.waitKey(0)
imgs.append(img)
return imgs
def load_batch(self,batch_size=1,is_testing=False):
data_type = "train" if not is_testing else "val"
path_A = glob(apath +'./datasets/%s/%sA/*' % (self.dataset_name, data_type))
path_B = glob(apath +'./datasets/%s/%sB/*' % (self.dataset_name, data_type))
self.n_batches = int(min(len(path_A),len(path_B)) / batch_size )
print("min:",int(min(len(path_A),len(path_B))))
total_samples = self.n_batches * batch_size
path_A = np.random.choice(path_A, total_samples, replace=False)
path_B = np.random.choice(path_B, total_samples, replace=False)
for i in range(self.n_batches - 1):
batch_A = path_A[i*batch_size:(i+1)*batch_size]
batch_B = path_B[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img_A,img_B in zip(batch_A,batch_B):
'''
a = [1, 2, 3]
b = [4, 5, 6]
                a_b_zip = zip(a, b)  # pack into a list of tuples; the number of elements matches the shortest list
                print("type of a_b_zip is %s" % type(a_b_zip))  # print the type of the object returned by zip
                a_b_zip = list(a_b_zip)  # zip returns a zip object, so it has to be converted to a list
print(a_b_zip)
'''
img_A = self.imread(img_A)
img_B = self.imread(img_B)
img_A = cv2.resize(img_A,self.img_res)
img_B = cv2.resize(img_B,self.img_res)
imgs_A.append(img_A)
imgs_B.append(img_B)
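            # Scale pixel values from [0, 255] into [-1, 1]; CycleGAN generators typically end in tanh, which produces this range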
imgs_A = np.array(imgs_A,dtype=np.float32)/127.5 - 1
imgs_B = np.array(imgs_B,dtype=np.float32)/127.5 - 1
yield imgs_A,imgs_B
            # A function that contains yield is a generator rather than an ordinary function.
            # The generator is driven by next(); each call to next() produces the "next" value,
            # and execution resumes from wherever the previous next() call stopped.
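            # A minimal standalone sketch of the same idea (illustrative only, not used by this class):
            #   def count_up(n):
            #       for k in range(n):
            #           yield k
            #   gen = count_up(3)
            #   next(gen)  # -> 0; the following next(gen) resumes after the yield and returns 1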
    # Convert an image from BGR format (OpenCV's default) to RGB format
def imread(self,path):
img = cv2.imread(path)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return img
# if __name__ == "__main__":
# Data = DataLoader(dataset_name="monet2photo")
# for batch_i,(imgs_A,imgs_B) in enumerate(Data.load_batch(50)):
# print(batch_i)
| [
"[email protected]"
] | |
bfbaca5b6b05cde10d06ec497cda64783dfb3cae | 3f9981c4ead0f5edaf85a760076518e9e89415d1 | /connectfour.py | e8685d371722aaa83a40fe97c2c928a1055bc79a | [
"MIT"
] | permissive | jthurst3/connectfour | 5ab756df61a924e08941081e0d08093db8a3280c | 43c58f67b2147cc75441ad19ae5618b51be015d4 | refs/heads/master | 2021-01-02T23:06:49.927372 | 2014-01-14T19:49:16 | 2014-01-14T19:49:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | # connectfour.py
# runs the Connect Four program (makes the game interactive)
# J. Hassler Thurston
# Connect Four
# December 30, 2013
from board import *
from pattern import *
from sequence import *
from compute_moves import *
import time
# checks to see if user entered a valid integer
# from http://stackoverflow.com/questions/1265665/python-check-if-a-string-represents-an-int-without-using-try-except
def getInt(input_arg):
try:
user_input = raw_input(input_arg)
return int(user_input)
except ValueError:
print "Did not enter a valid integer."
return getInt(input_arg)
# function that plays with a human
def play_human():
# display a starting message
print "Starting game of connect four..."
# query the user whether they want to go first or second
human_player = getInt("Do you want to go first or second? (Enter 1 or 2): ")
# initialize the board
board = Board(7,6)
exit_status = 0
while(exit_status == 0):
game_play(board, human_player)
take_back = getInt("Press 1 to exit, or 0 to take back moves: ")
if take_back == 1:
exit_status = 1
else:
num_moves = getInt("How many moves do you want to take back? ")
board.take_back_moves(num_moves)
# main stage of playing the game
def game_play(playing_board, human_player):
# alternate between querying a user for a move and moving
while playing_board.winner == 0:
if playing_board.turn == human_player:
query_move(playing_board)
else:
make_move(playing_board)
# queries the user for a move
def query_move(playing_board):
column = getInt("Your turn. Please enter your move (an open column between 1 and " + str(playing_board.columns) + ", 0 to take back moves): ")
if column == 0:
# we're taking back some moves
num_moves = getInt("How many moves do you want to take back? ")
playing_board.take_back_moves(num_moves)
return True
playing_board.move(column-1)
return True
# function that makes a move for the computer
def make_move(playing_board):
	# wait 1 second
time.sleep(1)
# make a move
compute_move(playing_board)
# print the move
print "Computer moved in column " + str(playing_board.move_history[-1]+1)
# wait .5 seconds
time.sleep(.5)
return True
# main function (for testing)
def main():
board = Board(7,6)
compute_move(board)
print board.board
if __name__ == '__main__':
play_human()
| [
"[email protected]"
] | |
8a6b6d32122534f3e8d72614bd118043a59200d6 | 69ef30768d930d1484cfba61b0d0a01eaf4baf28 | /run_dv_seq2seq.py | d6ed8894d8091cbdfd89f61e2aa98fe653ac5018 | [
"MIT"
] | permissive | vangogh0318/RWG_DV-Seq2Seq | d1195937610875392232e5387395c226e43312b3 | c91fe7e9c9fd3754af794bdacd97b61ed243dd62 | refs/heads/master | 2020-06-21T01:02:47.472108 | 2019-06-13T22:40:18 | 2019-06-13T22:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,871 | py | import utils.model_utils as mutil
from utils.lang_utils import *
from data_loaders import DVSeq2SeqDataLoader
from model_dv_seq2seq import *
from data.read_data import *
from constants import *
from params import prepare_params, merge_params
from components import LRDecayOptimizer, get_masked_nll_criterion, get_nll_criterion
def main(params):
params.model_name = "dv_seq2seq"
mutil.DEVICE_STR_OVERRIDE = params.device_str
data_train = read_col_word_delim_data("data/dv_seq2seq_train.txt")
data_valid = read_col_word_delim_data("data/dv_seq2seq_valid.txt")
data_test = read_col_word_delim_data("data/dv_seq2seq_test.txt")
w2c_src = build_w2c_from_seg_word_lists([t[0] + t[1] for t in data_train])
w2c_tgt = build_w2c_from_seg_word_lists([t[2] for t in data_train], limit=40000) # limit output vocab size
print("data_train len: {}".format(len(data_train)))
print("data_valid len: {}".format(len(data_valid)))
print("data_test len: {}".format(len(data_test)))
print("src w2c len: {}".format(len(w2c_src)))
print("tgt w2c len: {}".format(len(w2c_tgt)))
pre_built_w2v = None
src_vocab_cache_file = "cache/dv_seq2seq_src_vocab.pkl"
tgt_vocab_cache_file = "cache/dv_seq2seq_tgt_vocab.pkl"
if os.path.isfile(src_vocab_cache_file):
print("Loading src vocab from cache " + src_vocab_cache_file)
src_vocab = pickle.load(open(src_vocab_cache_file, "rb"))
else:
print("Building src vocab")
if pre_built_w2v is None:
pre_built_w2v = load_gensim_word_vec(params.word_vec_file,
cache_file=params.vocab_cache_file)
src_vocab = W2VTrainableVocab(w2c_src, pre_built_w2v, embedding_dim=params.word_embedding_dim, rand_oov_embed=True,
special_tokens=(
PAD_TOKEN,
OOV_TOKEN,
), light_weight=True)
pickle.dump(src_vocab, open(src_vocab_cache_file, "wb"), protocol=4)
params.src_vocab_size = len(src_vocab.w2i)
print("src vocab size: ", params.src_vocab_size)
if os.path.isfile(tgt_vocab_cache_file):
print("Loading tgt vocab from cache " + tgt_vocab_cache_file)
tgt_vocab = pickle.load(open(tgt_vocab_cache_file, "rb"))
else:
print("Building tgt vocab")
if pre_built_w2v is None:
pre_built_w2v = load_gensim_word_vec(params.word_vec_file,
cache_file=params.vocab_cache_file)
tgt_vocab = W2VTrainableVocab(w2c_tgt, pre_built_w2v, embedding_dim=params.word_embedding_dim, rand_oov_embed=False,
special_tokens=(
PAD_TOKEN,
OOV_TOKEN,
SOS_TOKEN,
EOS_TOKEN,
), light_weight=True)
pickle.dump(tgt_vocab, open(tgt_vocab_cache_file, "wb"), protocol=4)
params.tgt_vocab_size = len(tgt_vocab.w2i)
print("tgt vocab size: ", params.tgt_vocab_size)
params.src_w2i = src_vocab.w2i
params.src_i2w = src_vocab.i2w
params.tgt_w2i = tgt_vocab.w2i
params.tgt_i2w = tgt_vocab.i2w
params.w2i = tgt_vocab.w2i
params.i2w = tgt_vocab.i2w
params.pad_idx = tgt_vocab.pad_idx
params.oov_idx = tgt_vocab.oov_idx
params.sos_idx = tgt_vocab.w2i[SOS_TOKEN]
params.eos_idx = tgt_vocab.w2i[EOS_TOKEN]
print("Preparing data loaders")
train_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_train)
valid_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_valid)
test_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_test)
print("{} overlapped train/test instances detected".format(len(train_loader.get_overlapping_data(test_loader))))
print("{} overlapped train/valid instances detected".format(len(train_loader.get_overlapping_data(valid_loader))))
print("{} overlapped valid/test instances detected".format(len(valid_loader.get_overlapping_data(test_loader))))
print("Initializing " + params.model_name)
criterion_gen = get_masked_nll_criterion(len(tgt_vocab))
criterion_cpy = get_nll_criterion()
model = make_dv_seq2seq_model(src_vocab.w2v_mat if params.use_pretrained_embedding else None,
tgt_vocab.w2v_mat if params.use_pretrained_embedding else None,
params, len(src_vocab), len(tgt_vocab),
same_word_embedding=params.same_word_embedding)
model_opt = LRDecayOptimizer(
torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lrd_initial_lr,
betas=(params.adam_betas_1, params.adam_betas_2),
eps=params.adam_eps, weight_decay=params.adam_l2),
initial_lr=params.lrd_initial_lr, shrink_factor=params.lrd_lr_decay_factor,
min_lr=params.lrd_min_lr, past_scores_considered=params.lrd_past_lr_scores_considered,
score_method="min", verbose=True, max_fail_limit=params.lrd_max_fail_limit)
completed_epochs = 0
best_eval_result = 0
best_eval_epoch = 0
past_eval_results = []
if os.path.isfile(params.saved_model_file):
print("Found saved model {}, loading".format(params.saved_model_file))
sd = mutil.model_load(params.saved_model_file)
saved_params = sd[CHKPT_PARAMS]
params = merge_params(saved_params, params)
model.load_state_dict(sd[CHKPT_MODEL])
model_opt.load_state_dict(sd[CHKPT_OPTIMIZER])
best_eval_result = sd[CHKPT_BEST_EVAL_RESULT]
best_eval_epoch = sd[CHKPT_BEST_EVAL_EPOCH]
past_eval_results = sd[CHKPT_PAST_EVAL_RESULTS]
completed_epochs = sd[CHKPT_COMPLETED_EPOCHS]
print(model)
print("Model name: {}".format(params.model_name))
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Number of trainable parameters: {}".format(n_params))
if not os.path.isfile(params.saved_model_file) or \
(os.path.isfile(params.saved_model_file) and params.continue_training):
print("Training")
try:
train_dv_seq2seq(params, model, train_loader, criterion_gen, criterion_cpy, model_opt,
completed_epochs=completed_epochs, best_eval_result=best_eval_result,
best_eval_epoch=best_eval_epoch, past_eval_results=past_eval_results,
eval_loader=valid_loader)
except KeyboardInterrupt:
print("training interrupted")
if len(test_loader) > 0:
fn = params.saved_models_dir + params.model_name + "_best.pt"
exclude_tokens = [SOS_TOKEN, EOS_TOKEN, PAD_TOKEN, "", " "]
if os.path.isfile(fn):
sd = mutil.model_load(fn)
completed_epochs = sd[CHKPT_COMPLETED_EPOCHS]
model.load_state_dict(sd[CHKPT_MODEL])
print("Loaded best model after {} epochs of training".format(completed_epochs))
with torch.no_grad():
model.eval()
write_line_to_file("input|pred|truth|", f_path=params.model_name + "_test_results.txt")
for batch in tqdm(test_loader, mininterval=2, desc="Test", leave=False, ascii=True):
beam_rvs = dv_seq2seq_beam_decode_batch(model, batch, params.sos_idx, tgt_vocab.i2w,
eos_idx=params.eos_idx,
len_norm=params.bs_len_norm, gamma=params.bs_div_gamma,
max_len=params.max_decoded_seq_len,
beam_width=params.beam_width_test)
for bi in range(batch[DK_BATCH_SIZE]):
msg_str = "".join(batch[DK_SRC_SEG_LISTS][bi])
truth_rsp_seg = [w for w in batch[DK_TGT_SEG_LISTS][bi] if w not in exclude_tokens]
truth_rsp_str = " ".join(truth_rsp_seg)
truth_rsp_str = re.sub(" +", " ", truth_rsp_str)
best_rv = [w for w in beam_rvs[bi][0][3] if w not in exclude_tokens] # word seg list
rsp = " ".join(best_rv)
write_line_to_file(msg_str + "|" + rsp + "|" + truth_rsp_str,
params.model_name + "_test_results.txt")
if __name__ == "__main__":
args = prepare_params()
main(args)
print("done")
| [
"[email protected]"
] | |
fae2ea084bc5df24b62506cd59bd23ec8f37c2f0 | 1229ecf1a824e6e3b21b7334241efd739b8f87e9 | /Dataleum Hackathon.py | d7f41ce057bfa7dffaba22706e041acfa9dcfe9e | [
"MIT"
] | permissive | ImonEmmanuel/Dataleum-virtual-hackathon | d2b7987836645eb112c1d3f2ef9672550cac5e95 | f089323911dd78aeb26091441956613c48386b93 | refs/heads/master | 2022-07-13T13:09:59.927181 | 2020-05-16T03:35:39 | 2020-05-16T03:35:39 | 264,351,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
# In[2]:
agent=pd.read_csv('Agents.csv')
# In[3]:
agent.head(4)
# In[4]:
agent.shape
# In[5]:
#Checked if there is any duplicated columns
agent.duplicated().sum()
# In[6]:
#Infomation about the dataframe
agent.info()
# In[7]:
#types of the data in which there are encoded
agent.dtypes
# In[8]:
#Checking statistical values of our dataset
agent.describe()
# In[9]:
#Converting the TransactionDate to datetime
agent['TransactionDate']=pd.to_datetime(agent['TransactionDate'])
agent['AccountOpenedDate']=pd.to_datetime(agent['AccountOpenedDate'])
# In[10]:
agent.dtypes
# ## Basic EDA
# In[11]:
'''
Performed a grouby to assigned each Agent to it total amount of transaction done'''
agent_groub=agent.groupby(by=agent['FullName'])
x=np.round(agent_groub.sum()['TransactionAmount'])
agent['total_amount']=agent['FullName'].map(x)
# In[12]:
#Each Transaction has a unique agent
agent.total_amount.nunique() - agent.FullName.nunique()
# In[13]:
agent[agent.total_amount==299944121].head(5)
# In[14]:
agent_top_10=agent.groupby('FullName').sum().sort_values(by='TransactionAmount', ascending=False).head(10)
plt.figure(figsize=(10,8))
plt.xlabel('Full Name')
plt.ylabel('Transaction Amount')
plt.xticks(rotation=20)
plt.title('Transaction Amount by Agent Since Inception of Company')
plt.plot(agent_top_10['TransactionAmount'], color='red', marker='+', linestyle=':')
plt.show();
# ## Highest Sale in 2019
# In[15]:
#create a column for the month column
agent['month']=agent['TransactionDate'].dt.month
agent['year']=agent['TransactionDate'].dt.year
# In[16]:
#Looping through the month column to form a new column quarter in the dataset
quarter=[]
for i in agent['month']:
if i==1 or i==2 or i==3:
quarter.append(1)
elif i==4 or i==5 or i==6:
quarter.append(2)
elif i==7 or i==8 or i==9:
quarter.append(3)
elif i==10 or i==11 or i==12:
quarter.append(4)
agent['quarters']=quarter
# In[17]:
agent_2019_quarter4=agent[(agent['year']==2019) & (agent['quarters']==4)]
# In[18]:
agent_quarter_top_10=agent_2019_quarter4.groupby('FullName').sum().sort_values(by='TransactionAmount', ascending=False).head(10)
plt.figure(figsize=(10,8))
plt.xlabel('Full Name')
plt.ylabel('Transaction Amount')
plt.xticks(rotation=30)
plt.title('Transaction For 2019')
plt.plot(agent_quarter_top_10['TransactionAmount'],color='green', marker='o', linestyle='dashed')
plt.show();
# ## Agent Raj Verma
# In[19]:
agent_raj_verma=agent[agent['FullName']=='Raj Verma']
print('Shape of the dataset',agent_raj_verma.shape)
agent_raj_verma.head(4)
# In[20]:
agent_sum=agent_raj_verma[agent_raj_verma['year']==2020]['TransactionAmount'].sum()
agent_sum=np.round(agent_sum/1000000, decimals=2)
print('Total Amount of Transaction Carried out by Agent Raj Verma for the First Quater of 2020:',agent_sum,'Million')
# In[21]:
quarter_1=agent_raj_verma[(agent_raj_verma['year']==2019) &(agent_raj_verma['quarters']==1)]['TransactionAmount'].sum()
quarter_2=agent_raj_verma[(agent_raj_verma['year']==2019) &(agent_raj_verma['quarters']==2)]['TransactionAmount'].sum()
quarter_3=agent_raj_verma[(agent_raj_verma['year']==2019) &(agent_raj_verma['quarters']==3)]['TransactionAmount'].sum()
quarter_4=agent_raj_verma[(agent_raj_verma['year']==2019) &(agent_raj_verma['quarters']==4)]['TransactionAmount'].sum()
(quarter_1, quarter_2, quarter_3, quarter_4)=np.round(quarter_1/1000000, decimals=2),np.round(quarter_2/1000000, decimals=2),np.round(quarter_3/1000000, decimals=2),np.round(quarter_4/1000000, decimals=2)
print(f'Transaction for quarter 1 {quarter_1}Million')
print(f'Transaction for quarter 2 {quarter_2}Million')
print(f'Transaction for quarter 3 {quarter_3}Million')
print(f'Transaction for quarter 4 {quarter_4}Million')
# In[22]:
#Created a new dataframe for the quarter columns
quarter_data={'Quarter':['Quarter 1', 'Quarter 2', 'Quarter 3', 'Quarter 4'], 'Amount in Million':[19.67, 16.15, 15.29, 5.38]}
quarter_data=pd.DataFrame(quarter_data)
# In[23]:
quarter_data.plot(x='Quarter', y='Amount in Million', kind='bar',
legend=True,title='Transaction by Quarter of Agent Raj Virma');
# In[ ]:
# In[ ]:
# # SUPPLIER EVALUATION
# In[24]:
supplier=pd.read_csv('DSuppliers.csv')
# In[25]:
supplier.head(4)
# In[26]:
#Checked if there is any duplicated columns
supplier.duplicated().sum()
# In[27]:
supplier.shape
# In[28]:
supplier.info()
# In[29]:
supplier.dtypes
# In[30]:
#Checking statistical values of our dataset
supplier.describe()
# In[31]:
#Checking for the total number of supplier Name of all products
print('Number of Suppliers:',supplier.SupplierName.nunique())
supplier.SupplierName.value_counts()
# In[32]:
#Checking for the total number of products
print('Number of Unique Products:',supplier.SupplierCategoryName.nunique())
supplier.SupplierCategoryName.value_counts()
# In[33]:
supplier_name=supplier.groupby('SupplierName').sum().sort_values(by='TransactionAmount', ascending=False)
supplier_name
# In[34]:
plt.figure(figsize=(10,8))
plt.xlabel('Full Name')
plt.ylabel('Transaction Amount')
plt.xticks(rotation=30)
#plt.legend()
plt.title('Transaction Amount of Supplies')
plt.plot(supplier_name['TransactionAmount'],color='pink', marker='*', linestyle='dashed')
plt.show();
# In[35]:
total_amt=(supplier.TransactionAmount.sum())/1000000000
total_amt=np.round(total_amt, decimals=2)
print('Total Money Spent on supplies:',total_amt,'Billion')
# In[36]:
agent.to_csv('Agents_new.csv')
# In[ ]:
# ## I hope it is comprehensive Enough
# In[ ]:
| [
"[email protected]"
] | |
20b26ad604fb71653761602a7d8bea355b90865c | 2b409a23a3fa6ba70dd98c4c604237330ff77ead | /gatekeeper/settings.py | 512aa92db12e72e660aaae7901b4f58345f81e93 | [] | no_license | taobluesky/gatekeeper | fdcdeb352a66aa20c4676ce02f9ad0fce23d283a | eac91828b466720f24024173ec73d37572e3bd42 | refs/heads/master | 2021-01-23T13:30:06.594316 | 2013-08-13T13:01:10 | 2013-08-13T13:01:10 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 7,349 | py | # -*- coding: utf-8 -*-
import os
# Django settings for gatekeeper project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [
'10.67.92.77',
'127.0.0.1',
]
DIRNAME = os.path.dirname(__file__).replace('\\','/')
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'gatekeeper', # Or path to database file if using sqlite3.
'USER': 'H7104943', # Not used with sqlite3.
'PASSWORD': 'tjit123', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'#'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-TW'#'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = DIRNAME + '/static/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
DIRNAME + '/static/site_static',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'zit@&pgr4lbs6ebk&r-gn=p$1tr958uqr^)qgmu!&9j9a4c+cv'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'gatekeeper.urls'
TEMPLATE_DIRS = (
DIRNAME + '/templates',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.app_directories.load_template_source',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#'gatekeeper.sign',
#'gatekeeper.auth',
#'gatekeeper.appform',
'gatekeeper.account',
'gatekeeper.application',
'gatekeeper.core',
'gatekeeper.mail',
'gatekeeper.message_center',
'gatekeeper.management',
'gatekeeper.pdf',
'bootstrap_toolkit',
#'tinymce',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
},
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
# 'django.contrib.messages.context_processors.messages',
)
gettext_noop = lambda s: s
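# gettext_noop simply returns its argument: it marks the language names below for Django's
# translation tooling without importing the translation machinery while settings are loading
# (a common Django settings idiom).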
LANGUAGES = (
('en', gettext_noop('English')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
DOWNLOAD_ROOT = MEDIA_ROOT + '/share_files/'
APP_PDF_ROOT = MEDIA_ROOT + '/app_pdf/'
#SESSION_COOKIE_AGE = 60
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 25
# Message Center configuration
MESSAGE_CENTER_URL = 'http://msgcenter.ecmms.foxconn:2571/Messaging.asmx?WSDL'
MESSAGE_CENTER_USER = 'GateKeeperUid'
MESSAGE_CENTER_PASSWORD= 'GateKeeperPwd'
MESSAGE_CENTER_FROM_EMAIL = '[email protected]'
# bootstrap settings
BOOTSTRAP_BASE_URL = STATIC_URL
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'js/'
# Enable for single bootstrap.js file
#BOOTSTRAP_JS_URL = BOOTSTRAP_JS_BASE_URL + 'bootstrap.js' | [
"[email protected]"
] | |
76d616278649a3882297bca1ec53242a02e4fad1 | 348ce2ed37b439c793c764eb48cffa9fabb3e9bc | /probe_wrapper.py | d4221a037c6c00ab201b4440326d951d997fb3dc | [
"Apache-2.0"
] | permissive | fabianfreyer/unicorefuzz | fc771d06bc352b979f37f3adaca2c5b96cb0de4f | 54c85d3c3b610f2d098eecc31b8937d73ea5903b | refs/heads/master | 2020-07-14T05:33:30.002672 | 2019-08-29T20:45:17 | 2019-08-29T20:45:17 | 205,250,990 | 0 | 0 | null | 2019-08-29T21:09:58 | 2019-08-29T21:09:57 | null | UTF-8 | Python | false | false | 4,944 | py | #!/usr/bin/env python3
import os
import socket
import re
import sys
import time
import shutil
import inotify.adapters
from datetime import datetime
from sh import which
from avatar2 import archs, Avatar, GDBTarget
from utils import get_base, get_arch, all_regs, REQUEST_FOLDER, STATE_FOLDER, REJECTED_ENDING
GDB_PATH = which("gdb")
def dump(workdir, target, base_address):
mem = target.read_memory(base_address, 0x1000, raw=True)
with open(os.path.join(workdir, STATE_FOLDER, "{:016x}".format(base_address)), "wb") as f:
f.write(mem)
print("[*] {}: Dumped 0x{:016x}".format(datetime.now(), base_address))
def forward_requests(target, workdir, requests_path, output_path):
filenames = os.listdir(requests_path)
while len(filenames):
for filename in filenames:
base_address = get_base(int(filename, 16))
try:
print("[+] {}: Received request for {:016x}".format(datetime.now(), base_address))
if not os.path.isfile(os.path.join(output_path, str(base_address))):
dump(workdir, target, base_address)
# we should restart afl now
except KeyboardInterrupt as ex:
print("cya")
exit(0)
except Exception as e:
print("Could not get memory region at {}: {} (Found mem corruption?)".format(
hex(base_address), repr(e)))
with open(os.path.join(output_path, "{:016x}{}".format(base_address, REJECTED_ENDING)), 'a') as f:
f.write(repr(e))
os.remove(os.path.join(requests_path, filename))
filenames = os.listdir(requests_path)
def main(workdir, module=None, breakoffset=None, breakaddress=None, reset_state=True, arch="x64", gdb_port=1234):
request_path = os.path.join(workdir, REQUEST_FOLDER)
output_path = os.path.join(workdir, STATE_FOLDER)
if arch != "x64":
        raise ValueError("Unsupported arch")
if reset_state:
try:
shutil.rmtree(output_path)
except:
pass
try:
os.makedirs(output_path, exist_ok=True)
except:
pass
if module:
if breakaddress is not None:
            raise ValueError("Breakaddress and module supplied. They are not compatible.")
        if breakoffset is None:
            raise ValueError("Module but no breakoffset specified. Don't know where to break.")
mem_addr = os.popen("./get_mod_addr.sh " + module).readlines()
try:
mem_addr = int(mem_addr[0], 16)
except ValueError as ex:
print("Error decoding module addr. Either module {} has not been loaded or something went wrong with ssh ({})".format(module, ex))
exit(-1)
print("Module " + module + " is at memory address " + hex(mem_addr))
breakaddress = hex(mem_addr + breakoffset)
else:
breakaddress = hex(breakaddress)
avatar = Avatar(arch=get_arch(arch),
output_directory=os.path.join(workdir, "avatar"))
target = avatar.add_target(
GDBTarget, gdb_port=gdb_port, gdb_executable=GDB_PATH)
target.init()
target.set_breakpoint("*{}".format(breakaddress))
print("[*] Breakpoint set at {}".format(breakaddress))
print("[+] waiting for bp hit...")
target.cont()
target.wait()
print("[+] hit! dumping registers and memory")
# dump registers
for reg in all_regs(get_arch(arch)):
written = True
reg_file = os.path.join(output_path, reg)
with open(reg_file, "w") as f:
try:
val = target.read_register(reg)
if isinstance(val, list):
# Avatar special registers (xmm, ...)
i32list = val
val = 0
for shift, i32 in enumerate(i32list):
val += (i32 << (shift * 32))
f.write(str(val))
except Exception as ex:
#print("Ignoring {}: {}".format(reg, ex))
written = False
if not written:
os.unlink(reg_file)
try:
os.mkdir(request_path)
except:
pass
forward_requests(target, workdir, request_path, output_path)
print("[*] Initial dump complete. Listening for requests from ./harness.py.")
i = inotify.adapters.Inotify()
    # only react to files that have been fully written (IN_CLOSE_WRITE fires when a file opened for writing is closed)
i.add_watch(request_path, mask=inotify.constants.IN_CLOSE_WRITE)
for event in i.event_gen(yield_nones=False):
#print("Request: ", event)
forward_requests(target, workdir, request_path, output_path)
print("[*] Exiting probe_wrapper (keyboard interrupt)")
if __name__ == "__main__":
import config
main(
module=config.MODULE,
breakoffset=config.BREAKOFFSET,
breakaddress=config.BREAKADDR,
workdir=config.WORKDIR,
arch=config.ARCH,
gdb_port=config.GDB_PORT
)
| [
"[email protected]"
] | |
5076dc84170e0886f5e445b5068ebeb7b5a3d44b | 14f1b75de10bdf052535fc6c2821802c3f058736 | /selection_sort/selection_sort_blind.py | 736e85f140b13a8e83b62f984d862ecda351e105 | [] | no_license | asherif844/algorithms | a4fdaed64d1d61395e5f1ea12d0fda7d0f45d7a4 | 146617523dcf5e47f1134722eb3aa38ff02099c1 | refs/heads/master | 2022-06-10T06:26:39.689346 | 2022-05-16T16:44:22 | 2022-05-16T16:44:22 | 245,258,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | def sorted_sort(array):
original_index = 0
while original_index < len(array) - 1:
smallest_index = original_index
for i in range(smallest_index+1, len(array)):
if array[i] < array[smallest_index]:
smallest_index = i
swap(smallest_index, original_index, array)
original_index += 1
return array
def swap(smallest_index, original_index, array):
array[smallest_index], array[original_index] = array[original_index], array[smallest_index]
# return array
old_array = [5, 4, 3, 2, -5, -6, 10, 200, 36, 0]
print(sorted_sort(old_array))
| [
"[email protected]"
] | |
7ede8b2e4b1c7240868b9fbd8862209cc5388c58 | 31b85e2abc35fa633257cc372c8fa059c75241d9 | /superset/models/core.py | 8e7c1998b64e7d239f3201ea593391b7a9d81cb4 | [
"Apache-2.0"
] | permissive | Zandut/Superset-Funnel | b28cf1c0768bfcf7ab630c622c7ca3755212bfe8 | cff832b2d584f859ceb025349b615c25afa524b7 | refs/heads/master | 2022-12-04T07:10:29.164996 | 2020-08-24T08:05:16 | 2020-08-24T08:05:16 | 288,700,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,993 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""A collection of ORM sqlalchemy models for Superset"""
from contextlib import closing
from copy import copy, deepcopy
from datetime import datetime
import json
import logging
import textwrap
from typing import List
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.security.sqla.models import User
import numpy
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy import (
Boolean,
Column,
create_engine,
DateTime,
ForeignKey,
Integer,
MetaData,
String,
Table,
Text,
)
from sqlalchemy.engine import url
from sqlalchemy.engine.url import make_url
from sqlalchemy.orm import relationship, sessionmaker, subqueryload
from sqlalchemy.orm.session import make_transient
from sqlalchemy.pool import NullPool
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy_utils import EncryptedType
import sqlparse
from superset import app, db, db_engine_specs, is_feature_enabled, security_manager
from superset.connectors.connector_registry import ConnectorRegistry
from superset.legacy import update_time_range
from superset.models.helpers import AuditMixinNullable, ImportMixin
from superset.models.tags import ChartUpdater, DashboardUpdater, FavStarUpdater
from superset.models.user_attributes import UserAttribute
from superset.utils import cache as cache_util, core as utils
from superset.viz import viz_types
from urllib import parse # noqa
config = app.config
custom_password_store = config.get("SQLALCHEMY_CUSTOM_PASSWORD_STORE")
stats_logger = config.get("STATS_LOGGER")
log_query = config.get("QUERY_LOGGER")
metadata = Model.metadata # pylint: disable=no-member
PASSWORD_MASK = "X" * 10
def set_related_perm(mapper, connection, target): # noqa
src_class = target.cls_model
id_ = target.datasource_id
if id_:
ds = db.session.query(src_class).filter_by(id=int(id_)).first()
if ds:
target.perm = ds.perm
def copy_dashboard(mapper, connection, target):
dashboard_id = config.get("DASHBOARD_TEMPLATE_ID")
if dashboard_id is None:
return
Session = sessionmaker(autoflush=False)
session = Session(bind=connection)
new_user = session.query(User).filter_by(id=target.id).first()
# copy template dashboard to user
template = session.query(Dashboard).filter_by(id=int(dashboard_id)).first()
dashboard = Dashboard(
dashboard_title=template.dashboard_title,
position_json=template.position_json,
description=template.description,
css=template.css,
json_metadata=template.json_metadata,
slices=template.slices,
owners=[new_user],
)
session.add(dashboard)
session.commit()
# set dashboard as the welcome dashboard
extra_attributes = UserAttribute(
user_id=target.id, welcome_dashboard_id=dashboard.id
)
session.add(extra_attributes)
session.commit()
sqla.event.listen(User, "after_insert", copy_dashboard)
class Url(Model, AuditMixinNullable):
"""Used for the short url feature"""
__tablename__ = "url"
id = Column(Integer, primary_key=True)
url = Column(Text)
class KeyValue(Model):
"""Used for any type of key-value store"""
__tablename__ = "keyvalue"
id = Column(Integer, primary_key=True)
value = Column(Text, nullable=False)
class CssTemplate(Model, AuditMixinNullable):
"""CSS templates for dashboards"""
__tablename__ = "css_templates"
id = Column(Integer, primary_key=True)
template_name = Column(String(250))
css = Column(Text, default="")
slice_user = Table(
"slice_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
)
class Slice(Model, AuditMixinNullable, ImportMixin):
"""A slice is essentially a report or a view on data"""
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
datasource_id = Column(Integer)
datasource_type = Column(String(200))
datasource_name = Column(String(2000))
viz_type = Column(String(250))
params = Column(Text)
description = Column(Text)
cache_timeout = Column(Integer)
perm = Column(String(1000))
owners = relationship(security_manager.user_model, secondary=slice_user)
export_fields = (
"slice_name",
"datasource_type",
"datasource_name",
"viz_type",
"params",
"cache_timeout",
)
def __repr__(self):
return self.slice_name or str(self.id)
@property
def cls_model(self):
return ConnectorRegistry.sources[self.datasource_type]
@property
def datasource(self):
return self.get_datasource
def clone(self):
return Slice(
slice_name=self.slice_name,
datasource_id=self.datasource_id,
datasource_type=self.datasource_type,
datasource_name=self.datasource_name,
viz_type=self.viz_type,
params=self.params,
description=self.description,
cache_timeout=self.cache_timeout,
)
@datasource.getter # type: ignore
@utils.memoized
def get_datasource(self):
return db.session.query(self.cls_model).filter_by(id=self.datasource_id).first()
@renders("datasource_name")
def datasource_link(self):
# pylint: disable=no-member
datasource = self.datasource
return datasource.link if datasource else None
def datasource_name_text(self):
# pylint: disable=no-member
datasource = self.datasource
return datasource.name if datasource else None
@property
def datasource_edit_url(self):
# pylint: disable=no-member
datasource = self.datasource
return datasource.url if datasource else None
@property # type: ignore
@utils.memoized
def viz(self):
d = json.loads(self.params)
viz_class = viz_types[self.viz_type]
# pylint: disable=no-member
return viz_class(datasource=self.datasource, form_data=d)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ""
try:
d = self.viz.data
self.token = d.get("token")
except Exception as e:
logging.exception(e)
d["error"] = str(e)
return {
"datasource": self.datasource_name,
"description": self.description,
"description_markeddown": self.description_markeddown,
"edit_url": self.edit_url,
"form_data": self.form_data,
"slice_id": self.id,
"slice_name": self.slice_name,
"slice_url": self.slice_url,
"modified": self.modified(),
"changed_on_humanized": self.changed_on_humanized,
"changed_on": self.changed_on.isoformat(),
}
@property
def json_data(self):
return json.dumps(self.data)
@property
def form_data(self):
form_data = {}
try:
form_data = json.loads(self.params)
except Exception as e:
logging.error("Malformed json in slice's params")
logging.exception(e)
form_data.update(
{
"slice_id": self.id,
"viz_type": self.viz_type,
"datasource": "{}__{}".format(self.datasource_id, self.datasource_type),
}
)
if self.cache_timeout:
form_data["cache_timeout"] = self.cache_timeout
update_time_range(form_data)
return form_data
def get_explore_url(self, base_url="/metrix/explore", overrides=None):
overrides = overrides or {}
form_data = {"slice_id": self.id}
form_data.update(overrides)
params = parse.quote(json.dumps(form_data))
return f"{base_url}/?form_data={params}"
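    # Illustrative example (an assumption for clarity, not taken from the original
    # source): for a slice with id 42 and no overrides, form_data is
    # {"slice_id": 42}, so the default call returns
    #   /metrix/explore/?form_data=%7B%22slice_id%22%3A%2042%7D
    # i.e. the JSON payload URL-quoted by urllib.parse.quote.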
@property
def slice_url(self):
"""Defines the url to access the slice"""
return self.get_explore_url()
@property
def explore_json_url(self):
"""Defines the url to access the slice"""
return self.get_explore_url("/metrix/explore_json")
@property
def edit_url(self):
return "/chart/edit/{}".format(self.id)
@property
def chart(self):
return self.slice_name or "<empty>"
@property
def slice_link(self):
url = self.slice_url
name = escape(self.chart)
return Markup(f'<a href="{url}">{name}</a>')
def get_viz(self, force=False):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params)
slice_params["slice_id"] = self.id
slice_params["json"] = "false"
slice_params["slice_name"] = self.slice_name
slice_params["viz_type"] = self.viz_type if self.viz_type else "table"
return viz_types[slice_params.get("viz_type")](
self.datasource, form_data=slice_params, force=force
)
@property
def icons(self):
return f"""
<a
href="{self.datasource_edit_url}"
data-toggle="tooltip"
title="{self.datasource}">
<i class="fa fa-database"></i>
</a>
"""
@classmethod
def import_obj(cls, slc_to_import, slc_to_override, import_time=None):
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
slc_to_import.reset_ownership()
params = slc_to_import.params_dict
slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
session,
slc_to_import.datasource_type,
params["datasource_name"],
params["schema"],
params["database_name"],
).id
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logging.info("Final slice: {}".format(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
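    # Hedged usage sketch (assumption, not part of the original file): a caller
    # importing an exported slice typically pairs it with any existing slice whose
    # stored remote_id matches, e.g.
    #   new_id = Slice.import_obj(slc_to_import, matching_slice_or_none,
    #                             import_time=1577836800)
    # and receives the id of the overridden or newly inserted slice.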
@property
def url(self):
return "/metrix/explore/?form_data=%7B%22slice_id%22%3A%20{0}%7D".format(
self.id
)
sqla.event.listen(Slice, "before_insert", set_related_perm)
sqla.event.listen(Slice, "before_update", set_related_perm)
dashboard_slices = Table(
"dashboard_slices",
metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
UniqueConstraint("dashboard_id", "slice_id"),
)
dashboard_user = Table(
"dashboard_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
)
class Dashboard(Model, AuditMixinNullable, ImportMixin):
"""The dashboard object!"""
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(utils.MediumText())
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship("Slice", secondary=dashboard_slices, backref="dashboards")
owners = relationship(security_manager.user_model, secondary=dashboard_user)
published = Column(Boolean, default=False)
export_fields = (
"dashboard_title",
"position_json",
"json_metadata",
"description",
"css",
"slug",
)
def __repr__(self):
return self.dashboard_title or str(self.id)
@property
def table_names(self):
# pylint: disable=no-member
return ", ".join({"{}".format(s.datasource.full_name) for s in self.slices})
@property
def url(self):
if self.json_metadata:
# add default_filters to the preselect_filters of dashboard
json_metadata = json.loads(self.json_metadata)
default_filters = json_metadata.get("default_filters")
# make sure default_filters is not empty and is valid
if default_filters and default_filters != "{}":
try:
if json.loads(default_filters):
filters = parse.quote(default_filters.encode("utf8"))
return "/metrix/dashboard/{}/?preselect_filters={}".format(
self.slug or self.id, filters
)
except Exception:
pass
return "/metrix/dashboard/{}/".format(self.slug or self.id)
@property
def datasources(self):
return {slc.datasource for slc in self.slices}
@property
def charts(self):
return [slc.chart for slc in self.slices]
@property
def sqla_metadata(self):
# pylint: disable=no-member
metadata = MetaData(bind=self.get_sqla_engine())
return metadata.reflect()
def dashboard_link(self):
title = escape(self.dashboard_title or "<empty>")
return Markup(f'<a href="{self.url}">{title}</a>')
@property
def data(self):
positions = self.position_json
if positions:
positions = json.loads(positions)
return {
"id": self.id,
"metadata": self.params_dict,
"css": self.css,
"dashboard_title": self.dashboard_title,
"published": self.published,
"slug": self.slug,
"slices": [slc.data for slc in self.slices],
"position_json": positions,
}
@property
def params(self):
return self.json_metadata
@params.setter
def params(self, value):
self.json_metadata = value
@property
def position(self):
if self.position_json:
return json.loads(self.position_json)
return {}
@classmethod
def import_obj(cls, dashboard_to_import, import_time=None):
"""Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copies over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(dashboard, old_to_new_slc_id_dict):
""" Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "DASHBOARD_CHART_TYPE",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta").get("chartId")
):
old_slice_id = value.get("meta").get("chartId")
if old_slice_id in old_to_new_slc_id_dict:
value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
logging.info(
"Started import of the dashboard: {}".format(dashboard_to_import.to_json())
)
session = db.session
logging.info("Dashboard has {} slices".format(len(dashboard_to_import.slices)))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
old_to_new_slc_id_dict = {}
new_filter_immune_slices = []
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict["remote_id"]: slc
for slc in session.query(Slice).all()
if "remote_id" in slc.params_dict
}
for slc in slices:
logging.info(
"Importing slice {} from the dashboard: {}".format(
slc.to_json(), dashboard_to_import.dashboard_title
)
)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = Slice.import_obj(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = "{}".format(new_slc_id)
old_slc_id_str = "{}".format(slc.id)
if (
"filter_immune_slices" in i_params_dict
and old_slc_id_str in i_params_dict["filter_immune_slices"]
):
new_filter_immune_slices.append(new_slc_id_str)
if (
"timed_refresh_immune_slices" in i_params_dict
and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (
"expanded_slices" in i_params_dict
and old_slc_id_str in i_params_dict["expanded_slices"]
):
new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
old_slc_id_str
]
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if (
"remote_id" in dash.params_dict
and dash.params_dict["remote_id"] == dashboard_to_import.id
):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
# position_json can be empty for dashboards
# with charts added from chart-edit page and without re-arranging
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_filter_immune_slices:
dashboard_to_import.alter_params(
filter_immune_slices=new_filter_immune_slices
)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices
)
new_slices = (
session.query(Slice)
.filter(Slice.id.in_(old_to_new_slc_id_dict.values()))
.all()
)
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
else:
dashboard_to_import.slices = new_slices
session.add(dashboard_to_import)
session.flush()
return dashboard_to_import.id
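    # Hedged round-trip sketch (assumption, not part of the original file):
    #   payload = Dashboard.export_dashboards([dash.id])     # JSON string
    #   ... move payload to another instance, rebuild the objects ...
    #   new_id = Dashboard.import_obj(imported_dashboard, import_time=ts)
    # The remote_id kept in json_metadata is what decides between overriding an
    # existing dashboard and inserting a copy.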
@classmethod
def export_dashboards(cls, dashboard_ids):
copied_dashboards = []
datasource_ids = set()
for dashboard_id in dashboard_ids:
# make sure that dashboard_id is an integer
dashboard_id = int(dashboard_id)
dashboard = (
db.session.query(Dashboard)
.options(subqueryload(Dashboard.slices))
.filter_by(id=dashboard_id)
.first()
)
# remove ids and relations (like owners, created by, slices, ...)
copied_dashboard = dashboard.copy()
for slc in dashboard.slices:
datasource_ids.add((slc.datasource_id, slc.datasource_type))
copied_slc = slc.copy()
# save original id into json
# we need it to update dashboard's json metadata on import
copied_slc.id = slc.id
# add extra params for the import
copied_slc.alter_params(
remote_id=slc.id,
datasource_name=slc.datasource.datasource_name,
schema=slc.datasource.schema,
database_name=slc.datasource.database.name,
)
# set slices without creating ORM relations
slices = copied_dashboard.__dict__.setdefault("slices", [])
slices.append(copied_slc)
copied_dashboard.alter_params(remote_id=dashboard_id)
copied_dashboards.append(copied_dashboard)
eager_datasources = []
for datasource_id, datasource_type in datasource_ids:
eager_datasource = ConnectorRegistry.get_eager_datasource(
db.session, datasource_type, datasource_id
)
copied_datasource = eager_datasource.copy()
copied_datasource.alter_params(
remote_id=eager_datasource.id,
database_name=eager_datasource.database.name,
)
datasource_class = copied_datasource.__class__
for field_name in datasource_class.export_children:
field_val = getattr(eager_datasource, field_name).copy()
# set children without creating ORM relations
copied_datasource.__dict__[field_name] = field_val
eager_datasources.append(copied_datasource)
return json.dumps(
{"dashboards": copied_dashboards, "datasources": eager_datasources},
cls=utils.DashboardEncoder,
indent=4,
)
class Database(Model, AuditMixinNullable, ImportMixin):
"""An ORM object that stores Database related information"""
__tablename__ = "dbs"
type = "table"
__table_args__ = (UniqueConstraint("database_name"),)
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
database_name = Column(String(250), unique=True, nullable=False)
sqlalchemy_uri = Column(String(1024))
password = Column(EncryptedType(String(1024), config.get("SECRET_KEY")))
cache_timeout = Column(Integer)
select_as_create_table_as = Column(Boolean, default=False)
expose_in_sqllab = Column(Boolean, default=True)
allow_run_async = Column(Boolean, default=False)
allow_csv_upload = Column(Boolean, default=False)
allow_ctas = Column(Boolean, default=False)
allow_dml = Column(Boolean, default=False)
force_ctas_schema = Column(String(250))
allow_multi_schema_metadata_fetch = Column(Boolean, default=False)
extra = Column(
Text,
default=textwrap.dedent(
"""\
{
"metadata_params": {},
"engine_params": {},
"metadata_cache_timeout": {},
"schemas_allowed_for_csv_upload": []
}
"""
),
)
perm = Column(String(1000))
impersonate_user = Column(Boolean, default=False)
export_fields = (
"database_name",
"sqlalchemy_uri",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_ctas",
"allow_csv_upload",
"extra",
)
export_children = ["tables"]
def __repr__(self):
return self.name
@property
def name(self):
return self.verbose_name if self.verbose_name else self.database_name
@property
def allows_subquery(self):
return self.db_engine_spec.allows_subqueries
@property
def allows_cost_estimate(self) -> bool:
extra = self.get_extra()
database_version = extra.get("version")
cost_estimate_enabled = extra.get("cost_estimate_enabled")
return (
self.db_engine_spec.get_allow_cost_estimate(database_version)
and cost_estimate_enabled
)
@property
def data(self):
return {
"id": self.id,
"name": self.database_name,
"backend": self.backend,
"allow_multi_schema_metadata_fetch": self.allow_multi_schema_metadata_fetch,
"allows_subquery": self.allows_subquery,
"allows_cost_estimate": self.allows_cost_estimate,
}
@property
def unique_name(self):
return self.database_name
@property
def url_object(self):
return make_url(self.sqlalchemy_uri_decrypted)
@property
def backend(self):
url = make_url(self.sqlalchemy_uri_decrypted)
return url.get_backend_name()
@property
def metadata_cache_timeout(self):
return self.get_extra().get("metadata_cache_timeout", {})
@property
def schema_cache_enabled(self):
return "schema_cache_timeout" in self.metadata_cache_timeout
@property
def schema_cache_timeout(self):
return self.metadata_cache_timeout.get("schema_cache_timeout")
@property
def table_cache_enabled(self):
return "table_cache_timeout" in self.metadata_cache_timeout
@property
def table_cache_timeout(self):
return self.metadata_cache_timeout.get("table_cache_timeout")
@property
def default_schemas(self):
return self.get_extra().get("default_schemas", [])
@classmethod
def get_password_masked_url_from_uri(cls, uri):
url = make_url(uri)
return cls.get_password_masked_url(url)
@classmethod
def get_password_masked_url(cls, url):
url_copy = deepcopy(url)
if url_copy.password is not None and url_copy.password != PASSWORD_MASK:
url_copy.password = PASSWORD_MASK
return url_copy
def set_sqlalchemy_uri(self, uri):
conn = sqla.engine.url.make_url(uri.strip())
if conn.password != PASSWORD_MASK and not custom_password_store:
# do not over-write the password with the password mask
self.password = conn.password
conn.password = PASSWORD_MASK if conn.password else None
self.sqlalchemy_uri = str(conn) # hides the password
def get_effective_user(self, url, user_name=None):
"""
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
"""
effective_username = None
if self.impersonate_user:
effective_username = url.username
if user_name:
effective_username = user_name
elif (
hasattr(g, "user")
and hasattr(g.user, "username")
and g.user.username is not None
):
effective_username = g.user.username
return effective_username
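    # Resolution order illustrated (example values are assumptions): with
    # impersonate_user=True the URL's username is the starting point, an explicit
    # user_name argument wins over it, and a logged-in g.user (e.g. "alice") is
    # used only when no user_name was passed. With impersonate_user=False the
    # method returns None.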
@utils.memoized(watch=("impersonate_user", "sqlalchemy_uri_decrypted", "extra"))
def get_sqla_engine(self, schema=None, nullpool=True, user_name=None, source=None):
extra = self.get_extra()
url = make_url(self.sqlalchemy_uri_decrypted)
url = self.db_engine_spec.adjust_database_uri(url, schema)
effective_username = self.get_effective_user(url, user_name)
# If using MySQL or Presto for example, will set url.username
# If using Hive, will not do anything yet since that relies on a
# configuration parameter instead.
self.db_engine_spec.modify_url_for_impersonation(
url, self.impersonate_user, effective_username
)
masked_url = self.get_password_masked_url(url)
logging.info("Database.get_sqla_engine(). Masked URL: {0}".format(masked_url))
params = extra.get("engine_params", {})
if nullpool:
params["poolclass"] = NullPool
# If using Hive, this will set hive.server2.proxy.user=$effective_username
configuration = {}
configuration.update(
self.db_engine_spec.get_configuration_for_impersonation(
str(url), self.impersonate_user, effective_username
)
)
if configuration:
d = params.get("connect_args", {})
d["configuration"] = configuration
params["connect_args"] = d
DB_CONNECTION_MUTATOR = config.get("DB_CONNECTION_MUTATOR")
if DB_CONNECTION_MUTATOR:
url, params = DB_CONNECTION_MUTATOR(
url, params, effective_username, security_manager, source
)
return create_engine(url, **params)
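    # Hedged sketch of a DB_CONNECTION_MUTATOR hook (the body is an assumption;
    # only the call signature above is given): the configured callable receives
    # (url, params, effective_username, security_manager, source) and must return
    # the possibly modified (url, params) pair, e.g.
    #   def DB_CONNECTION_MUTATOR(url, params, username, security_manager, source):
    #       params.setdefault("connect_args", {})["application_name"] = username
    #       return url, params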
def get_reserved_words(self):
return self.get_dialect().preparer.reserved_words
def get_quoter(self):
return self.get_dialect().identifier_preparer.quote
def get_df(self, sql, schema, mutator=None):
sqls = [str(s).strip().strip(";") for s in sqlparse.parse(sql)]
source_key = None
if request and request.referrer:
if "/metrix/dashboard/" in request.referrer:
source_key = "dashboard"
elif "/metrix/explore/" in request.referrer:
source_key = "chart"
engine = self.get_sqla_engine(
schema=schema, source=utils.sources.get(source_key, None)
)
username = utils.get_username()
def needs_conversion(df_series):
if df_series.empty:
return False
if isinstance(df_series[0], (list, dict)):
return True
return False
def _log_query(sql):
if log_query:
log_query(engine.url, sql, schema, username, __name__, security_manager)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
for sql in sqls[:-1]:
_log_query(sql)
self.db_engine_spec.execute(cursor, sql)
cursor.fetchall()
_log_query(sqls[-1])
self.db_engine_spec.execute(cursor, sqls[-1])
if cursor.description is not None:
columns = [col_desc[0] for col_desc in cursor.description]
else:
columns = []
df = pd.DataFrame.from_records(
data=list(cursor.fetchall()), columns=columns, coerce_float=True
)
if mutator:
df = mutator(df)
for k, v in df.dtypes.items():
if v.type == numpy.object_ and needs_conversion(df[k]):
df[k] = df[k].apply(utils.json_dumps_w_dates)
return df
def compile_sqla_query(self, qry, schema=None):
engine = self.get_sqla_engine(schema=schema)
sql = str(qry.compile(engine, compile_kwargs={"literal_binds": True}))
if engine.dialect.identifier_preparer._double_percents:
sql = sql.replace("%%", "%")
return sql
def select_star(
self,
table_name,
schema=None,
limit=100,
show_cols=False,
indent=True,
latest_partition=False,
cols=None,
):
"""Generates a ``select *`` statement in the proper dialect"""
eng = self.get_sqla_engine(
schema=schema, source=utils.sources.get("sql_lab", None)
)
return self.db_engine_spec.select_star(
self,
table_name,
schema=schema,
engine=eng,
limit=limit,
show_cols=show_cols,
indent=indent,
latest_partition=latest_partition,
cols=cols,
)
def apply_limit_to_sql(self, sql, limit=1000):
return self.db_engine_spec.apply_limit_to_sql(sql, limit, self)
def safe_sqlalchemy_uri(self):
return self.sqlalchemy_uri
@property
def inspector(self):
engine = self.get_sqla_engine()
return sqla.inspect(engine)
@cache_util.memoized_func(
key=lambda *args, **kwargs: "db:{}:schema:None:table_list",
attribute_in_key="id",
)
def get_all_table_names_in_database(
        self, cache: bool = False, cache_timeout: int = None, force=False
) -> List[utils.DatasourceName]:
"""Parameters need to be passed as keyword arguments."""
if not self.allow_multi_schema_metadata_fetch:
return []
return self.db_engine_spec.get_all_datasource_names(self, "table")
@cache_util.memoized_func(
key=lambda *args, **kwargs: "db:{}:schema:None:view_list", attribute_in_key="id"
)
def get_all_view_names_in_database(
        self, cache: bool = False, cache_timeout: int = None, force: bool = False
) -> List[utils.DatasourceName]:
"""Parameters need to be passed as keyword arguments."""
if not self.allow_multi_schema_metadata_fetch:
return []
return self.db_engine_spec.get_all_datasource_names(self, "view")
@cache_util.memoized_func(
key=lambda *args, **kwargs: "db:{{}}:schema:{}:table_list".format(
kwargs.get("schema")
),
attribute_in_key="id",
)
def get_all_table_names_in_schema(
self,
schema: str,
cache: bool = False,
cache_timeout: int = None,
force: bool = False,
):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:param cache: whether cache is enabled for the function
:param cache_timeout: timeout in seconds for the cache
:param force: whether to force refresh the cache
:return: list of tables
"""
try:
tables = self.db_engine_spec.get_table_names(
database=self, inspector=self.inspector, schema=schema
)
return [
utils.DatasourceName(table=table, schema=schema) for table in tables
]
except Exception as e:
logging.exception(e)
@cache_util.memoized_func(
key=lambda *args, **kwargs: "db:{{}}:schema:{}:view_list".format(
kwargs.get("schema")
),
attribute_in_key="id",
)
def get_all_view_names_in_schema(
self,
schema: str,
cache: bool = False,
cache_timeout: int = None,
force: bool = False,
):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:param cache: whether cache is enabled for the function
:param cache_timeout: timeout in seconds for the cache
:param force: whether to force refresh the cache
:return: list of views
"""
try:
views = self.db_engine_spec.get_view_names(
database=self, inspector=self.inspector, schema=schema
)
return [utils.DatasourceName(table=view, schema=schema) for view in views]
except Exception as e:
logging.exception(e)
@cache_util.memoized_func(
key=lambda *args, **kwargs: "db:{}:schema_list", attribute_in_key="id"
)
def get_all_schema_names(
self, cache: bool = False, cache_timeout: int = None, force: bool = False
) -> List[str]:
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param cache: whether cache is enabled for the function
:param cache_timeout: timeout in seconds for the cache
:param force: whether to force refresh the cache
:return: schema list
"""
return self.db_engine_spec.get_schema_names(self.inspector)
@property
def db_engine_spec(self):
return db_engine_specs.engines.get(self.backend, db_engine_specs.BaseEngineSpec)
@classmethod
def get_db_engine_spec_for_backend(cls, backend):
return db_engine_specs.engines.get(backend, db_engine_specs.BaseEngineSpec)
def grains(self):
"""Defines time granularity database-specific expressions.
The idea here is to make it easy for users to change the time grain
from a datetime (maybe the source grain is arbitrary timestamps, daily
or 5 minutes increments) to another, "truncated" datetime. Since
each database has slightly different but similar datetime functions,
this allows a mapping between database engines and actual functions.
"""
return self.db_engine_spec.get_time_grains()
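    # Illustrative mapping (the engine-specific SQL is an assumption, not taken
    # from this file): a "day" grain might become DATE_TRUNC('day', col) on
    # Postgres but DATE(col) on MySQL; get_time_grains() returns those per-engine
    # expressions so a chart can switch grain without hand-written SQL.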
def get_extra(self):
extra = {}
if self.extra:
try:
extra = json.loads(self.extra)
except Exception as e:
logging.error(e)
raise e
return extra
def get_table(self, table_name, schema=None):
extra = self.get_extra()
meta = MetaData(**extra.get("metadata_params", {}))
return Table(
table_name,
meta,
schema=schema or None,
autoload=True,
autoload_with=self.get_sqla_engine(),
)
def get_columns(self, table_name, schema=None):
return self.db_engine_spec.get_columns(self.inspector, table_name, schema)
def get_indexes(self, table_name, schema=None):
return self.inspector.get_indexes(table_name, schema)
def get_pk_constraint(self, table_name, schema=None):
return self.inspector.get_pk_constraint(table_name, schema)
def get_foreign_keys(self, table_name, schema=None):
return self.inspector.get_foreign_keys(table_name, schema)
def get_schema_access_for_csv_upload(self):
return self.get_extra().get("schemas_allowed_for_csv_upload", [])
@property
def sqlalchemy_uri_decrypted(self):
conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
if custom_password_store:
conn.password = custom_password_store(conn)
else:
conn.password = self.password
return str(conn)
@property
def sql_url(self):
return "/metrix/sql/{}/".format(self.id)
def get_perm(self):
return ("[{obj.database_name}].(id:{obj.id})").format(obj=self)
def has_table(self, table):
engine = self.get_sqla_engine()
return engine.has_table(table.table_name, table.schema or None)
def has_table_by_name(self, table_name, schema=None):
engine = self.get_sqla_engine()
return engine.has_table(table_name, schema)
@utils.memoized
def get_dialect(self):
sqla_url = url.make_url(self.sqlalchemy_uri_decrypted)
return sqla_url.get_dialect()()
sqla.event.listen(Database, "after_insert", security_manager.set_perm)
sqla.event.listen(Database, "after_update", security_manager.set_perm)
class Log(Model):
"""ORM object used to log Superset actions to the database"""
__tablename__ = "logs"
id = Column(Integer, primary_key=True)
action = Column(String(512))
user_id = Column(Integer, ForeignKey("ab_user.id"))
dashboard_id = Column(Integer)
slice_id = Column(Integer)
json = Column(Text)
user = relationship(
security_manager.user_model, backref="logs", foreign_keys=[user_id]
)
dttm = Column(DateTime, default=datetime.utcnow)
duration_ms = Column(Integer)
referrer = Column(String(1024))
class FavStar(Model):
__tablename__ = "favstar"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("ab_user.id"))
class_name = Column(String(50))
obj_id = Column(Integer)
dttm = Column(DateTime, default=datetime.utcnow)
class DatasourceAccessRequest(Model, AuditMixinNullable):
"""ORM model for the access requests for datasources and dbs."""
__tablename__ = "access_request"
id = Column(Integer, primary_key=True)
datasource_id = Column(Integer)
datasource_type = Column(String(200))
ROLES_BLACKLIST = set(config.get("ROBOT_PERMISSION_ROLES", []))
@property
def cls_model(self):
return ConnectorRegistry.sources[self.datasource_type]
@property
def username(self):
return self.creator()
@property
def datasource(self):
return self.get_datasource
@datasource.getter # type: ignore
@utils.memoized
def get_datasource(self):
# pylint: disable=no-member
ds = db.session.query(self.cls_model).filter_by(id=self.datasource_id).first()
return ds
@property
def datasource_link(self):
return self.datasource.link # pylint: disable=no-member
@property
def roles_with_datasource(self):
action_list = ""
perm = self.datasource.perm # pylint: disable=no-member
pv = security_manager.find_permission_view_menu("datasource_access", perm)
for r in pv.role:
if r.name in self.ROLES_BLACKLIST:
continue
# pylint: disable=no-member
url = (
f"/metrix/approve?datasource_type={self.datasource_type}&"
f"datasource_id={self.datasource_id}&"
f"created_by={self.created_by.username}&role_to_grant={r.name}"
)
href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
action_list = action_list + "<li>" + href + "</li>"
return "<ul>" + action_list + "</ul>"
@property
def user_roles(self):
action_list = ""
for r in self.created_by.roles: # pylint: disable=no-member
# pylint: disable=no-member
url = (
f"/metrix/approve?datasource_type={self.datasource_type}&"
f"datasource_id={self.datasource_id}&"
f"created_by={self.created_by.username}&role_to_extend={r.name}"
)
href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
if r.name in self.ROLES_BLACKLIST:
href = "{} Role".format(r.name)
action_list = action_list + "<li>" + href + "</li>"
return "<ul>" + action_list + "</ul>"
# events for updating tags
if is_feature_enabled("TAGGING_SYSTEM"):
sqla.event.listen(Slice, "after_insert", ChartUpdater.after_insert)
sqla.event.listen(Slice, "after_update", ChartUpdater.after_update)
sqla.event.listen(Slice, "after_delete", ChartUpdater.after_delete)
sqla.event.listen(Dashboard, "after_insert", DashboardUpdater.after_insert)
sqla.event.listen(Dashboard, "after_update", DashboardUpdater.after_update)
sqla.event.listen(Dashboard, "after_delete", DashboardUpdater.after_delete)
sqla.event.listen(FavStar, "after_insert", FavStarUpdater.after_insert)
sqla.event.listen(FavStar, "after_delete", FavStarUpdater.after_delete)
| [
"[email protected]"
] | |
b4325c34ca7df66f80c392e2397c2f0e76593e7f | 053b2c101bc3aad4132f46dd687d071176e2c332 | /bidwire/tests/test_cityofboston_scraper.py | 4e6ca2661b028da02526d8beb6f6349f7566503f | [
"MIT"
] | permissive | RagtagOpen/bidwire | 1e58fb0f184b5ff3b88bcf4c091f8ceeb6d556fa | 0de7ee2dce87127f69e0c4bdc2c7b080cd1d4a45 | refs/heads/master | 2021-01-23T06:55:45.588981 | 2018-01-14T20:26:31 | 2018-01-14T20:26:31 | 86,412,959 | 5 | 7 | MIT | 2019-04-30T15:10:51 | 2017-03-28T03:56:42 | HTML | UTF-8 | Python | false | false | 424 | py | import pytest
from scrapers.cityofboston_scraper import CityOfBostonScraper
from . import utils
def test_scrape_results_page():
page_str = open(utils.get_abs_filename('cityofboston-results-page.html'), 'r').read()
cityofboston_scraper = CityOfBostonScraper()
bid_ids = cityofboston_scraper.scrape_results_page(page_str)
assert len(bid_ids) == 79
assert "23315" in bid_ids
assert "23360" in bid_ids
| [
"[email protected]"
] | |
ed0647fd49b4f07b9905aa8cfcf2a943da682bc5 | 3c0b737c35cb41c38ffad4041f124ccaa5e7a874 | /app/.~c9_invoke_4bHZ4W.py | 0b377eef72d177116058619982d0b42c1b50f647 | [] | no_license | kevelcampbell/info3180-project1 | a4650c863a7793027566cbe907dd54855b0b7f84 | d105e8837fb3594f4d3ea848d024093983507a21 | refs/heads/master | 2020-05-25T00:45:50.062536 | 2017-03-14T02:28:23 | 2017-03-14T02:28:23 | 84,894,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,773 | py | """
Flask Documentation: http://flask.pocoo.org/docs/
Jinja2 Documentation: http://jinja.pocoo.org/2/documentation/
Werkzeug Documentation: http://werkzeug.pocoo.org/documentation/
This file creates your application.
"""
import os
import psycopg2
from datetime import datetime
from random import randint
from app import app,db
from flask import render_template, request, redirect, url_for, flash, session, abort, jsonify,Response,json
from models import UserProfile
from werkzeug.utils import secure_filename
###
# Routing for your application.
###
@app.route('/')
def home():
"""Render website's home page."""
return render_template('home.html')
@app.route('/about/')
def about():
"""Render the website's about page."""
return render_template('about.html')
@app.route('/profile',methods=['POST','GET'])
def profile():
"""Render the website's profile page."""
file_folder = 'app/static/uploads'
filename='no file'
if request.method == 'POST':
uid=620000000+randint(10000,99999)
creation =datetime.now()
fname=request.form.get('fname')
lname=request.form.get('lname')
bio=request.form.get('bio')
file = request.files['profile_image']
filename = secure_filename(file.filename)
file.save(os.path.join(file_folder, filename))
profile_image=filename
age=request.form.get('age')
gender=request.form.get('gender')
user =UserProfile(id=uid,profile_creation=creation,first_name=fname,
last_name=lname,bio=bio,imagename=profile_image,age=age,gender=gender)
db.session.add(user)
db.session.commit()
flash("Information accepted")
return redirect(url_for('home'))
return render_template('profile.html')
@app.route('/profiles',methods=['GET','POST'])
def profiles():
"""Render the website's profiles page."""
users =[]
query='SELECT first_name,id FROM user_profile;'
entries=db.session.execute(query)
for entry in entries:
uname,uid =entry[0],entry[1]
users+=[{"username":uname,"userid":uid}]
results =users
return render_template('profiles.html',profiles=results)
#return Response(JSON.parse(results), mimetype='application/json')#Content-Type:
@app.route('/profile/<userid>',methods=['GET','POST'])
def profile_id(userid):
"""Render the website's unique profile page."""
query="SELECT id,first_name,imagename,gender,age,profile_creation FROM user_profile WHERE id={0};".format(userid)
    profile_info = db.session.execute(query)
    uid, uname, profile_pic, gender, age, created_on = None, None, None, None, None, None
    for user in profile_info:
uid,uname,profile_pic,gender,age,created_on =user[0],user[1],user[2],user[3],user[4],user[5]
result={"userid":uid,"username":uname, "image":profile_pic,
"gender": gender, "age": age, "profile_created_on":created_on }
return render_template('unique_profile.html',profile=result)
###
# The functions below should be applicable to all Flask apps.
###
@app.route('/<file_name>.txt')
def send_text_file(file_name):
"""Send your static text file."""
file_dot_text = file_name + '.txt'
return app.send_static_file(file_dot_text)
@app.after_request
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.errorhandler(404)
def page_not_found(error):
"""Custom 404 page."""
return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True,host="0.0.0.0",port="8080")
| [
"[email protected]"
] | |
be55d9627d221ef15a7208f8625d6dac784efb54 | 64ae307de1a6966ec948662df695cd09cefd5eff | /Day_6/mm_project/mm_project/tests/conftest.py | b924ef165574f9638d0da245dcbadd78736a65fc | [
"BSD-3-Clause"
] | permissive | janash/mm_project_sss2019 | 3a4d61c4f4dbe7eee5af401d831e7483480bb509 | 84f9da3efe335a7024213ddae6fd56113d4fda09 | refs/heads/master | 2020-05-19T23:03:53.143517 | 2019-07-26T23:03:08 | 2019-07-26T23:03:08 | 185,258,555 | 0 | 2 | BSD-3-Clause | 2019-07-09T17:33:19 | 2019-05-06T19:26:20 | Python | UTF-8 | Python | false | false | 682 | py | """
Fixtures for monte carlo tests
"""
# Import package, test suite, and other packages as needed
import mm_project as mc
import numpy as np
import os
import pytest
import sys
@pytest.fixture
def nist_file():
current_directory = os.path.dirname(os.path.abspath(__file__))
nist_file = os.path.join(current_directory,'..', 'data', 'nist_sample_config1.txt')
coordinates = mc.generate_initial_coordinates(method='file', fname=nist_file)
return coordinates, nist_file
@pytest.fixture
def mc_box(nist_file):
coordinates = nist_file[0][0]
box_length = nist_file[0][1]
fname = nist_file[1]
test_box = mc.Box(box_length, coordinates)
return test_box | [
"[email protected]"
] | |
0c333c2e1333ac21f072ca09b4dcd701240565d7 | b631676cd50d395e23a9db68b9cc04fd125aff2e | /banned_users.py | a1df8e434ae9cd5a3aa92567aeb82c46c660afca | [] | no_license | souzalaercio2004/CodigosPython | 8208c3bfd05bc98822ae8b083d94cab6c4a6df60 | 8f9462ac97f928bf3ad936078c58bd899fbd45df | refs/heads/master | 2023-07-04T14:16:56.642889 | 2021-08-17T03:04:33 | 2021-08-17T03:04:33 | 397,054,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | banned_users= ['andrew', 'carolina', 'david']
user= 'marie'
if user not in banned_users:
print (user.title()+ " , you can post a response if you wish.")
| [
"[email protected]"
] | |
e1178d40768143ba64bb0408256b99e52cf2f234 | 9009629a5fdd8f5cdd7454fc43d010a288871a28 | /multi_layer_perceptron.py | 51838c6803e76f638d56ba57205fa376c1515b10 | [] | no_license | dunderwood88/theano-playground | 17b61cbe609b79b7d57da3b3050aad061486ee48 | 503c3f1d770bbf1764fe35e69eb20819bb4baf9b | refs/heads/master | 2021-04-12T10:03:30.991312 | 2018-03-23T15:20:24 | 2018-03-23T15:20:24 | 126,502,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,768 | py | import numpy as np
import matplotlib.pyplot as plt
import theano
from theano import tensor as T
class Layer:
def __init__(self, weights_init, bias_init, activation):
'''
Individual layer constructor
:param weights_init: initialized weight matrix connecting nodes between layers
:param bias_init: initialized bias vector for the layer
:param activation: activation function for the layer's output
'''
dim_output, dim_input = weights_init.shape
assert bias_init.shape == (dim_output,)
self.weights = theano.shared(value=weights_init.astype(theano.config.floatX), name='weights', borrow=True)
self.bias = theano.shared(value=bias_init.reshape(dim_output, 1).astype(theano.config.floatX), name='bias',
borrow=True, broadcastable=(False, True))
self.activation = activation
self.params = [self.weights, self.bias]
def output(self, x):
'''
Computes an output based on processing an input vector through a weight matrix,
adding a bias, and then feeding through an activation function: a(Wx + b)
Note: activation is element-wise for output vector
:param x: input feature vector
:return: the final computational output of the layer (a vector)
'''
lin_output = T.dot(self.weights, x) + self.bias
return lin_output if self.activation is None else self.activation(lin_output)
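# Worked shape example for Layer.output (illustrative only): with weights_init of
# shape (3, 2) and bias_init of shape (3,), an input x of shape (2, n_samples)
# gives T.dot(weights, x) of shape (3, n_samples); the (3, 1) bias broadcasts
# across columns and the sigmoid is applied element-wise, so the layer maps 2
# input features to 3 outputs per sample.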
class MLP:
def __init__(self, topology):
'''
Multilayer perceptron constructor
:param topology: description of layer sequence defining the MLP
'''
self.layers = []
for n_input, n_output in zip(topology[:-1], topology[1:]):
self.layers.append(Layer(np.random.randn(n_output, n_input),
np.ones(n_output), T.nnet.sigmoid))
self.params = []
for layer in self.layers:
self.params += layer.params
def output(self, x):
'''
Computes an output based on processing an input vector through multiple layers of the MLP
:param x: input feature vector
:return: the final computational output of the MLP
'''
# recursively compute the output through each layer
for layer in self.layers:
x = layer.output(x)
return x
def error(self, x, y):
'''
Cost function to be minimized using gradient descent method
:param x: input
:param y: target output
:return: error function
'''
return T.sum(-(y * T.log(self.output(x)) + (1 - y) * T.log(1 - self.output(x))))
def gradient_updates(self, cost, learning_rate):
'''
Provides the updates to weight and bias parameters in the MLP
:param cost: cost function for determining derivatives w.r.t. parameters
:param learning_rate: rate of gradient descent
:return: updated parameter list
'''
updates = []
for param in self.params:
updates.append((param, param - learning_rate * T.grad(cost, param)))
return updates
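    # The update rule assembled above is plain batch gradient descent
    # (illustrative restatement): for every weight matrix and bias vector,
    #   param_new = param - learning_rate * d(cost)/d(param)
    # with the derivative obtained symbolically via T.grad(cost, param).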
# EXAMPLE: XOR Function
inputs = np.vstack(np.array([[0, 0, 1, 1], [0, 1, 0, 1]])).astype(theano.config.floatX)
targets = np.array([1, 0, 0, 1]).astype(theano.config.floatX)
# First, set the size of each layer (and the number of layers)
# Input layer size is training data dimensionality (2)
# Output size is just 1-d: 0 or 1
# Finally, let the hidden layers be twice the size of the input.
# If we wanted more layers, just add another layer size to this list.
# topology = [inputs.shape[0], inputs.shape[0]*2, 1]
topology = [2, 3, 3, 1]
mlp = MLP(topology)
# Create Theano variables for the MLP input and targets
mlp_input = T.matrix('mlp_input')
mlp_target = T.vector('mlp_target')
# Learning rate
learning_rate = 0.01
# Create definition for computing the cost of the network given an input
cost = mlp.error(mlp_input, mlp_target)
# Create a theano function for training the network - parameters are updated based on the cost definition
train = theano.function([mlp_input, mlp_target], cost, updates=mlp.gradient_updates(cost, learning_rate))
# Create a theano function for computing the MLP output given some input
mlp_output = theano.function([mlp_input], mlp.output(mlp_input))
iteration = 0
cost = []
max_iteration = 30000
while iteration < max_iteration:
current_cost = train(inputs, targets)
cost.append(current_cost)
iteration += 1
output = mlp_output(inputs)
for i in range(len(inputs[1])):
print('The output for x1 = %d | x2 = %d is %.2f' % (inputs[0][i], inputs[1][i], output[0][i]))
# plot the cost minimization:
plt.plot(cost)
plt.show()
| [
"[email protected]"
] | |
ad054ac451ef06373f179e21b87ed7d2fab498a0 | 3d9cd521db737f32bd7de1dd6cbb05b5f219c329 | /14_1.py | 4ad6019429325169bee9a64ab905cb8fb4625f16 | [] | no_license | shesha4572/11 | 233348b542dded0da9b3069b643c62253706bf92 | aa33864dec1901030da82370b0b167a875991705 | refs/heads/master | 2020-08-03T21:36:33.744192 | 2020-02-02T15:40:47 | 2020-02-02T15:40:47 | 211,893,716 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | def disp(s):
print("Selected operation : Display the string")
print(s)
def cpy_str(s):
str1 = ""
print("Selected operation : Copy the string")
print("String before copying :" ,str1)
str1 = s
print("String after copying :" ,str1)
def concat_str():
print("Selected operation : Concatenate the string")
str1 = "Hello "
str2 = "How are you?"
print("String 1 :" ,str1)
print("String 2 :" ,str2)
print("After concatenation : " ,(str1 + str2))
a = input("Enter the string : ")
disp(a)
cpy_str(a)
concat_str() | [
"[email protected]"
] | |
59a9a8b45ff2d4b0170b6627f7c1223a314ecfea | c75a0628053ab5e0f79640a38465d024716437b9 | /Turtle Project/Pattern Project/Pattern3.py | 75d982542f41a6ff53e94e07e647caf85279b5e1 | [] | no_license | mohit11R/Turtle-Projects | 147591978f8c8a985154642149a38569a286c1c9 | 74141113ec4f59b62f96be68f3b5a16e72070ea6 | refs/heads/main | 2023-03-22T09:57:21.175773 | 2021-03-17T08:27:45 | 2021-03-17T08:27:45 | 329,230,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # INSIDE OUT SQUARES
import turtle
pattern=turtle.Screen()
pattern.bgcolor("light grey")
shape = turtle.Turtle()
shape.color("blue")
shape.width(5)
def shapeOFSquare(size):
for _ in range(4):
shape.forward(size)
shape.left(90)
size = size +5
shapeOFSquare(6)
shapeOFSquare(26)
shapeOFSquare(46)
shapeOFSquare(66)
shapeOFSquare(86)
shapeOFSquare(106)
shapeOFSquare(126)
shapeOFSquare(146) | [
"[email protected]"
] | |
9e886d74f75b734efd48e17ef284b61748d29972 | b5fd5d41ce9d0b5e7ac885d2571b86a7351ff304 | /test.py | fc0b7f5a6b6a9d763626198741577f29b78acbf3 | [] | no_license | welias13/AI_HW3 | 222cad204a88fc7f18836d00cc0268127980ade7 | 8a03c2677bb6438f71e00e3bb1c7704490ab7f5a | refs/heads/master | 2021-01-13T03:48:01.151949 | 2016-12-29T19:04:31 | 2016-12-29T19:04:31 | 77,216,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | from blaze.expr.reductions import count
import run_game
import numpy as np
import csv
import vars
# global PAWN_WEIGHT
# PAWN_WEIGHT = 1
# global KING_WEIGHT
# vars.a = 1.5
# vars.b = 2
# vars.c = 2
# vars.d = 1
# vars.e = 2
# vars.f = 0
# vars.g = 0
# result = 'tie'
# while result=='tie':
# game = run_game.GameRunner(2, 3, 5, "y", "AI3_204360267_308311356.better_h_player", "simple_player")
# result = game.run()
# if result == "tie":
# print("it's a tie !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# a = 1.5
# b = 2
# c = 2
# d = 1
# e = 2
result_dict = {}
a = 1.5
b = 2
c = 2
for f in np.arange(1, 2.5, 0.5):
for g in np.arange(1, 2.5, 0.5):
for e in range(1, 3):
vars.a = a
vars.b = b
vars.c = c
vars.d = 3 - e
vars.e = e
vars.f = f
vars.g = g
for i in range(1, 5):
listx = ''.join(
[str(a), '|', str(b), '|', str(c), '|', str(3 - e), '|', str(e), '|', str(f), '|', str(g)])
game = run_game.GameRunner(2, 3, 5, "n", "AI3_204360267_308311356.better_h_player",
"simple_player")
result = game.run()
if result != 'tie':
result = result[0]
if listx in result_dict:
result_dict[listx][result] += 1
else:
result_dict[listx] = {'red': 0, 'black': 0, 'tie': 0}
result_dict[listx][result] += 1
with open('results.csv', 'w', newline='') as out:
csv_out = csv.writer(out)
for params_index in result_dict.keys():
red_res = result_dict[params_index]['red']
black_res = result_dict[params_index]['black']
tie_res = result_dict[params_index]['tie']
row = [params_index, red_res, black_res, tie_res]
# row = [params_index, 'red', red_res,'black',black_res,'tie',tie_res]
csv_out.writerow(row)
exit(0)
result_dict = {}
for i in range(10):
listx = ''.join([str(a), '|', str(b), '|', str(c), '|', str(d), '|', str(e)])
game = run_game.GameRunner(2, 3, 5, "n", "AI3_204360267_308311356.better_h_player",
"simple_player")
result = game.run()
if result != 'tie':
result = result[0]
if listx in result_dict:
result_dict[listx][result] += 1
else:
result_dict[listx] = {'red': 0, 'black': 0, 'tie': 0}
result_dict[listx][result] += 1
print(result_dict)
exit(0)
counter = 0
result_dict = {}
for a in np.arange(1, 2.5, 0.5):
for b in np.arange(1, 2.5, 0.5):
for c in np.arange(1, 2.5, 0.5):
for d in np.arange(1, 2.5, 0.5):
for e in np.arange(1, 2.5, 0.5):
vars.a = a
vars.b = b
vars.c = c
vars.d = d
vars.e = e
for i in range(1, 5):
listx = ''.join([str(a), '|', str(b), '|', str(c), '|', str(d), '|', str(e)])
game = run_game.GameRunner(2, 3, 5, "n", "AI3_204360267_308311356.better_h_player",
"simple_player")
result = game.run()
if result != 'tie':
result = result[0]
if listx in result_dict:
result_dict[listx][result] += 1
else:
result_dict[listx] = {'red': 0, 'black': 0, 'tie': 0}
result_dict[listx][result] += 1
# break
# break
# break
# break
# break
with open('results.csv', 'w', newline='') as out:
csv_out = csv.writer(out)
for params_index in result_dict.keys():
red_res = result_dict[params_index]['red']
black_res = result_dict[params_index]['black']
tie_res = result_dict[params_index]['tie']
row = [params_index, red_res, black_res, tie_res]
# row = [params_index, 'red', red_res,'black',black_res,'tie',tie_res]
csv_out.writerow(row)
| [
"[email protected]"
] | |
15a141f4f9422aa5fae65ea0a3ca5efebdf34467 | d0ece6a2f09da681bfe8af22e34a520592317fa4 | /Discord Bots/DiscordBot/cogs/devcmds.py | 00c45511952fb676f6fb8fd0584333a45fbae4be | [
"MIT"
] | permissive | SeymoTheDev/skittles-stuff | d59e599d877935b260fa36d11f44401bdd5ae192 | f9eba3efd0577045085418391b7154f3fd121f70 | refs/heads/main | 2023-01-02T12:30:14.439006 | 2020-11-02T02:36:21 | 2020-11-02T02:36:21 | 309,169,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import discord
from discord.ext import commands
client = commands.Bot(command_prefix = '-')
class Developers(commands.Cog):
def __int__(self, client):
self.client = client
def setup(client):
client.add_cog(Developers(client)) | [
"[email protected]"
] | |
47e0e8d9dc9f021eef73a93f67eabb84336aeaca | b88f0e2210c8adc1001a3ce18ac8b0bf5fdbbcc6 | /src/coincap_top100.py | f0335f6ba40d7d8cd65885df55aef1af86de6d26 | [] | no_license | timwfitnum/cryptoCur | 37c308ddbb1f66588b7bf582b7e8a39b92d4145f | 266a6d82c03686c3c79dfe7e31287141fcfc042b | refs/heads/main | 2023-01-07T18:46:50.837107 | 2020-10-20T11:17:37 | 2020-10-20T11:17:37 | 303,454,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,684 | py | import os
import json
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from datetime import datetime
from prettytable import PrettyTable
from colorama import Fore, Back, Style
convert = 'USD'
listings_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?sort='
global_metric = 'https://pro-api.coinmarketcap.com/v1/global-metrics/quotes/latest'
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': 'YOUR_API_KEY',
}
session = Session()
session.headers.update(headers)
convert = str(input("Please select currency for portfolio: "))
conversion = '?convert=' + convert
global_metric = global_metric + conversion
try:
response = session.get(global_metric)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
data = json.loads(response.text)
#print(json.dumps(data, sort_keys=True, indent=4))
data = data['data']
global_cap = int(data['quote'][convert]['total_market_cap'])
global_cap_string = '{:,}'.format(global_cap)
while True:
print()
print("CoinmarketCap Explorer Menu")
print("The global market cap is $" + global_cap_string)
print()
print("1 - Top 100 sorted by price")
print("2 - Top 100 sorted by 24 hour change")
print("3 - Top 100 sorted by 24 hour volume")
print("0 - Exit")
print()
choice = input("Please choose 1 - 3!")
if choice == '1':
listings_url += 'price'
if choice == '2':
listings_url += 'percent_change_24h'
if choice == '3':
listings_url += 'volume_24h'
if choice == '0':
break
try:
response = session.get(listings_url)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
data = json.loads(response.text)
data = data['data']
table = PrettyTable(['CMC Rank','Asset','Price','Market Cap', 'Volume', '1hr Change', 'Daily Change', 'Weekly Change'])
print()
for currency in data:
rank = currency['cmc_rank']
name = currency['name']
symbol = currency['symbol']
quotes = currency['quote'][convert]
market_cap = quotes['market_cap']
hour_change = quotes['percent_change_1h']
day_change = quotes['percent_change_24h']
week_change = quotes['percent_change_7d']
price = quotes['price']
volume = quotes['volume_24h']
# could return none type so must check
if hour_change is not None:
if hour_change > 0:
hour_change = Back.GREEN + str(hour_change) + '%' + Style.RESET_ALL
else:
hour_change = Back.RED + str(hour_change) + '%' + Style.RESET_ALL
if day_change is not None:
if day_change > 0:
day_change = Back.GREEN + str(day_change) + '%' + Style.RESET_ALL
else:
day_change = Back.RED + str(day_change) + '%' + Style.RESET_ALL
if week_change is not None:
if week_change > 0:
week_change = Back.GREEN + str(week_change) + '%' + Style.RESET_ALL
else:
week_change = Back.RED + str(week_change) +'%' + Style.RESET_ALL
if volume is not None:
volume_string = '{:,}'.format(volume)
if market_cap is not None:
market_cap_string = '{:,}'.format(market_cap)
table.add_row([rank,
name + '{' + symbol + '}',
'$' + str(price),
'$' + market_cap_string,
'$' + volume_string,
str(hour_change),
str(day_change),
str(week_change)])
print()
print(table)
print()
choice = input("Again? (y/n)")
if choice == 'n':
break
| [
"[email protected]"
] | |
139ae4368f9dcc52c84dcbfbcab84a8112ca406a | 727987094c01eaf41343464567a52fbb705f6701 | /yabgp/message/attribute/nlri/__init__.py | c67e29f0a1e9105cce0eecd0e3eebb32ea38ff2a | [
"Apache-2.0"
] | permissive | xinwu/yabgp | 1377d11e4e42f259dd66bb08060b74d0683a1796 | ae7cc871a4a8a67d08eef2abc82cf1397f2601c3 | refs/heads/master | 2021-01-13T03:05:33.031083 | 2016-12-14T16:27:23 | 2016-12-14T16:27:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | # Copyright 2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
import binascii
import netaddr
class NLRI(object):
@classmethod
def parse(cls, *args):
raise NotImplementedError
@classmethod
def construct(cls, *args):
raise NotImplementedError
@staticmethod
def construct_prefix_v4(masklen, prefix_str):
ip_hex = struct.pack('!I', netaddr.IPNetwork(prefix_str).value)
if 16 < masklen <= 24:
ip_hex = ip_hex[0:3]
elif 8 < masklen <= 16:
ip_hex = ip_hex[0:2]
elif masklen <= 8:
ip_hex = ip_hex[0:1]
return ip_hex
@staticmethod
def construct_prefix_v6(prefix):
mask = int(prefix.split('/')[1])
prefix_hex = binascii.unhexlify(hex(netaddr.IPNetwork(prefix).ip)[2:])
offset = mask / 8
offset_re = mask % 8
if offset == 0:
return prefix_hex[0: 1]
return prefix_hex[0: offset + offset_re]
| [
"[email protected]"
] | |
443e1f9749ae03b2af77501c83c2aa1172bf7c7a | ff1bea11066c6c1359bb054f989c5125ad882b93 | /Source Code/basic/2/led_button.py | da5831ae966a031c061d8feb4691c4a8d5920a67 | [] | no_license | ecodingstory/ESP32_MicroPython | d8ece55023a322df71e62e449c676d2f889077f0 | 35c2e005ce6a4cd3dab5b26ce4d3809646a53e21 | refs/heads/main | 2023-01-20T16:49:34.446582 | 2020-12-06T01:06:06 | 2020-12-06T01:06:06 | 318,746,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from machine import Pin
led_0= Pin(16, Pin.OUT)
led_1 = Pin(17, Pin.OUT)
button_0 = Pin(4, Pin.IN)
button_1 = Pin(5, Pin.IN)
while True:
if(button_0.value()):
led_0.on()
else:
led_0.off()
if(button_1.value()):
led_1.on()
else:
led_1.off()
| [
"[email protected]"
] | |
64f78170e1c9f94014471c5cd831ff57aae3e3b4 | 5c785ceb1d8442a1dc1d7755cc53784259131ed7 | /users/models.py | 3b18749a9e8834f424a6e39dcfe4264bd94188c1 | [] | no_license | deepaligpt000/NomadCoders_Airbnb | 3975c82d91f1b7589e0256bd354ac6116765d996 | 57caf8d5c9dd285eacd101a9f3dc6d55fc74f87f | refs/heads/master | 2022-09-30T01:33:27.464189 | 2020-06-08T06:04:45 | 2020-06-08T06:04:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class User(AbstractUser):
""" Custom User Model """
GENDER_MALE = "male"
GENDER_FEMALE = "female"
GENDER_OTHER = "other"
GENDER_CHOICES = (
(GENDER_MALE, "Male"),
(GENDER_FEMALE, "Female"),
(GENDER_OTHER, "Other"),
)
LANGUAGE_ENGLISH = "en"
LANGUAGE_KOREAN = "kr"
LANGUAGE_CHOICES = (
(LANGUAGE_ENGLISH, "English"),
(LANGUAGE_KOREAN, "Korean"),
)
CURRENCY_USD = "usd"
CURRENCY_KRW = "krw"
CURRENCY_CHOICES = (
(CURRENCY_USD, "USD"),
(CURRENCY_KRW, "KRW"),
)
avatar = models.ImageField(blank=True)
gender = models.CharField(choices=GENDER_CHOICES, max_length=10, blank=True)
bio = models.TextField(blank=True)
birthdate = models.DateField(blank=True, null=True)
language = models.CharField(max_length=2, blank=True, choices=LANGUAGE_CHOICES)
currency = models.CharField(max_length=3, blank=True, choices=CURRENCY_CHOICES)
superhost = models.BooleanField(default=False)
| [
"[email protected]"
] | |
f4e41340c6a91271c699988573ccb9305183e8e3 | 7b2ff067a21a3229072c7e8172f1a5735931aa4d | /build/build.py | e13356656480ec27014b3c28970e6a416cb27c45 | [] | no_license | maclovin/interAgentes | 22d1a101aa5e7a2decf0be3688acf179d8c7ab25 | fc23d4ec903d8d97e230179441e6237e002169d3 | refs/heads/master | 2016-08-04T11:57:43.048378 | 2013-02-08T20:12:25 | 2013-02-08T20:12:25 | 3,902,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,850 | py | #!/usr/bin/python
import os,sys
def makedir(diretorio):
print 'Criando diretorio %s' %(diretorio)
if not os.path.isdir(diretorio):
os.mkdir(diretorio)
def makefile(local,conteudo):
f = open(local, 'w')
f.write(conteudo)
f.close
print 'Criando arquivo %s' %(local)
def usage():
usage = """
Modo de usar:
python build.py [nome_do_ambiente_ou_banco_de_dados]
Ex:
python build.py clienteX
"""
return usage
def generate(client):
scrapPy = """
import tweetstream
import pymongo
import time
import sys
import os
import string
from classes.tweets import *
from pymongo import Connection
# open the connection to MongoDB (host and port)
myConnection = Connection('127.0.0.1', 27017)
db = myConnection."""+client+"""
# passed as a parameter to the objects
dbTweet = db."""+client+"""
# instantiate the tweets object
myTweet = tweets(dbTweet)
# read the external file with the terms to be used in the searches
f = open('terms.txt', 'r')
lines = string.split(f.read(), '\\n')
f.close()
myTerms = []
for line in lines:
if not line == '':
myTerms.append(line)
people = [1000]
with tweetstream.FilterStream("neuromancer_br", "q1w2e3r4", track=myTerms, follow=people) as stream:
for tweet in stream:
dateToday = time.strftime("%Y-%m-%d")
hour = time.strftime("%H")
myTweet.setDateScrap(dateToday)
myTweet.setHourScrap(hour)
myTweet.setSeguidores(tweet['user']['followers_count'])
myTweet.setRts(tweet['retweet_count'])
myTweet.setMensagem(tweet['text'])
myTweet.setDateTime(tweet['created_at'])
myTweet.setAutor(tweet['user']['screen_name'])
myTweet.setLocation(tweet['user']['location'])
myTweet.setStatusCount(tweet['user']['statuses_count'])
myTweet.setCreatedAt(tweet['user']['created_at'])
myTweet.setHashTags(tweet['entities']['hashtags'])
myTweet.setEntitles(tweet['entities']['urls'])
myTweet.setMencoes(tweet['entities']['user_mentions'])
myTweet.setAutorOriginal(tweet['in_reply_to_screen_name'])
myTweet.save()
print tweet['user']['created_at'], " - "
print tweet['user']['screen_name'], ":\\n"
print tweet['text'], "\\n\\n"
"""
tsvPy = """
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
from pymongo import Connection
from datetime import date
myConnection = Connection('127.0.0.1', 27017)
db = myConnection."""+client+"""
# passed as a parameter to the objects
dbTweet = db."""+client+"""
def usage():
usage = \"\"\"
Modo de usar:
python tsv.py [nome do diretorio] [data]
python tsv.py [nome do diretorio] [data] [hora_cheia]
Ex:
python tsv.py joao 2012-03-28
python tsv.py joao 2012-03-28 14
\"\"\"
return usage
def toTSV(client, date, hour, content):
if hour == None:
fname = '%s/%s.tsv' %(client, date)
else:
fname = '%s/%s_%s.tsv' %(client, date, hour)
try:
f = open(fname, 'w')
f.write(content)
f.close
except:
if not os.path.isdir(client):
os.mkdir(client)
f = open(fname, 'w')
f.write(content)
f.close
def search(client, date, hour):
content = ''
if not date == None and not hour == None:
myTweets = dbTweet.find({ "hourScrap":hour, "dateScrap":date })
elif not date == None:
myTweets = dbTweet.find({ "dateScrap":date })
try:
for myTweet in myTweets:
content += "%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n" %(myTweet['dateScrap'], myTweet['hourScrap'], myTweet['seguidores'], myTweet['rts'], myTweet['mensagem'], myTweet['teor'], myTweet['obs'], myTweet['url'], myTweet['dateTime'], myTweet['autor'], myTweet['location'], myTweet['statusCount'], myTweet['createdAt'], myTweet['hashTags'], myTweet['entitles'], myTweet['mencoes'], myTweet['autorOriginal'])
except:
print 'Nao ha dados'
print "Conteudo: %s\\n\\n" %(content)
toTSV(client, date, hour, content)
if __name__ == "__main__":
action = None
date = None
hour = None
try:
action = sys.argv[1]
date = sys.argv[2]
except:
print usage()
try:
hour = sys.argv[3]
except:
hour = None
if not action == None and not date == None:
search(action,date,hour)
"""
databasePy = """
import pymongo
from pymongo import Connection
class database:
def __init__(self):
self.connection = Connection('127.0.0.1', 27017)
self.db = self.connection."""+client+"""
self.dbTweet = self.db."""+client+"""
return 'lol'
"""
tweetsPy = """
import pymongo
from pymongo import Connection
class tweets():
def __init__(self,db):
"Classe para Streamming API do Twitter"
self.dbTweet = db
self.dateScrap = ''
self.hourScrap = ''
self.seguidores = ''
self.rts = ''
self.mensagem = ''
self.teor = ''
self.obs = ''
self.url = ''
self.dateTime = ''
self.autor = ''
self.location = ''
self.statusCount = ''
self.createdAt = ''
self.hashTags = ''
self.entitles = ''
self.mencoes = ''
self.autorOriginal = ''
def setDateScrap(self, dateScrap):
self.dateScrap = dateScrap
def setHourScrap(self, hourScrap):
self.hourScrap = hourScrap
def setSeguidores(self, seguidores):
self.seguidores = seguidores
def setRts(self, rts):
self.rts = rts;
def setMensagem(self, mensagem):
self.mensagem = mensagem
def setTeor(self, teor):
self.teor = teor
def setObs(self, obs):
self.obs = obs
def setUrl(self, url):
self.url = url
def setDateTime(self, dateTime):
self.dateTime = dateTime
def setAutor(self, autor):
self.autor = autor
def setLocation(self, location):
self.location = location
def setStatusCount(self, statusCount):
self.statusCount = statusCount
def setCreatedAt(self, createdAt):
self.createdAt = createdAt
def setHashTags(self, hashTags):
self.hashTags = hashTags
def setEntitles(self, entitles):
self.entitles = entitles
def setMencoes(self, mencoes):
self.mencoes = mencoes
def setAutorOriginal(self, autorOriginal):
self.autorOriginal = autorOriginal
def save(self):
self.dbTweet.insert({"dateScrap":self.dateScrap, "hourScrap":self.hourScrap, "seguidores":self.seguidores, "rts":self.rts, "mensagem":self.mensagem, "teor":self.teor, "obs":self.obs, "url":self.url, "dateTime":self.dateTime, "autor":self.autor, "location":self.location, "statusCount":self.statusCount, "createdAt":self.createdAt, "hashTags":self.hashTags, "entitles":self.entitles, "mencoes":self.mencoes, "autorOriginal":self.autorOriginal })
"""
makefile('scrap.py',scrapPy)
makefile('terms.txt','')
makefile('tsv.py',tsvPy)
makedir('classes')
makefile('classes/__init__.py','')
makefile('classes/database.py',databasePy)
makefile('classes/tweets.py',tweetsPy)
if __name__ == "__main__":
client = None
try:
client = sys.argv[1]
generate(client)
except:
print usage()
| [
"[email protected]"
] | |
b412bf90b49df2ddca1fa8eb2a34f01c20753966 | 936a0a1c7a09414a4e41343769fa8656af7a573b | /code/input.py | 068aa4382bffbd32b2ebe8a3baa2e3bed7fa88e7 | [] | no_license | ramco-labs/automated-feature-engineering | b2b77c6497e14edc0e1328f57c04254c8a05b168 | a2c54753a5f66be4e2398303bfa108acc687b3a7 | refs/heads/master | 2022-11-28T21:06:25.492891 | 2020-09-26T00:50:03 | 2020-09-26T00:50:03 | 210,266,856 | 1 | 1 | null | 2022-11-22T03:36:16 | 2019-09-23T04:49:01 | Python | UTF-8 | Python | false | false | 616 | py | import pandas as pd
class UserInput:
@staticmethod
def input_process(path, uid, outputs):
try:
df = pd.read_csv(path)
for output in outputs:
try:
keys = list(df.keys())
keys.index(output)
except ValueError:
print("error key not found :", output)
return df, uid, outputs
except IOError:
print("File not found", path)
#user_input = UserInput()
#print(user_input.input_process("../data/autocoding_aviation-autocoding.csv", "1234", ['test', 'fb_id']))
| [
"[email protected]"
] | |
969ded313c78856f06db9a768ee20f8972c9ab0e | 7e30d941277b01a73e3537c9a81d3ddf6ddb11ac | /sudoku-solver.py | 708bca2660864cb4aa748e5eff8984e549066eb9 | [] | no_license | krishna13-dev/Sudoku-Solver | bdc43662eb123c0fae9e7648ff387f4173337f3b | 356a0f015da97cc70d37d282c64a94ecb950a87a | refs/heads/master | 2022-12-13T02:49:37.800577 | 2020-09-15T07:10:36 | 2020-09-15T07:10:36 | 295,603,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | board = [[0, 4, 9, 0, 0, 0, 0, 0, 5],
[0, 0, 0, 8, 3, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 8],
[0, 0, 0, 0, 9, 0, 8, 1, 0],
[0, 0, 0, 0, 0, 7, 6, 0, 0],
[0, 0, 1, 0, 0, 0, 3, 0, 0],
[7, 8, 0, 4, 1, 0, 0, 0, 9],
[2, 9, 4, 7, 0, 0, 0, 0, 0],
[6, 0, 0, 9, 0, 0, 0, 0, 0]]
# solve board
def solve_board(board):
find = find_empty(board)
if not find:
return True
else:
row, col = find
for i in range(1, 10):
if check_valid(board, i, (row, col)):
board[row][col] = i
if solve_board(board):
return True
board[row][col] = 0
return False
# check if valid
def check_valid(board, num, pos):
# row
for i in range(len(board[0])):
if board[pos[0]][i] == num and pos[1] != i:
return False
# coloum
for i in range(len(board[0])):
if board[i][pos[1]] == num and pos[0] != i:
return False
# 3*3 box
box_x = pos[1] // 3
box_y = pos[0] // 3
for i in range(box_y*3, box_y*3 + 3):
for j in range(box_x*3, box_x*3 + 3):
if board[i][j] == num and (i,j) != pos:
return False
return True
# find empty space
def find_empty(board):
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] == 0:
return(i, j)
return None
# printing board
def print_board(board):
for i in range(len(board)):
if i % 3 == 0 and i != 0:
print('- - - - - - - - - - - -')
for j in range(len(board[i])):
if j % 3 == 0 and j != 0:
print(' | ', end = '')
if j == 8:
print(board[i][j])
else:
print(str(board[i][j]) + ' ', end= '')
print_board(board)
solve_board(board)
print()
print()
print_board(board) | [
"[email protected]"
] | |
9a0f2585c8786ae68cdb437ea210b0230321d96c | f71aecb0e91fe877af3ec652c7f6753a1e7b5ccd | /RemoveComments_MID_722.py | 5fd160cab533466e6146d8361f29931987c5447a | [] | no_license | 953250587/leetcode-python | 036ad83154bf1fce130d41220cf2267856c7770d | 679a2b246b8b6bb7fc55ed1c8096d3047d6d4461 | refs/heads/master | 2020-04-29T12:01:47.084644 | 2019-03-29T15:50:45 | 2019-03-29T15:50:45 | 176,122,880 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,195 | py | """
Given a C++ program, remove comments from it. The program source is an array where source[i] is the i-th line of the source code. This represents the result of splitting the original source code string by the newline character \n.
In C++, there are two types of comments, line comments, and block comments.
The string // denotes a line comment, which represents that it and rest of the characters to the right of it in the same line should be ignored.
The string /* denotes a block comment, which represents that all characters until the next (non-overlapping) occurrence of */ should be ignored. (Here, occurrences happen in reading order: line by line from left to right.) To be clear, the string /*/ does not yet end the block comment, as the ending would be overlapping the beginning.
The first effective comment takes precedence over others: if the string // occurs in a block comment, it is ignored. Similarly, if the string /* occurs in a line or block comment, it is also ignored.
If a certain line of code is empty after removing comments, you must not output that line: each string in the answer list will be non-empty.
There will be no control characters, single quote, or double quote characters. For example, source = "string s = "/* Not a comment. */";" will not be a test case. (Also, nothing else such as defines or macros will interfere with the comments.)
It is guaranteed that every open block comment will eventually be closed, so /* outside of a line or block comment always starts a new comment.
Finally, implicit newline characters can be deleted by block comments. Please see the examples below for details.
After removing the comments from the source code, return the source code in the same format.
Example 1:
Input:
source = ["/*Test program */", "int main()", "{ ", " // variable declaration ", "int a, b, c;", "/* This is a test", " multiline ", " comment for ", " testing */", "a = b + c;", "}"]
The line by line code is visualized as below:
/*Test program */
int main()
{
// variable declaration
int a, b, c;
/* This is a test
multiline
comment for
testing */
a = b + c;
}
Output: ["int main()","{ "," ","int a, b, c;","a = b + c;","}"]
The line by line code is visualized as below:
int main()
{
int a, b, c;
a = b + c;
}
Explanation:
The string
/*
denotes a block comment, including line 1 and lines 6-9. The string
//
denotes line 4 as comments.
Example 2:
Input:
source = ["a/*comment", "line", "more_comment*/b"]
Output: ["ab"]
Explanation: The original source string is "a/*comment\nline\nmore_comment*/b", where we have bolded the newline characters. After deletion, the implicit newline characters are deleted, leaving the string "ab", which when delimited by newline characters becomes ["ab"].
Note:
The length of source is in the range [1, 100].
The length of source[i] is in the range [0, 80].
Every open block comment is eventually closed.
There are no single-quote, double-quote, or control characters in the source code.
"""
class Solution(object):
def removeComments(self, source):
"""
:type source: List[str]
:rtype: List[str]
32ms
"""
result = []
block = False
union = False
start = 0
while start < len(source):
line = source[start]
start += 1
if not block and '//' in line and '/*' in line:
s = line.split('//', 1)
if '/*' not in s[0]:
s = line.split('//', 1)
if len(s[0]) >= 1:
if union:
result[-1] += s[0]
union = False
else:
result.append(s[0])
union = False
else:
block = True
s = line.split('/*', 1)
if len(s[0]) >= 1:
if union:
result[-1] += s[0]
else:
union = True
result.append(s[0])
source.insert(start, s[1])
elif not block and '//' in line:
s = line.split('//', 1)
if len(s[0]) >= 1:
if union:
result[-1] += s[0]
else:
result.append(s[0])
union = False
elif not block and '/*' in line:
block = True
s = line.split('/*', 1)
if len(s[0]) >= 1:
if union:
result[-1] += s[0]
else:
union = True
result.append(s[0])
source.insert(start, s[1])
elif block and '*/' in line:
end = line.split('*/', 1)[1]
source.insert(start, end)
block = False
elif not block:
if union:
result[-1] += line
union = False
else:
if len(line) >= 1:
result.append(line)
print(source, union, block)
print(result)
return result
# source = ["/*Test program */",
# "int main()",
# "{ ",
# " // variable declaration ",
# "int a, b, c;",
# "",
# "/* This is a test",
# " multiline ",
# " comment for ",
# " testing */",
# "a = b + c;",
# "}"]
# a = Solution().removeComments(source)
# for i in a:
# print(i)
#
# source = ["a/*comment", "line", "more_comment*/b"]
# a = Solution().removeComments(source)
# for i in a:
# print(i)
#
# source = ["a//*b//*c","blank","d/*/e*//f"]
# a = Solution().removeComments(source)
# for i in a:
# print(i)
#
# source = ["a/*/b//*c","blank","d/*/e*//f"]
# a = Solution().removeComments(source)
# for i in a:
# print(i)
#
# source = ["class test{",
# "public: ",
# " int x = 1;",
# " /*double y = 1;*/",
# " char c;", "};"]
# a = Solution().removeComments(source)
# for i in a:
# print(i)
source = ['d/*/aee*//d/********/']
a = Solution().removeComments(source)
for i in a:
print(i)
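# Helper for the second, O(n)-time solution below: index of the first '//' or '/*' in a line, or -1 if neither occurs.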
def _find_comment(line):
for i, ch in enumerate(line):
if ch == '/' and i + 1 < len(line):
ch = line[i + 1]
if ch == '/' or ch == '*':
return i
return -1
# O(n) time. O(1) space.
class Solution(object):
def removeComments(self, source):
"""
:type source: List[str]
:rtype: List[str]
36ms
"""
row = 0
while row < len(source):
line = source[row]
lo = _find_comment(line)
if lo == -1:
row += 1
continue
if line[lo + 1] == '/':
if lo == 0:
source.pop(row)
else:
source[row] = line[:lo]
row += 1
continue
hi = line.find('*/', lo + 2)
if hi != -1:
if lo == 0 and hi + 2 == len(line):
source.pop(row)
else:
source[row] = line[:lo] + line[hi + 2:]
continue
if lo == 0:
source.pop(row)
else:
source[row] = line[:lo]
row += 1
while row < len(source):
line = source[row]
hi = line.find('*/')
if hi == -1:
source.pop(row)
continue
if hi + 2 == len(line):
source.pop(row)
else:
if lo == 0:
source[row] = line[hi + 2:]
else:
source.pop(row)
row -= 1
source[row] += line[hi + 2:]
break
return source | [
"[email protected]"
] | |
4159a5dae0418564d7a15b83606762600f9c6bef | 4f383578dc1809c565f1fa85cc10e8f736479f63 | /G1_Selenium.py | a0d4018fe557872273e4ab601531364e833b4cab | [] | no_license | leonardosoler/selenium-task-mercadolivre | d186ccc140d558a0611ca0d29130be8e4b4591c0 | e3fc905ebeff59ea7e52258d297454b7f94aa9c7 | refs/heads/master | 2023-07-30T20:27:01.027894 | 2021-10-04T03:24:09 | 2021-10-04T03:24:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import requests
from bs4 import BeautifulSoup
response = requests.get('https://g1.globo.com/')
content = response.content
site = BeautifulSoup(content, 'html.parser')
# News item HTML
noticias = site.findAll('div', attrs={'class' : 'feed-post-body'} )
for noticia in noticias:
    # Title
titulo = noticia.find('a', attrs= {'class': 'feed-post-link'})
subtitulo = noticia.find('a', attrs={'class': 'feed-post-body-title'})
if (subtitulo):
print("Subtitulo : " + subtitulo.text)
#print(noticia.prettify())
print("Titulo : " + titulo.text)
| [
"[email protected]"
] | |
94464e03510f90f38a1e437322c2d19f586c970e | e13434a945f17c46f8c162fd6652798232d55e25 | /main.py | b78565c7dd0b4a6929730c90a6087a36dab0a9e0 | [] | no_license | pomeroyd/test-flask-app | e9f043e578d9d80e825f11c4977acf555664a191 | 85a9a0d2863108bde551c6da88a9f988eeea7614 | refs/heads/main | 2023-01-13T07:01:45.723359 | 2020-11-20T10:42:55 | 2020-11-20T10:42:55 | 314,525,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from flask import Flask, request
from emoji import emojize
app = Flask(__name__)
@app.route('/')
def hello():
name = request.args.get('name', 'World')
return emojize("Hello " + name +"!")
| [
"[email protected]"
] | |
bc21e2df44ce367bd5aa9caedada1fd31027667e | 3748795418a9e9294b829ec86c01ea227cc0991e | /exps/synthetic_data.py | 012442a48e127c2eb648357e0606a4aa88f585fe | [
"MIT"
] | permissive | ranigb/Set-Tree | 16aad4eb188caf2878d0dab4345979bcbcc9cd47 | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | refs/heads/main | 2023-09-04T02:03:12.147211 | 2021-11-08T21:50:05 | 2021-11-08T21:50:05 | 406,342,399 | 0 | 0 | MIT | 2021-09-14T11:39:35 | 2021-09-14T11:39:34 | null | UTF-8 | Python | false | false | 7,655 | py | import numpy as np
import random
from settree.set_data import SetDataset
########################################################################################################################
# EXP 1: First quarter
########################################################################################################################
def get_first_quarter_data(num_samples, min_items_set=2, max_items_set=10, dim=2):
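    # Sets that receive an injected point in the first quadrant are labeled 1; sets with no first-quadrant point are labeled 0.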
def inject_samples_in_first_quarter(set_of_samples, min=1, max=1, dim=2):
num = random.choice(range(min, max + 1))
pos_points = np.random.uniform(low=0, high=1, size=(num, dim))
set_of_samples[:num, :] = pos_points
return set_of_samples
def sample_point_not_from_first_quarter(dim=2):
# sample a quarter (not the first)
while True:
r = np.random.uniform(-1, 1, dim)
if sum(r >= 0) < dim:
break
return tuple(r)
def sample_set(num, dim):
return np.stack([sample_point_not_from_first_quarter(dim) for _ in range(num)])
s_1 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]
s_2 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]
s_2 = [inject_samples_in_first_quarter(i, min=1, max=1, dim=dim) for i in s_2]
x = s_1 + s_2
y = np.concatenate([np.zeros(len(s_1)), np.ones(len(s_2))]).astype(np.int64)
return x, y
########################################################################################################################
# EXP 2: Stats
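# In each task below, one class mixes two sources (distributions or parameter draws) inside a single set, while the other class draws every element of a set from one source.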
########################################################################################################################
def get_data_uniform_vs_normal(n, set_size):
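    # Mixed sets: half normal, half uniform values; single-source sets: all normal or all uniform.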
neg = []
for _ in range(n//2):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
neg.append(np.stack([np.concatenate([np.random.normal(loc=0.0, scale=1.0, size=(set_size // 2,)),
np.random.uniform(low=-1.0, high=1.0, size=(set_size // 2,))]), ids], axis=1))
pos = []
for _ in range(n//4):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
pos.append(np.stack([np.random.normal(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
pos.append(np.stack([np.random.uniform(low=-1.0, high=1.0, size=(set_size,)), ids], axis=1))
y = np.array([0] * (n // 2) + [1] * (n // 2))
x = pos + neg
return x, y
def get_data_laplace_vs_normal(n, set_size):
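    # Mixed sets: half normal, half Laplace values; single-source sets: all normal or all Laplace.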
neg = []
for _ in range(n//2):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
neg.append(np.stack([np.concatenate([np.random.normal(loc=0.0, scale=1.0, size=(set_size // 2,)),
np.random.laplace(loc=0.0, scale=1.0, size=(set_size // 2,))]), ids], axis=1))
pos = []
for _ in range(n//4):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
pos.append(np.stack([np.random.normal(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
pos.append(np.stack([np.random.laplace(loc=0.0, scale=1.0, size=(set_size,)), ids], axis=1))
y = np.array([0] * (n // 2) + [1] * (n // 2))
x = pos + neg
return x, y
def get_data_different_mu_normal(n, set_size):
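    # Mixed sets: two normal halves with independent random means; single-source sets: one mean per set.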
neg = []
for _ in range(n//2):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
neg.append(np.stack([np.concatenate([np.random.normal(loc=np.random.randn(), scale=1.0, size=(set_size // 2,)),
np.random.normal(loc=np.random.randn(), scale=1.0, size=(set_size // 2,))]), ids], axis=1))
pos = []
for _ in range(n//4):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
mu = np.random.randn()
pos.append(np.stack([np.random.normal(loc=mu, scale=1.0, size=(set_size,)), ids], axis=1))
pos.append(np.stack([np.random.normal(loc=mu, scale=1.0, size=(set_size,)), ids], axis=1))
y = np.array([0] * (n // 2) + [1] * (n // 2))
x = pos + neg
return x, y
def get_data_different_sigma_normal(n, set_size):
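    # Mixed sets: two normal halves with independent random scales; single-source sets: one scale per set.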
neg = []
for _ in range(n//2):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
neg.append(np.stack([np.concatenate([np.random.normal(loc=0.0, scale=np.abs(np.random.randn()), size=(set_size // 2,)),
np.random.normal(loc=0.0, scale=np.abs(np.random.randn()), size=(set_size // 2,))]), ids], axis=1))
pos = []
for _ in range(n//4):
if np.random.rand() > 0.5:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
else:
ids = np.array([0] * (set_size // 2) + [1] * (set_size // 2))
sig = np.abs(np.random.randn())
pos.append(np.stack([np.random.normal(loc=0.0, scale=sig, size=(set_size,)), ids], axis=1))
pos.append(np.stack([np.random.normal(loc=0.0, scale=sig, size=(set_size,)), ids], axis=1))
y = np.array([0] * (n // 2) + [1] * (n // 2))
x = pos + neg
return x, y
def get_data_by_task(task_name, params):
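    # Generate the requested synthetic task and wrap the records in SetDataset objects for train and test.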
if task_name == 'different_uniform_normal':
# 1) different distributions
x_train, y_train = get_data_uniform_vs_normal(params['n_train'], params['set_size'])
ds_train = SetDataset(records=x_train, is_init=True)
x_test, y_test = get_data_uniform_vs_normal(params['n_test'], params['set_size'])
ds_test = SetDataset(records=x_test, is_init=True)
elif task_name == 'different_laplace_normal':
# 1) different distributions
x_train, y_train = get_data_laplace_vs_normal(params['n_train'], params['set_size'])
ds_train = SetDataset(records=x_train, is_init=True)
x_test, y_test = get_data_laplace_vs_normal(params['n_test'], params['set_size'])
ds_test = SetDataset(records=x_test, is_init=True)
elif task_name == 'different_mean':
# 2) different mean
x_train, y_train = get_data_different_mu_normal(params['n_train'], params['set_size'])
ds_train = SetDataset(records=x_train, is_init=True)
x_test, y_test = get_data_different_mu_normal(params['n_test'], params['set_size'])
ds_test = SetDataset(records=x_test, is_init=True)
elif task_name == 'different_std':
# 3) different sigma
x_train, y_train = get_data_different_sigma_normal(params['n_train'], params['set_size'])
ds_train = SetDataset(records=x_train, is_init=True)
x_test, y_test = get_data_different_sigma_normal(params['n_test'], params['set_size'])
ds_test = SetDataset(records=x_test, is_init=True)
else:
raise ValueError
return ds_train, y_train, ds_test, y_test
| [
"[email protected]"
] | |
5438edc9a22551e8091a4992b211263f519f6cce | 8e939e0f075a14377d87e0eb7729e4f0818f1df9 | /zarc/models_2017-08-04-06:42:45.py | 0831485aa5117c669efb76a2c672493a20217ba0 | [
"MIT"
] | permissive | mimcomp/caseke | 072d076c9442c19916d8f71ec25fa45676031914 | 3c0749a431bb2e2c82bcb292d528c748bea8b1ba | refs/heads/master | 2020-06-02T15:42:24.159728 | 2019-06-03T16:34:09 | 2019-06-03T16:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,466 | py | # coding: utf-8
# AUTOGENERATED BY gen_script.sh from kp2.py
# Copyright (C) Nyimbi Odero, Fri Aug 4 06:40:31 EAT 2017
from sqlalchemy import func
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn, UserExtensionMixin
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import ImageManager
from sqlalchemy_utils import aggregated, force_auto_coercion
from sqlalchemy.orm import relationship, query, defer, deferred
from sqlalchemy import (Column, Integer, String, ForeignKey,
Sequence, Float, Text, BigInteger, Date,
DateTime, Time, Boolean, Index, CheckConstraint,
UniqueConstraint,ForeignKeyConstraint, Numeric, LargeBinary , Table)
from datetime import timedelta, datetime, date
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql import func
from .mixins import *
# Here is how to extend the User model
#class UserExtended(Model, UserExtensionMixin):
# contact_group_id = Column(Integer, ForeignKey('contact_group.id'), nullable=True)
# contact_group = relationship('ContactGroup')
# UTILITY CLASSES
import arrow, enum
# Initialize sqlalchemy_utils
#force_auto_coercion()
class Lawyer(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'Lawyers'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
            '<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip" ' +
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
            '<source src="' + url_for(vn) + '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
law_firm = Column(ForeignKey(u'lawfirm.id'), index=True)
gender = Column(ForeignKey(u'gender.id'), index=True)
barnumber = Column(String(20))
admissiondate = Column(Date)
gender1 = relationship(u'Gender', primaryjoin='Lawyer.gender == Gender.id', backref=u'lawyers')
lawfirm = relationship(u'Lawfirm', primaryjoin='Lawyer.law_firm == Lawfirm.id', backref=u'lawyers')
class Medevent(ActivityMixin, AuditMixin, Model):
__tablename__ = 'Medevent'
id = Column(Integer, primary_key=True, autoincrement=True)
class Bail(AuditMixin, Model):
__tablename__ = 'bail'
id = Column(Integer, primary_key=True, autoincrement=True)
hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
amountgranted = Column(Numeric(12, 2))
noofsureties = Column(Integer, nullable=False)
paid = Column(Boolean)
paydate = Column(Date)
defendant1 = relationship(u'Defendant', primaryjoin='Bail.defendant == Defendant.id', backref=u'bails')
hearing1 = relationship(u'Hearing', primaryjoin='Bail.hearing == Hearing.id', backref=u'bails')
surety = relationship(u'Surety', secondary='bail_surety', backref=u'bails')
bail_surety = Table(
'bail_surety', Model.metadata,
Column('bail', ForeignKey(u'bail.id'), primary_key=True, nullable=False),
Column('surety', ForeignKey(u'surety.id'), primary_key=True, nullable=False, index=True)
)
class Case(AuditMixin, Model):
__tablename__ = 'case'
id = Column(Integer, primary_key=True, autoincrement=True)
casename = Column(String(200), nullable=False)
initialreport = Column(Text, nullable=False)
priority = Column(Integer, nullable=False)
investigationassigmentdate = Column(DateTime)
investigationassignmentnote = Column(Text, nullable=False)
investigationplan = Column(Text, nullable=False)
investigationsummary = Column(Text, nullable=False)
investigationreview = Column(Text)
agadvicerequested = Column(Boolean)
agadvicedate = Column(Date)
agadvice = Column(Text, nullable=False)
chargesheet = Column(Text, nullable=False)
sendtotrial = Column(Boolean, nullable=False)
docketnumber = Column(String(100))
nameofcase = Column(String(400))
chargedate = Column(DateTime)
judgement = Column(Text, nullable=False)
judgementdate = Column(DateTime)
sentencelengthyr = Column(Integer)
sentencelengthmnth = Column(Integer)
senetencelenghtdays = Column(Integer)
sentencestartdate = Column(Date)
sentenceexpirydate = Column(Date)
fineamount = Column(Numeric(12, 2))
caseappealed = Column(Boolean)
appealdate = Column(DateTime)
appealexpiry = Column(Date)
caseclosed = Column(Boolean)
closedate = Column(Date)
policestation = relationship(u'Policestation', secondary='case_policestation', backref=u'cases')
natureofsuit = relationship(u'Natureofsuit', secondary='case_natureofsuit', backref=u'cases')
plaintiff = relationship(u'Plaintiff', secondary='case_plaintiff', backref=u'cases')
witness = relationship(u'Witnes', secondary='case_witness', backref=u'cases')
prosecutor = relationship(u'Prosecutor', secondary='case_prosecutor', backref=u'prosecutor_cases')
policeofficer = relationship(u'Policeofficer', secondary='case_policeofficer', backref=u'policeofficer_cases')
policeofficer1 = relationship(u'Policeofficer', secondary='case_policeofficer_2', backref=u'policeofficer_cases_0')
casecategory = relationship(u'Casecategory', secondary='case_casecategory', backref=u'cases')
defendant = relationship(u'Defendant', secondary='case_defendant', backref=u'cases')
prosecutor1 = relationship(u'Prosecutor', secondary='case_prosecutor_2', backref=u'prosecutor_cases_0')
causeofaction = relationship(u'Causeofaction', secondary='case_causeofaction', backref=u'cases')
case_casecategory = Table(
'case_casecategory', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('casecategory', ForeignKey(u'casecategory.id'), primary_key=True, nullable=False, index=True)
)
case_causeofaction = Table(
'case_causeofaction', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False, index=True)
)
case_defendant = Table(
'case_defendant', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('defendant', ForeignKey(u'defendant.id'), primary_key=True, nullable=False, index=True)
)
case_natureofsuit = Table(
'case_natureofsuit', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('natureofsuit', ForeignKey(u'natureofsuit.id'), primary_key=True, nullable=False, index=True)
)
case_plaintiff = Table(
'case_plaintiff', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('plaintiff', ForeignKey(u'plaintiff.id'), primary_key=True, nullable=False, index=True)
)
case_policeofficer = Table(
'case_policeofficer', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policeofficer', ForeignKey(u'policeofficer.id'), primary_key=True, nullable=False, index=True)
)
case_policeofficer_2 = Table(
'case_policeofficer_2', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policeofficer', ForeignKey(u'policeofficer.id'), primary_key=True, nullable=False, index=True)
)
case_policestation = Table(
'case_policestation', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policestation', ForeignKey(u'policestation.id'), primary_key=True, nullable=False, index=True)
)
case_prosecutor = Table(
'case_prosecutor', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False, index=True)
)
case_prosecutor_2 = Table(
'case_prosecutor_2', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False, index=True)
)
case_witness = Table(
'case_witness', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('witness', ForeignKey(u'witness.id'), primary_key=True, nullable=False, index=True)
)
class Casecategory(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'casecategory'
id = Column(Integer, primary_key=True, autoincrement=True)
indictable = Column(Boolean)
class Causeofaction(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'causeofaction'
id = Column(Integer, primary_key=True, autoincrement=True)
criminal = Column(Boolean, nullable=False)
parent_coa = Column(ForeignKey(u'causeofaction.id'), index=True)
parent = relationship(u'Causeofaction', remote_side=[id], primaryjoin='Causeofaction.parent_coa == Causeofaction.id', backref=u'causeofactions')
filing = relationship(u'Filing', secondary='causeofaction_filing', backref=u'causeofactions')
hearing = relationship(u'Hearing', secondary='causeofaction_hearing', backref=u'causeofactions')
causeofaction_filing = Table(
'causeofaction_filing', Model.metadata,
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False),
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False, index=True)
)
causeofaction_hearing = Table(
'causeofaction_hearing', Model.metadata,
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
class Cell(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'cell'
id = Column(Integer, primary_key=True, autoincrement=True)
prison = Column(ForeignKey(u'prison.id'), nullable=False, index=True)
prison1 = relationship(u'Prison', primaryjoin='Cell.prison == Prison.id', backref=u'cells')
class Commitaltype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'commitaltype'
id = Column(Integer, primary_key=True, autoincrement=True)
prisoncommital = relationship(u'Prisoncommital', secondary='commitaltype_prisoncommital', backref=u'commitaltypes')
commitaltype_prisoncommital = Table(
'commitaltype_prisoncommital', Model.metadata,
Column('commitaltype', ForeignKey(u'commitaltype.id'), primary_key=True, nullable=False),
Column('prisoncommital_prison', Integer, primary_key=True, nullable=False),
Column('prisoncommital_warrantno', String(100), primary_key=True, nullable=False),
ForeignKeyConstraint(['prisoncommital_prison', 'prisoncommital_warrantno'], [u'prisoncommital.prison', u'prisoncommital.warrantno']),
Index('idx_commitaltype_prisoncommital', 'prisoncommital_prison', 'prisoncommital_warrantno')
)
class Constituency(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'constituency'
id = Column(Integer, primary_key=True, autoincrement=True)
county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
town = Column(ForeignKey(u'town.id'), index=True)
county1 = relationship(u'County', primaryjoin='Constituency.county == County.id', backref=u'constituencies')
town1 = relationship(u'Town', primaryjoin='Constituency.town == Town.id', backref=u'constituencies')
class County(AuditMixin, Model):
__tablename__ = 'county'
id = Column(Integer, primary_key=True, autoincrement=True)
class Court(PlaceMixin, RefTypeMixin, AuditMixin, Model):
__tablename__ = 'court'
id = Column(Integer, primary_key=True, autoincrement=True)
court_station = Column(ForeignKey(u'courtstation.id'), nullable=False, index=True)
courtstation = relationship(u'Courtstation', primaryjoin='Court.court_station == Courtstation.id', backref=u'courts')
class Courtlevel(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'courtlevel'
id = Column(Integer, primary_key=True, autoincrement=True)
class Courtstation(PlaceMixin, AuditMixin, Model):
__tablename__ = 'courtstation'
id = Column(Integer, primary_key=True, autoincrement=True)
residentmagistrate = Column(String(100))
registrar = Column(String(100), nullable=False)
court_level = Column(ForeignKey(u'courtlevel.id'), nullable=False, index=True)
num_of_courts = Column(Integer)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
courtlevel = relationship(u'Courtlevel', primaryjoin='Courtstation.court_level == Courtlevel.id', backref=u'courtstations')
town1 = relationship(u'Town', primaryjoin='Courtstation.town == Town.id', backref=u'courtstations')
class Defendant(PersonMedicalMixin, PersonDocMixin, BiometricMixin, EmploymentMixin, PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'defendant'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
            '<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip" ' +
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
            '<source src="' + url_for(vn) + '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
juvenile = Column(Boolean)
gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
cell = Column(ForeignKey(u'cell.id'), nullable=False, index=True)
cell1 = relationship(u'Cell', primaryjoin='Defendant.cell == Cell.id', backref=u'defendants')
gender1 = relationship(u'Gender', primaryjoin='Defendant.gender == Gender.id', backref=u'defendants')
Medevent = relationship(u'Medevent', secondary='defendant_medevent', backref=u'defendants')
hearing = relationship(u'Hearing', secondary='defendant_hearing', backref=u'defendants')
defendant_hearing = Table(
'defendant_hearing', Model.metadata,
Column('defendant', ForeignKey(u'defendant.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
defendant_medevent = Table(
'defendant_medevent', Model.metadata,
Column('defendant', ForeignKey(u'defendant.id'), primary_key=True, nullable=False),
Column('medevent', ForeignKey(u'Medevent.id'), primary_key=True, nullable=False, index=True)
)
class Discipline(ActivityMixin, AuditMixin, Model):
__tablename__ = 'discipline'
id = Column(Integer, primary_key=True, autoincrement=True)
defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
defendant1 = relationship(u'Defendant', primaryjoin='Discipline.defendant == Defendant.id', backref=u'disciplines')
class Doctemplate(DocMixin, RefTypeMixin, AuditMixin, Model):
__tablename__ = 'doctemplate'
id = Column(Integer, primary_key=True, autoincrement=True)
class Document(DocMixin, AuditMixin, Model):
__tablename__ = 'document'
id = Column(Integer, primary_key=True, autoincrement=True)
doc_template = Column(ForeignKey(u'doctemplate.id'), index=True)
confidential = Column(Boolean)
pagecount = Column(Integer)
filing = Column(ForeignKey(u'filing.id'), nullable=False, index=True)
locked = Column(Boolean)
doctemplate = relationship(u'Doctemplate', primaryjoin='Document.doc_template == Doctemplate.id', backref=u'documents')
filing1 = relationship(u'Filing', primaryjoin='Document.filing == Filing.id', backref=u'documents')
class Filing(AuditMixin, Model):
__tablename__ = 'filing'
id = Column(Integer, primary_key=True, autoincrement=True)
uploaddate = Column(DateTime)
pagecount = Column(Integer)
totalfees = Column(Numeric(12, 2), nullable=False)
filing_attorney = Column(ForeignKey(u'Lawyers.id'), nullable=False, index=True)
filing_prosecutor = Column(ForeignKey(u'prosecutor.id'), nullable=False, index=True)
assessedfees = Column(Numeric(12, 2))
receiptverified = Column(Boolean)
amountpaid = Column(Numeric(12, 2))
feebalance = Column(Numeric(12, 2))
paymenthistory = Column(Text, nullable=False)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
urgent = Column(Boolean)
urgentreason = Column(Text, nullable=False)
case1 = relationship(u'Case', primaryjoin='Filing.case == Case.id', backref=u'filings')
Lawyer = relationship(u'Lawyer', primaryjoin='Filing.filing_attorney == Lawyer.id', backref=u'filings')
prosecutor = relationship(u'Prosecutor', primaryjoin='Filing.filing_prosecutor == Prosecutor.id', backref=u'filings')
filingtype = relationship(u'Filingtype', secondary='filing_filingtype', backref=u'filings')
payment = relationship(u'Payment', secondary='filing_payment', backref=u'filings')
filing_filingtype = Table(
'filing_filingtype', Model.metadata,
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False),
Column('filingtype', ForeignKey(u'filingtype.id'), primary_key=True, nullable=False, index=True)
)
filing_payment = Table(
'filing_payment', Model.metadata,
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False),
Column('payment', ForeignKey(u'payment.id'), primary_key=True, nullable=False, index=True)
)
class Filingtype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'filingtype'
id = Column(Integer, primary_key=True, autoincrement=True)
fees = Column(Numeric(12, 2))
perpagecost = Column(Numeric(12, 2))
paid_per_page = Column(Boolean)
class Gender(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'gender'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(20), nullable=False, unique=True)
class Hearing(ActivityMixin, AuditMixin, Model):
__tablename__ = 'hearing'
id = Column(Integer, primary_key=True, autoincrement=True)
hearingdate = Column(DateTime, nullable=False)
adjourned = Column(Boolean)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
remandwarrant = Column(Text)
hearing_type = Column(ForeignKey(u'hearingtype.id'), nullable=False, index=True)
remanddays = Column(Integer)
remanddate = Column(Date)
remandwarrantexpirydate = Column(Date)
nexthearingdate = Column(Date)
finalhearing = Column(Boolean, nullable=False)
transcript = Column(Text)
audio = Column(LargeBinary)
video = Column(LargeBinary)
case1 = relationship(u'Case', primaryjoin='Hearing.case == Case.id', backref=u'hearings')
court1 = relationship(u'Court', primaryjoin='Hearing.court == Court.id', backref=u'hearings')
hearingtype = relationship(u'Hearingtype', primaryjoin='Hearing.hearing_type == Hearingtype.id', backref=u'hearings')
prosecutor = relationship(u'Prosecutor', secondary='hearing_prosecutor', backref=u'hearings')
policeofficer = relationship(u'Policeofficer', secondary='hearing_policeofficer', backref=u'hearings')
witness = relationship(u'Witnes', secondary='hearing_witness', backref=u'hearings')
Lawyers = relationship(u'Lawyer', secondary='hearing_lawyer', backref=u'hearings')
judicialofficer = relationship(u'Judicialofficer', secondary='hearing_judicialofficer', backref=u'hearings')
hearing_judicialofficer = Table(
'hearing_judicialofficer', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('judicialofficer', ForeignKey(u'judicialofficer.id'), primary_key=True, nullable=False, index=True)
)
hearing_lawyer = Table(
'hearing_lawyer', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('lawyer', ForeignKey(u'Lawyers.id'), primary_key=True, nullable=False, index=True)
)
hearing_policeofficer = Table(
'hearing_policeofficer', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('policeofficer', ForeignKey(u'policeofficer.id'), primary_key=True, nullable=False, index=True)
)
hearing_prosecutor = Table(
'hearing_prosecutor', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False, index=True)
)
hearing_witness = Table(
'hearing_witness', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('witness', ForeignKey(u'witness.id'), primary_key=True, nullable=False, index=True)
)
class Hearingtype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'hearingtype'
id = Column(Integer, primary_key=True, autoincrement=True)
class Investigation(PlaceMixin, AuditMixin, Model):
__tablename__ = 'investigation'
id = Column(Integer, primary_key=True, autoincrement=True)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
actiondate = Column(DateTime, nullable=False)
evidence = Column(Text, nullable=False)
narrative = Column(Text, nullable=False)
weather = Column(Text, nullable=False)
location = Column(Text, nullable=False)
case1 = relationship(u'Case', primaryjoin='Investigation.case == Case.id', backref=u'investigations')
policeofficer = relationship(u'Policeofficer', secondary='investigation_policeofficer', backref=u'investigations')
witness = relationship(u'Witnes', secondary='investigation_witness', backref=u'investigations')
investigation_policeofficer = Table(
'investigation_policeofficer', Model.metadata,
Column('investigation', ForeignKey(u'investigation.id'), primary_key=True, nullable=False),
Column('policeofficer', ForeignKey(u'policeofficer.id'), primary_key=True, nullable=False, index=True)
)
investigation_witness = Table(
'investigation_witness', Model.metadata,
Column('investigation', ForeignKey(u'investigation.id'), primary_key=True, nullable=False),
Column('witness', ForeignKey(u'witness.id'), primary_key=True, nullable=False, index=True)
)
class JoRank(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'jo_rank'
id = Column(Integer, primary_key=True, autoincrement=True)
appelation = Column(Text, nullable=False)
informaladdress = Column(Text, nullable=False)
class Judicialofficer(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'judicialofficer'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
            '<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip" ' +
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
            '<source src="' + url_for(vn) + '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
j_o__rank = Column(ForeignKey(u'jo_rank.id'), nullable=False, index=True)
gender = Column(ForeignKey(u'gender.id'), index=True)
court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
court1 = relationship(u'Court', primaryjoin='Judicialofficer.court == Court.id', backref=u'judicialofficers')
gender1 = relationship(u'Gender', primaryjoin='Judicialofficer.gender == Gender.id', backref=u'judicialofficers')
jo_rank = relationship(u'JoRank', primaryjoin='Judicialofficer.j_o__rank == JoRank.id', backref=u'judicialofficers')
class Lawfirm(PlaceMixin, RefTypeMixin, AuditMixin, Model):
__tablename__ = 'lawfirm'
id = Column(Integer, primary_key=True, autoincrement=True)
class Natureofsuit(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'natureofsuit'
id = Column(Integer, primary_key=True, autoincrement=True)
class Payment(AuditMixin, Model):
__tablename__ = 'payment'
id = Column(Integer, primary_key=True, autoincrement=True)
amountpaid = Column(Numeric(12, 2))
datepaid = Column(DateTime)
paymentreference = Column(String(80), nullable=False)
paymentconfirmed = Column(Boolean)
paidby = Column(Text, nullable=False)
msisdn = Column(Text)
receiptnumber = Column(String(100), nullable=False)
ispartial = Column(Boolean)
bail = Column(ForeignKey(u'bail.id'), index=True)
billrefnumber = Column(Text, nullable=False)
payment_method = Column(ForeignKey(u'paymentmethod.id'), nullable=False, index=True)
paymentdescription = Column(Text, nullable=False)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
bail1 = relationship(u'Bail', primaryjoin='Payment.bail == Bail.id', backref=u'payments')
case1 = relationship(u'Case', primaryjoin='Payment.case == Case.id', backref=u'payments')
paymentmethod = relationship(u'Paymentmethod', primaryjoin='Payment.payment_method == Paymentmethod.id', backref=u'payments')
class Paymentmethod(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'paymentmethod'
id = Column(Integer, primary_key=True, autoincrement=True)
key = Column(Text, nullable=False)
secret = Column(Text, nullable=False)
portal = Column(Text, nullable=False)
tillnumber = Column(Text, nullable=False)
shortcode = Column(Text, nullable=False)
class Plaintiff(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'plaintiff'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
            '<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip" ' +
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
            '<source src="' + url_for(vn) + '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
gender = Column(ForeignKey(u'gender.id'), index=True)
juvenile = Column(Boolean)
gender1 = relationship(u'Gender', primaryjoin='Plaintiff.gender == Gender.id', backref=u'plaintiffs')
class Policeofficer(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'policeofficer'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
            '<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip" ' +
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
            '<source src="' + url_for(vn) + '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
police_rank = Column(ForeignKey(u'policerank.id'), nullable=False, index=True)
gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
servicenumber = Column(Text)
gender1 = relationship(u'Gender', primaryjoin='Policeofficer.gender == Gender.id', backref=u'policeofficers')
policerank = relationship(u'Policerank', primaryjoin='Policeofficer.police_rank == Policerank.id', backref=u'policeofficers')
policerole = relationship(u'Policerole', secondary='policeofficer_policerole', backref=u'policeofficers')
policeofficer_policerole = Table(
'policeofficer_policerole', Model.metadata,
Column('policeofficer', ForeignKey(u'policeofficer.id'), primary_key=True, nullable=False),
Column('policerole', ForeignKey(u'policerole.id'), primary_key=True, nullable=False, index=True)
)
class Policerank(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'policerank'
id = Column(Integer, primary_key=True, autoincrement=True)
class Policerole(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'policerole'
id = Column(Integer, primary_key=True, autoincrement=True)
class Policestation(PlaceMixin, AuditMixin, Model):
__tablename__ = 'policestation'
id = Column(Integer, primary_key=True, autoincrement=True)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
officercommanding = Column(String(100))
police_station_type = Column(ForeignKey(u'policestationtype.id'), nullable=False, index=True)
policestationtype = relationship(u'Policestationtype', primaryjoin='Policestation.police_station_type == Policestationtype.id', backref=u'policestations')
town1 = relationship(u'Town', primaryjoin='Policestation.town == Town.id', backref=u'policestations')
class Policestationtype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'policestationtype'
id = Column(Integer, primary_key=True, autoincrement=True)
class Prison(PlaceMixin, AuditMixin, Model):
__tablename__ = 'prison'
id = Column(Integer, primary_key=True, autoincrement=True)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
warden = Column(String(100))
capacity = Column(Integer)
population = Column(Integer)
cellcount = Column(Integer)
town1 = relationship(u'Town', primaryjoin='Prison.town == Town.id', backref=u'prisons')
securityrank = relationship(u'Securityrank', secondary='prison_securityrank', backref=u'prisons')
prison_securityrank = Table(
'prison_securityrank', Model.metadata,
Column('prison', ForeignKey(u'prison.id'), primary_key=True, nullable=False),
Column('securityrank', ForeignKey(u'securityrank.id'), primary_key=True, nullable=False, index=True)
)
class Prisoncommital(ActivityMixin, AuditMixin, Model):
__tablename__ = 'prisoncommital'
prison = Column(ForeignKey(u'prison.id'), primary_key=True, nullable=False)
warrantno = Column(String(100), primary_key=True, nullable=False)
defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
warrantduration = Column(Integer, nullable=False)
warrantdate = Column(DateTime)
warrant = Column(Text, nullable=False)
warrantexpiry = Column(DateTime, nullable=False)
history = Column(Text, nullable=False)
earliestrelease = Column(Date)
releasedate = Column(DateTime)
property = Column(Text)
itemcount = Column(Integer)
releasenotes = Column(Text)
commitalnotes = Column(Text)
police_officer_commiting = Column(ForeignKey(u'policeofficer.id'), nullable=False, index=True)
defendant1 = relationship(u'Defendant', primaryjoin='Prisoncommital.defendant == Defendant.id', backref=u'prisoncommitals')
hearing1 = relationship(u'Hearing', primaryjoin='Prisoncommital.hearing == Hearing.id', backref=u'prisoncommitals')
policeofficer = relationship(u'Policeofficer', primaryjoin='Prisoncommital.police_officer_commiting == Policeofficer.id', backref=u'prisoncommitals')
prison1 = relationship(u'Prison', primaryjoin='Prisoncommital.prison == Prison.id', backref=u'prisoncommitals')
warder = relationship(u'Warder', secondary='prisoncommital_warder', backref=u'prisoncommitals')
prisoncommital_warder = Table(
'prisoncommital_warder', Model.metadata,
Column('prisoncommital_prison', Integer, primary_key=True, nullable=False),
Column('prisoncommital_warrantno', String(100), primary_key=True, nullable=False),
Column('warder', ForeignKey(u'warder.id'), primary_key=True, nullable=False, index=True),
ForeignKeyConstraint(['prisoncommital_prison', 'prisoncommital_warrantno'], [u'prisoncommital.prison', u'prisoncommital.warrantno'])
)
class Prosecutor(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'prosecutor'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
'<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'+
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
'<source src="' + url_for(vn) + '" type="audio/mpeg"'> +'<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
gender = Column(ForeignKey(u'gender.id'), index=True)
gender1 = relationship(u'Gender', primaryjoin='Prosecutor.gender == Gender.id', backref=u'prosecutors')
prosecutorteam = relationship(u'Prosecutorteam', secondary='prosecutor_prosecutorteam', backref=u'prosecutors')
prosecutor_prosecutorteam = Table(
'prosecutor_prosecutorteam', Model.metadata,
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False),
Column('prosecutorteam', ForeignKey(u'prosecutorteam.id'), primary_key=True, nullable=False, index=True)
)
class Prosecutorteam(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'prosecutorteam'
id = Column(Integer, primary_key=True, autoincrement=True)
class Securityrank(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'securityrank'
id = Column(Integer, primary_key=True, autoincrement=True)
class Subcounty(AuditMixin, Model):
__tablename__ = 'subcounty'
id = Column(Integer, primary_key=True, autoincrement=True)
county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
county1 = relationship(u'County', primaryjoin='Subcounty.county == County.id', backref=u'subcounties')
class Surety(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'surety'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
'<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'+
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
'<source src="' + url_for(vn) + '" type="audio/mpeg"'> +'<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
gender = Column(ForeignKey(u'gender.id'), index=True)
gender1 = relationship(u'Gender', primaryjoin='Surety.gender == Gender.id', backref=u'sureties')
class Town(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'town'
id = Column(Integer, primary_key=True, autoincrement=True)
subcounty = Column(ForeignKey(u'subcounty.id'), nullable=False, index=True)
subcounty1 = relationship(u'Subcounty', primaryjoin='Town.subcounty == Subcounty.id', backref=u'towns')
class Visit(ActivityMixin, AuditMixin, Model):
__tablename__ = 'visit'
vistors = Column(ForeignKey(u'vistor.id'), primary_key=True, nullable=False)
defendants = Column(ForeignKey(u'defendant.id'), primary_key=True, nullable=False, index=True)
visitdate = Column(DateTime)
visitnotes = Column(Text)
visitduration = Column(INTERVAL)
defendant = relationship(u'Defendant', primaryjoin='Visit.defendants == Defendant.id', backref=u'visits')
vistor = relationship(u'Vistor', primaryjoin='Visit.vistors == Vistor.id', backref=u'visits')
class Vistor(AuditMixin, Model):
__tablename__ = 'vistor'
id = Column(Integer, primary_key=True, autoincrement=True)
gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
gender1 = relationship(u'Gender', primaryjoin='Vistor.gender == Gender.id', backref=u'vistors')
class Warder(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'warder'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
'<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'+
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
'<source src="' + url_for(vn) + '" type="audio/mpeg"'> +'<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
prison = Column(ForeignKey(u'prison.id'), nullable=False, index=True)
warder_rank = Column(ForeignKey(u'warderrank.id'), nullable=False, index=True)
prison1 = relationship(u'Prison', primaryjoin='Warder.prison == Prison.id', backref=u'warders')
warderrank = relationship(u'Warderrank', primaryjoin='Warder.warder_rank == Warderrank.id', backref=u'warders')
class Warderrank(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'warderrank'
id = Column(Integer, primary_key=True, autoincrement=True)
class Witnes(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'witness'
def ViewName(self):
return self.__class__.__name__ +'View'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn+'.show', pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def print_button(self):
vn = self.ViewName()
#pdf = render_pdf(url_for(vn, pk=str(self.id)))
#pdf = pdfkit.from_string(url_for(vn, pk=str(self.id)))
#response = make_response(pdf)
#response.headers['Content-Type'] = 'application/pdf'
#response.headers['Content-Disposition'] = 'inline; filename=output.pdf'
return Markup(
'<a href="' + url_for(vn) + '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'+
'title="Print">' +
'<i class="fa fa-edit"></i>' +
'</a>')
def audio_play(self):
vn = self.ViewName()
return Markup(
'<audio controls>' +
'<source src="' + url_for(vn) + '" type="audio/mpeg"'> +'<i class="fa fa-volume-up"></i>' +
'Your browser does not support the audio element.' +
'</audio>'
)
# edit_form_extra_fields = {'field2': TextField('field2',
# widget=BS3TextFieldROWidget())}
id = Column(Integer, primary_key=True, autoincrement=True)
fordefense = Column(Boolean)
gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
gender1 = relationship(u'Gender', primaryjoin='Witnes.gender == Gender.id', backref=u'witness')
| [
"[email protected]"
] | |
6a3989acf13637b1fe0ed5d2c37313685f921920 | 3043ef47d3863bb59fe91c7f76c789dfd375620a | /mysql-from-python13.py | 8a77946f304e4bd275570d1b14f36804262c6753 | [] | no_license | mlr314159/mysql1 | 73997fbd0759a1032570b31537baefde59822921 | 382659edc40ccfd04a7a2a42fc26beaa68364d21 | refs/heads/master | 2021-06-12T16:43:15.794554 | 2020-04-13T12:42:59 | 2020-04-13T12:42:59 | 254,359,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import os
import pymysql
# get username
# (modify this variable if running on a different environment)
username = os.getenv('C9_USER')
connection = pymysql.connect(host='localhost',
user=username,
password='',
db='Chinook')
try:
# Run a query
with connection.cursor() as cursor:
row = cursor.executemany('DELETE FROM Friends WHERE name= %s;', ['bob','Jim'])
connection.commit()
finally:
    # Close the connection, regardless of whether the above was successful
connection.close() | [
"[email protected]"
] | |
bee4204a5db0269f46a9888c9764dad47764fa6f | f09a017e6af77eed1e604db1c4396e762162f3f5 | /democoder/apps/core/views.py | 0f072fdbc30191031d69f28890bfbd51443113c5 | [] | no_license | yrik/democoder | de710fb524053f4160d82ca1d37ccbd2e813e708 | 43a4104498b2ab3ac3ef681c802b01d624663d6f | refs/heads/master | 2018-12-29T17:06:59.532604 | 2015-03-14T16:37:57 | 2015-03-14T16:37:57 | 38,677,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django.http import Http404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.generic import (
DetailView, CreateView, UpdateView, DeleteView, ListView
)
from pure_pagination.mixins import PaginationMixin
from braces.views import OrderableListMixin
from enhanced_cbv.views import ListFilteredView
from allauth.account.utils import complete_signup
from .decorators import ForbiddenUser
from .forms import LoginForm
from .forms import SignUpUserForm
from core.models import (
Article,
User,
)
from core.forms import (
ArticleCreateForm,
)
from core.filters import (
ArticleListViewFilter,
UserListViewFilter,
)
@ForbiddenUser(forbidden_usertypes=[u'AnonymousUser'])
class ArticleCreateView(CreateView):
"""Article create view"""
model = Article
form_class = ArticleCreateForm
template_name = "article-create.html"
def get_success_url(self):
        messages.success(self.request, _("Article successfully created"))
return reverse("article-list", args=[])
class ArticleListView(OrderableListMixin, ListFilteredView, PaginationMixin):
"""Article list view"""
model = Article
template_name = "article-list.html"
paginate_by = 10
orderable_columns = ["description", "title", ]
orderable_columns_default = "id"
filter_set = ArticleListViewFilter
def auth(request):
login_form = LoginForm()
signup_form_user = SignUpUserForm(prefix="user", request=request)
redirect_url = '/'
redirect_url = request.GET.get('next', redirect_url)
if request.method == 'POST' and 'login_form' in request.POST:
login_form = LoginForm(request.POST)
if login_form.is_valid():
return login_form.login(request, redirect_url=redirect_url)
if request.method == 'POST' and 'signup_user_form' in request.POST:
signup_form_user = SignUpUserForm(
request.POST,
prefix="user",
request=request)
if signup_form_user.is_valid():
user = signup_form_user.save(request)
return complete_signup(request, user, False, redirect_url)
return render(request, "auth.html", {
"login_form": login_form,
"signup_form_user": signup_form_user,
})
| [
"[email protected]"
] | |
0803fc25829844f46b023753be5543d2e6a87e9c | d25cbb984047591a539c63c2de18f9820f724ee2 | /P003_Bucle_008For Contador pares impares.py | fac201ab65f8dd3044fad3abf42164563904feab | [] | no_license | jesuscm18/1-Evaluacion | 8149c00e99f8e4878d6c082799d79a7c5d5d62ca | 01c5106c643074ef4e36732295d4ebfd8c8a2c6c | refs/heads/master | 2020-04-08T05:55:35.314176 | 2018-12-19T15:57:02 | 2018-12-19T15:57:02 | 148,601,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | def contador_par_impar():
n=input("Hasta que numero deseas contar? ")
npares=0
nimpares=0
for n in range(1,n+1):
if(n%2==0):
print str(n)," es par"
npares=npares+1
else:
print str(n)," es impar"
nimpares=nimpares+1
print "He contado ",npares, "numeros pares."
print "He contado ",nimpares, " numeros impares."
contador_par_impar()
| [
"[email protected]"
] | |
f1f693729c6bf591f84820488f8cb11648fbac54 | bb943b09e60f219b27059171aaec9e0bb228b462 | /test.py | 9dcd730761acc7f4879e41a6eb0c06085ead955f | [] | no_license | Kienyew/Russian-Chinese-Vocabulary | 0fb7869da681a530f6b3d6275d460346a10a1a8d | 39a93e53d55937dbdeb53cd100561f495130b4a4 | refs/heads/master | 2022-11-30T10:25:59.458845 | 2020-07-16T07:44:10 | 2020-07-16T07:44:19 | 279,328,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | #!/usr/bin/env python3
import unittest
from extract import *
class Test(unittest.TestCase):
def test_separate_ru_zh_chap(self):
inputs = [
'а而(1)',
'а而(1)',
'автомобиль汽车(18)',
'алло喂!(打电话时招呼用语)(7)',
'болеть[未]疼,痛(13)',
'артист[阳],артистка[阴]演员(13)',
'аудитория(大学)教室(6)',
'бегать[未,不定向]跑(18)',
'наступать//наступить(某种时间)来临,到来(17)',
'очередь (ж.) 次序;队列 (5)',
'покойный (сущ.) 亡者,死者 (12)',
'поехать[完](乘车、船等)前往,出发(15)',
'пятёрка数字5;(学校成绩)五分(12)'
]
expects = [
('а', '而', '(1)'),
('а', '而', '(1)'),
('автомобиль', '汽车', '(18)'),
('алло', '喂!(打电话时招呼用语)', '(7)'),
('болеть[未]', '疼,痛', '(13)'),
('артист[阳],артистка[阴]', '演员', '(13)'),
('аудитория', '(大学)教室', '(6)'),
('бегать[未,不定向]', '跑', '(18)'),
('наступать//наступить', '(某种时间)来临,到来', '(17)'),
('очередь (ж.)', '次序;队列', '(5)'),
('покойный (сущ.)', '亡者,死者', '(12)'),
('поехать[完]', '(乘车、船等)前往,出发', '(15)'),
('пятёрка', '数字5;(学校成绩)五分', '(12)'),
]
for input, expect in zip(inputs, expects):
self.assertEqual(separate_ru_zh_chap(input), expect)
def test_parse_ru_entry(self):
# RuEntry(word, annotations, gender, accent_index)
inputs = [
'а',
'автомобиль',
'алло',
'болеть[未]',
'артист[阳]',
'бегать[未,不定向]',
'очередь (ж.)',
'покойный (сущ.)',
'эмоциона´льно (нареч.)',
]
expects = [
RuEntry('а', [], None, None),
RuEntry('автомобиль', [], None, None),
RuEntry('алло', [], None, None),
RuEntry('болеть', ['[未]'], None, None),
RuEntry('артист', ['[阳]'], None, None),
RuEntry('бегать', ['[未,不定向]'], None, None),
RuEntry('очередь', [], FEMALE, None),
RuEntry('покойный', ['(сущ.)'], None, None),
RuEntry('эмоционально', ['(нареч.)'], None, 7),
]
for input, expect in zip(inputs, expects):
self.assertEqual(parse_ru_entry(input), expect)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4078a1a4d378355f5f8216e7f2bfad9dd57aa9f5 | 10851762fcfedbe79a9f4f4374fb68085c05e06e | /homepage/admin.py | a6f5d2bed93aabd26fa7c5c61faddddf4b3afbcf | [] | no_license | epetrov1/MG-Ausbau-GmbH | b676c02310e7fe04b25d4cf18f3d8b9e9c21a274 | 083d224fce7af7353badeeadbf50c9f3bb2809a8 | refs/heads/main | 2023-05-13T14:28:12.466219 | 2021-06-04T15:13:12 | 2021-06-04T15:13:12 | 371,602,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.contrib import admin
#admin.site.register(HomePage)
from django_summernote.admin import SummernoteModelAdmin
from .models import HomePage
class HomeAboutAdmin(SummernoteModelAdmin):
exclude = ('preporyka1', 'preporyka2', 'preporyka3',)
list_display = ('header',)
summernote_fields = ('about_us', 'about_us_de', 'about_us_en')
admin.site.register(HomePage, HomeAboutAdmin) | [
"[email protected]"
] | |
b9527ab17bba32b89da47f525605d586b5a3b1fb | 65a1c12e31a776cd4d098aac8e2f40a5393cd8b8 | /Code/4.process&predict_data/code_v7.0.py | 7b11fb2ddb0403a61d60a8579fad5ab7d57a4c52 | [] | no_license | YoOjii/Tianchi_DiabetesPredict | 78d7e7fa51ba91711068b49784e7129a23fae48d | 933f95707458dcb154565e2d346753987b13d5ab | refs/heads/master | 2022-01-09T15:43:59.522786 | 2019-05-07T04:50:36 | 2019-05-07T04:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | # coding=utf-8
'''
Author:chenhao
Date: Jan 26 ,2017
Description: Use the train_Drop_Delete_Log_Poly_Ratio.csv in LGB model
'''
import time
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from dateutil.parser import parse
from sklearn.cross_validation import KFold
from sklearn.metrics import mean_squared_error
from scipy import stats
data_path = 'data/'
train = pd.read_csv(data_path + 'train_Drop_Delete_Log_Poly_Ratio_for_B.csv', encoding='gb2312')
test = pd.read_csv(data_path + 'test_Drop_Delete_Log_Poly_Ratio_for_B.csv', encoding='gb2312')
predictors = [f for f in train.columns if f not in ['血糖']]
#print(train[predictors])
def evalerror(pred, df):
label = df.get_label().values.copy()
score = mean_squared_error(label, pred) * 0.5
return ('0.5mse', score, False)
print('开始训练...')
# Parameter settings from the original tuning run
params = {
'learning_rate': 0.01,
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'mse',
'sub_feature': 0.7,
'num_leaves': 60,
'colsample_bytree': 0.7,
'feature_fraction': 0.7,
'min_data': 100,
'min_hessian': 1,
'verbose': -1,
}
print('开始CV 5折训练...')
t0 = time.time()
train_preds = np.zeros(train.shape[0])
# Create an all-zero array with one row per test sample and 5 columns (one per CV fold)
test_preds = np.zeros((test.shape[0], 5))
kf = KFold(len(train), n_folds=5, shuffle=True, random_state=520)
for i, (train_index, test_index) in enumerate(kf):
print('第{}次训练...'.format(i))
train_feat1 = train.iloc[train_index]
train_feat2 = train.iloc[test_index]
lgb_train1 = lgb.Dataset(train_feat1[predictors], train_feat1['血糖'])
lgb_train2 = lgb.Dataset(train_feat2[predictors], train_feat2['血糖'])
gbm = lgb.train(params,
lgb_train1,
num_boost_round=3000,
valid_sets=lgb_train2,
verbose_eval=100,
feval=evalerror,
early_stopping_rounds=100)
feat_imp = pd.Series(gbm.feature_importance(), index=predictors).sort_values(ascending=False)
train_preds[test_index] += gbm.predict(train_feat2[predictors])
test_preds[:, i] = gbm.predict(test[predictors])
print('线下得分: {}'.format(mean_squared_error(train['血糖'], train_preds) * 0.5))
print('CV训练用时{}秒'.format(time.time() - t0))
submission = pd.DataFrame({'pred': test_preds.mean(axis=1)})
submission.to_csv(r'sub{}.csv'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), header=None,index=False, float_format='%.3f')
| [
"[email protected]"
] | |
50f086fd6e0a40d5eab3d18fd4d577b2909cf7a7 | 156c52604f36479beae565137f02493cf794912c | /blast_filter_100.py | c58cb44c79c9f875fbb4b42c9ecf12867767e920 | [] | no_license | hildeingvildhummel/NIOZ-ring-formation | ac936855e95abdc5c6f11325c4316853df324a47 | c542eb82d54ff7fd31426e884e3ce27909c132d8 | refs/heads/master | 2022-11-22T07:20:26.930647 | 2020-07-28T14:53:53 | 2020-07-28T14:53:53 | 283,243,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | py | import argparse
ap = argparse.ArgumentParser(description = 'Remove the genes if all the samples have a 100% identity to the reference database.')
ap.add_argument('-b', '--blast', nargs='+', required = True, help = 'The file(s) containing the blast output')
ap.add_argument('-index', '--index', required = True, help = 'The index of the column containing the pIDENT, starting the count with zero. So, the first column is 0, the second column is 1, etc.')
ap.add_argument('-o', '--output', required = True, help = 'The output name of the list containing the IDs to keep. The output is saved as list file')
args, leftovers = ap.parse_known_args()
#Create 2 empty dictionaries
dict = {}
dict_list = {}
#Iterate over the blast output files
for f in args.blast:
#Print the name of the blast output file
print(f)
#Create an empty list
key_list = []
#Open the blast output file in read modus
blast = open(f, 'r')
#Read the Blast output file per line, creating a list
blast = blast.read().splitlines()
#Select the column within the blast file containing the pIDENT
blast_ident = [s.split('\t')[int(args.index)] for s in blast]
#Select the gene ID of PROKKA of the reference database
ID = [s.split('\t')[1] for s in blast]
#Select the gene ID of PROKKA of the query
ID_u = [s.split('\t')[0] for s in blast]
#Iterate over both gene names (reference and query) and the pIDENT
for i, j, k in zip(ID, blast_ident, ID_u):
#Check if the reference ID is already in the empty list
if i in key_list:
#If so, append the ID of the query to the dictionary with the given reference ID as reference
dict_list[i].append(k)
continue
        #If the reference ID is already noted within the second dictionary..
if i in dict:
#Add the given pIDENT to the already existing value of the key given the reference ID
dict[i] = float(dict[i]) + float(j)
#Append the query ID to the dictionary given the reference ID
dict_list[i].append(k)
#Append the reference ID to the list
key_list.append(i)
else:
#If the reference ID is not saved in either the list or the first dictionary..
# Add the pIDENT to the dictionary with the reference ID as key
dict[i] = float(j)
# Add the query ID as a list to the dictionary with the reference ID as key
dict_list[i] = [k]
#Save the reference ID to the list
key_list.append(i)
#Create a new dictionary, only containing the values and keys if the value is unequal to the number of samples * 100
d = { k : v for k,v in dict.items() if v != len(args.blast) * 100}
#Save the query ID values if their corresponding key is present within the filtered dictionary d
ID_list = [dict_list[x] for x in list(d.keys())]
#Flatten the list
flatten_list = [val for sublist in ID_list for val in sublist]
#Save the output
with open(args.output + '.list', 'w') as f:
for item in list(set(flatten_list)):
f.write("%s\n" % item)
| [
"[email protected]"
] | |
447e8064c4c28d908ba8b874cbb244f4b82aa242 | 9fa567346bc31d985f31ba8387d7135a1c8321ba | /Where's Wally.py | 21ab63f2a2c6f41fb48facec57d1bfce4293a897 | [] | no_license | MrPogson/Treasure | 6389c55c82bb2346f906ba441dccc93522077228 | d66b8f37a7fb3a99c8aa22feb36868cf067054ef | refs/heads/master | 2021-01-01T04:12:29.334512 | 2016-04-27T07:04:54 | 2016-04-27T07:04:54 | 57,034,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | #import libraries
import random
import time
#Define global variables
global Pos
global grid
global grid2
global score
#set score to 0
score = 0
#set starting position to 30
Pos=30
#Main function
def main():
global grid #make the variable 'grid' available
global grid2#make the variable 'grid2' available
play =input("\nDo you want to 'Play' or 'Quit'? ").upper() #Ask if they want to play
if play == "PLAY": #if they choose to play
grid =["W","W","W","W","O","O","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X"]#set up the grid
random.shuffle(grid)#randomise the positions
grid2=["X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X","X",]#set up the blank grid
grid2.insert(Pos,"P")#put a P in the bottom left corner
movement()#go to function 'movement'
elif play == "QUIT":#if they choose quit
game_over()#go to function
else:#of they put anything else in
print("Error - Please choose 'Play' or 'Quit'")#tell them to try again
main()#go to function
def print_grid_2():#grid with game elements, hidden by default
global grid
print("\n")
matrix=[grid [i:i+6]for i in range (0,len(grid),6)]
for a,b,c,d,e,f in matrix:
print (a,b,c,d,e,f)
def print_grid():# - grid of X's
global grid2
print("\n")
matrix2=[grid2 [j:j+6]for j in range (0,len(grid2),6)]
for a,b,c,d,e,f in matrix2:
print (a,b,c,d,e,f)
def movement():
global grid
global grid2
global Pos
print_grid()
#print_grid_2() #uncomment to show position of game elements
move=input("\nDo you want to go 'Up', 'Down', 'Left' or 'Right'?\n")
move=move.upper()
if move == ("UP"):
qty = int(input("\nHow far up?\n"))
old_p=Pos
Pos=Pos+((qty)*-6)
if qty == 0:
print("\nPlease choose a number between 1-6!")
Pos=old_p
movement()
if Pos <0:
print ("\nThat's too far up! You've gone off the board!\nTry again!")
Pos=old_p
movement()
print("\n#####################\nnew Position =",Pos)
check_score()
grid2[Pos] = "P"
grid2[old_p]="-"
elif move == ("DOWN"):
qty = int(input("\nHow far down?\n"))
old_p=Pos
Pos=Pos+((qty)*6)
if qty == 0:
print("\nPlease choose a number between 1-6!")
Pos=old_p
movement()
if Pos >35:
print ("\nThat's too far up! You've gone off the board!\nTry again!")
Pos=old_p
movement()
print("\nnew Position =",Pos)
check_score()
grid2[Pos] = "P"
grid2[old_p]="-"
elif move == ("RIGHT"):
qty = int(input("\nHow far right?\n"))
old_p=Pos
Pos=Pos+(qty)
if qty == 0:
print("\nPlease choose a number between 1-6!")
Pos=old_p
movement()
print("\n#########################\nnew Position =",Pos)
check_score()
grid2[Pos] = "P"
grid2[old_p]="-"
elif move == ("LEFT"):
qty = int(input("\nHow far left?\n"))
old_p=Pos
Pos=Pos-(qty)
if qty == 0:
print("\nPlease choose a number between 1-6!")
Pos=old_p
movement()
print("\n########################\nnew Position =",Pos)
check_score()
grid2[Pos] = "P"
grid2[old_p]="-"
else:
print ("choose 'Up', 'Down', 'Left' or 'Right'")
movement()
def check_score():
global Pos
global score
global grid
if (grid[Pos])not in ["W","O"]:
print("\nMiss!\n")
print ("Score = ",score,"\n####################")
elif (grid[Pos])=="W":
print("\nYou found A Wally!! 100 points!\n")
score=score+100
print ("Score = ",score,"\n################################")
grid[Pos]="X"
if (grid[Pos])=="O":
print("\nYou hit Oddlaw! You lose!\n")
score=0
print ("Score = ",score,"\n################################")
def game_over():
print ("##############\nGame Over!\nThank you for playing!\n#############\n")
time.sleep(5)
exit()
print ("Welcome!")
main()
| [
"[email protected]"
] | |
dd570b50aacbe9917550509619ad2e260c97469c | 6e4bc4663adf0ed369cd5ca068a10d1ef35eef05 | /mysql-test2.py | 2253ee0278076ddc72ab00de16e0378faef5fce2 | [] | no_license | makgab/Python-examples | b8227612108d9484a3868500adfdaacae74f4436 | 5fb1a5d2b953c6b58bb9e02f94bbe76508a63226 | refs/heads/master | 2020-04-13T05:04:54.979651 | 2018-12-24T10:56:17 | 2018-12-24T10:56:17 | 162,981,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # mysql demo
import mysql.connector
# connection:
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="mysql"
)
print(mydb)
print ("SELECT SQL...")
# SELECT SQL
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM pythonDB.customers")
myresult = mycursor.fetchall()
for x in myresult:
print(x)
# end
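# A parameterized-query sketch (hypothetical column and value; mysql.connector
# uses format-style %s placeholders so the driver escapes the value for you):
# mycursor.execute("SELECT * FROM pythonDB.customers WHERE name = %s", ("Peter",))
# for row in mycursor.fetchall():
#     print(row)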
| [
"[email protected]"
] | |
d49fd565d9ed77cf4480dddda0ed1c4da82fceff | 46be20cc55a4e48c0281f6340d7e8d381c1f12cf | /day3/part1_d3_v1.py | 260e2ceb564c247c8eb1bad40468d697ed295213 | [
"MIT"
] | permissive | MarinaPin/adventofcode-2018 | 5c32ff7a7b224d76b2982044b6e7b5cf80a9e85e | 9fd6242258b1546c4858e4da7b6d04453bffe25d | refs/heads/master | 2020-04-09T07:05:28.776280 | 2018-12-14T08:47:08 | 2018-12-14T08:47:08 | 160,140,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | import numpy
import re
N = 1000
def extract_info(line):
j = re.findall("\d+", line)
j = map(int, j)
    x, y, w, h = j[1], j[2], j[3], j[4]  # j[0] is the claim id, which is not needed here
return x,y,w,h
def fill_matrix(file_path):
matrix = numpy.zeros((N,N))
with open(file_path) as f:
for line in f:
x,y,w,h = extract_info(line)
for j in range(y,y+h):
for i in range(x, x+w):
if matrix[j][i] == 1 or matrix[j][i] ==8:
matrix[j][i] = 8
else:
matrix[j][i] = 1
return matrix
def count_Xs(matrix):
n = 0
for j in range(N):
for i in range(N):
if matrix[i][j] == 8:
n += 1
return n
matrix = fill_matrix('input.txt')
num_Xs = count_Xs(matrix)
print num_Xs | [
"[email protected]"
] | |
1d71cdd16103283b54ddbfae586dbd58e635dea8 | 7c28640e152dad3843423d04c96a3a37015bd9ba | /Examples/Game Tutorial/Tutorial Part 6.py | 0b571e28753411ea66a52d103d7f671bc5c1d42d | [] | no_license | DocVaughan/Pythonista | 251bbfd69203cf91f3d6a6bf20d478efd74a61a0 | 7d482c7db2c7b4daae10289b765f09a4f348a50c | refs/heads/master | 2021-01-20T20:48:32.603993 | 2017-12-31T10:33:48 | 2017-12-31T10:33:48 | 61,178,643 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,233 | py | # coding: utf-8
'''
Part 6 -- Meteors Incoming! ☄️
Collecting coins is fun, but did you notice the distinct lack of... challenge?
Let's change that now, and add some meteors to the mix. The mechanism is essentially the same as with the coins, but when the alien collides with a meteor, the game is over.
To make the game a bit harder, the speed at which coins and meteors fall to the ground now increases slightly over time.
'''
from scene import *
import sound
import random
A = Action
def cmp(a, b):
return ((a > b) - (a < b))
standing_texture = Texture('plf:AlienGreen_front')
walk_textures = [Texture('plf:AlienGreen_walk1'), Texture('plf:AlienGreen_walk2')]
# ---[1]
# Because the alien can be hit by a meteor, we need one additional texture for the unhappy alien:
hit_texture = Texture('plf:AlienGreen_hit')
class Coin (SpriteNode):
def __init__(self, **kwargs):
SpriteNode.__init__(self, 'plf:Item_CoinGold', **kwargs)
# ---[2]
# As with the coins, we use a custom subclass of SpriteNode to represent the meteors. For some variety, the texture of the meteor is chosen randomly.
class Meteor (SpriteNode):
def __init__(self, **kwargs):
img = random.choice(['spc:MeteorBrownBig1', 'spc:MeteorBrownBig2'])
SpriteNode.__init__(self, img, **kwargs)
class Game (Scene):
def setup(self):
self.background_color = '#004f82'
self.ground = Node(parent=self)
x = 0
while x <= self.size.w + 64:
tile = SpriteNode('plf:Ground_PlanetHalf_mid', position=(x, 0))
self.ground.add_child(tile)
x += 64
self.player = SpriteNode(standing_texture)
self.player.anchor_point = (0.5, 0)
self.add_child(self.player)
score_font = ('Futura', 40)
self.score_label = LabelNode('0', score_font, parent=self)
self.score_label.position = (self.size.w/2, self.size.h - 70)
self.score_label.z_position = 1
self.items = []
# ---[3]
# Because the game can end now, we need a method to restart it.
# Some of the initialization logic that was previously in `setup()` is now in `new_game()`, so it can be repeated without having to close the game first.
self.new_game()
def new_game(self):
# Reset everything to its initial state...
for item in self.items:
item.remove_from_parent()
self.items = []
self.score = 0
self.score_label.text = '0'
self.walk_step = -1
self.player.position = (self.size.w/2, 32)
self.player.texture = standing_texture
self.speed = 1.0
# ---[4]
# The game_over attribute is set to True when the alien gets hit by a meteor. We use this to stop player movement and collision checking (the update method simply does nothing when game_over is True).
self.game_over = False
def update(self):
if self.game_over:
return
self.update_player()
self.check_item_collisions()
if random.random() < 0.05 * self.speed:
self.spawn_item()
def update_player(self):
g = gravity()
if abs(g.x) > 0.05:
self.player.x_scale = cmp(g.x, 0)
x = self.player.position.x
max_speed = 40
x = max(0, min(self.size.w, x + g.x * max_speed))
self.player.position = x, 32
step = int(self.player.position.x / 40) % 2
if step != self.walk_step:
self.player.texture = walk_textures[step]
sound.play_effect('rpg:Footstep00', 0.05, 1.0 + 0.5 * step)
self.walk_step = step
else:
self.player.texture = standing_texture
self.walk_step = -1
def check_item_collisions(self):
# ---[5]
# The hit testing is essentially the same as before, but now distinguishes between coins and meteors (simply by checking the class of the item).
# When a meteor hits, the game is over (see the `player_hit()` method below).
player_hitbox = Rect(self.player.position.x - 20, 32, 40, 65)
for item in list(self.items):
if item.frame.intersects(player_hitbox):
if isinstance(item, Coin):
self.collect_item(item)
elif isinstance(item, Meteor):
self.player_hit()
elif not item.parent:
self.items.remove(item)
def player_hit(self):
# ---[6]
        # This is called from `check_item_collisions()` when the alien collides with a meteor. The alien simply drops off the screen, and after 2 seconds, a new game is started.
self.game_over = True
sound.play_effect('arcade:Explosion_1')
self.player.texture = hit_texture
self.player.run_action(A.move_by(0, -150))
# Note: The duration of the `wait` action is multiplied by the current game speed, so that it always takes exactly 2 seconds, regardless of how fast the rest of the game is running.
self.run_action(A.sequence(A.wait(2*self.speed), A.call(self.new_game)))
def spawn_item(self):
if random.random() < 0.3:
# ---[7]
# Whenever a new item is created, there's now a 30% chance that it is a meteor instead of a coin.
# Their behavior is very similar to that of the coins, but instead of moving straight down, they may come in at an angle. To accomplish this, the x coordinate of the final position is simply chosen randomly.
meteor = Meteor(parent=self)
meteor.position = (random.uniform(20, self.size.w-20), self.size.h + 30)
d = random.uniform(2.0, 4.0)
actions = [A.move_to(random.uniform(0, self.size.w), -100, d), A.remove()]
meteor.run_action(A.sequence(actions))
self.items.append(meteor)
else:
coin = Coin(parent=self)
coin.position = (random.uniform(20, self.size.w-20), self.size.h + 30)
d = random.uniform(2.0, 4.0)
actions = [A.move_by(0, -(self.size.h + 60), d), A.remove()]
coin.run_action(A.sequence(actions))
self.items.append(coin)
# ---[8]
# To make things a bit more interesting, the entire game gets slightly faster whenever a new item is spawned. The `speed` attribute is essentially a multiplier for the duration of all actions in the scene. Note that this is actually an attribute of `Node`, so you could apply different speeds for different groups of nodes. Since all items are added directly to the scene in this example, we don't make use of that here though.
self.speed = min(3, self.speed + 0.005)
def collect_item(self, item, value=10):
sound.play_effect('digital:PowerUp7')
item.remove_from_parent()
self.items.remove(item)
self.score += value
self.score_label.text = str(self.score)
if __name__ == '__main__':
run(Game(), PORTRAIT, show_fps=True) | [
"[email protected]"
] | |
12902024fb5d9e8618c638334d18e08b1035a528 | 180d0b6eb0fddfdac35b503a48119d1aaab37bcb | /usst_info/search/search_indexes.py | 2c3c8c32ac4295eec1427a6f7769ceabfdd6f35f | [] | no_license | liupengmolly/jelly | 7f2bd4c755b6bc007a653ec53ea8354a2394f52d | 227607169a3a2f06fac7006a4394f41dfa238022 | refs/heads/master | 2021-01-18T16:49:06.836643 | 2017-11-21T14:30:07 | 2017-11-21T14:30:07 | 100,468,094 | 0 | 1 | null | 2017-09-01T06:04:16 | 2017-08-16T08:44:25 | Python | UTF-8 | Python | false | false | 585 | py | import datetime
from haystack import indexes
from search.models import Jwcinfo
class JwcinfoIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
title=indexes.CharField(model_attr='title',boost=1.125)
url = indexes.CharField(model_attr='url')
site=indexes.CharField(model_attr='site',faceted=True)
download=indexes.IntegerField(model_attr='download')
pubtime=indexes.DateField(model_attr='pubtime')
glances=indexes.IntegerField(model_attr='glances')
def get_model(self):
return Jwcinfo
| [
"[email protected]"
] | |
0864a55af4f109e92a6b1185d04837dc723f87a7 | e5d130e183b5dea1b7aad23a047c703fa0d2b3bf | /lightbus/transports/__init__.py | d0a7d70c722ce0c3f6df21091b256155c18899f5 | [
"Apache-2.0"
] | permissive | adamcharnock/lightbus | 4a86428b8203bfe98f77a32375ac961ef398ce16 | cf892779a9a9a8f69c789ffa83c24acfb7f9a336 | refs/heads/master | 2023-08-26T04:19:39.395735 | 2023-08-23T11:07:44 | 2023-08-23T11:07:44 | 94,617,214 | 193 | 22 | Apache-2.0 | 2023-08-10T21:21:51 | 2017-06-17T10:39:23 | Python | UTF-8 | Python | false | false | 534 | py | from lightbus.transports.base import (
RpcTransport,
ResultTransport,
EventTransport,
SchemaTransport,
Transport,
)
from lightbus.transports.debug import (
DebugRpcTransport,
DebugResultTransport,
DebugEventTransport,
DebugSchemaTransport,
)
from lightbus.transports.redis.rpc import RedisRpcTransport
from lightbus.transports.redis.result import RedisResultTransport
from lightbus.transports.redis.event import RedisEventTransport
from lightbus.transports.redis.schema import RedisSchemaTransport
| [
"[email protected]"
] | |
6cd57a50d097a963f425239544f4443e7fd08746 | 1733d497d4056ea1f5f3a5d78f45615d7c07dabd | /Week 1/week1.py | a8bf2d7f2e72e29f8f1d12898cb7306cbda480f5 | [] | no_license | VMCSC/Junior-20-21 | d0f3e0caedcf44c6a485d01caaa35f2a83980db3 | 428937d0984f21e968a6fbd5c8aa8820e5bcc72e | refs/heads/master | 2023-02-15T05:24:30.556876 | 2021-01-14T02:36:27 | 2021-01-14T02:36:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,740 | py | print("Hello World!") # Outputs "Hello World!"
# Variables - stores information
# Stored with name, but with restrictions (any letters, numbers, and underscore)
# Variables cannot start with numbers, but shouldn't start with underscore
# Example of invalid variable: 1234abc
# Variables are also case sensitive: abc is different ABC
# Example of assigning variables
stringVariable = "string variable" # Use double or single quote
numberVariable = 123 # No set largest number
floatVariable = 3.1415 # Decimals
# Comparison - Comparing variables, returns boolean (True or False)
1 == 1 # Equality - compares if 2 things are equal
1 != 1 # Not equality - compares if 2 things aren't equal
1 <= 1 # Less than or equal to
1 >= 1 # Greater than or equal to
1 < 2 # Less than
1 > 2 # Greater than
1 >= 0.3 # Can also mix types
# == only checks if properties are equal, not the same
1 is 1 # Checks if both are the same
# Strings can be used in comparison
# Each character is connected to a number, which allows comparison
"Hello" > "Apple" # True, as Hello is further down than Apple in a dictionary
"Apple" == "apple" # False, letters case sensitive, and each number share different values
ord("A") # Gets the number of letter A, which is 65
ord("a") # Gets the letter of letter a, which is 97
# Operations
test = True # Variable that stores True
test = not test # Inverts the variable, which is now false
# String operations
# Use square brackets []
test = "massey"
test[1] # Gets character at 1st position, starts counting at 0 rather than 1, value is 'a'
test[0:2] # Gets characters from 0th position up to, but not including the 2nd position, value is 'ma'
test[3:] # Gets from 3rd position to the end, value is 'sey'
test[:3] # Gets from the beginning, up to, but not including the 3rd position, value is 'mas'
test[::2] # Starts beginning to end, but only grabs every other letter, value is 'mse'
# Syntax: test[start:end:step]
# Can also go backwards in string operation
test[-1] # Gets character at the end, value is 'y'
test[-1:-5:-1] # Gets from the end, up to, but not including the 5th character from the end, with a step going backwards, value is 'yess'
test[::-1] # Goes from beginning to end, but goes backwards, value is 'yessam'
# Input - gets input from the user
test = input() # Gets input from user, as a string
print(test * 5) # Gets the input, but 5 times, can multiply strings, but it appears 5 times
# Can cast variables, which changes it, e.g. int() to int, float() to float, or bool() to boolean
print(int(test) * 5) # Converts test into an integer, and multiplies it by 5
# Input gets the whole line, rather than seperating by space
line1 = input() # Gets input in line 1
line2 = input() # Gets input in line 2 (you cannot go backwards to get data)
# You can also add a prompt in input (DO NOT USE DURING CONTESTS)
input("Enter a prompt in here")
# Loops
times = int(input("How many times would you like to multiply?"))
number = 10
# Example of a for loop
for i in range(times): # i is your counter, that goes through range(), which goes through a range of values
number = number * 10 # Reassigns number to 10 times number
number *= 10 # Same as doing number = number * 10
number = number + 10 # Reassigns number to 10 + number
number += 10 # Same as doing number = number + 10
range(0, times, 2) # Similar to string splicing, where you have start, stop, step
# Example of a while loop, not all information is given, rather just a condition
number = 0
while number != 10: # Will constantly run, while the number isn't 10
print("hello")
number = int(input("Enter a number"))
# Commenting
# Ignored by the computer
# This is a single line comment
'''This is a
multiline
comment''' | [
"[email protected]"
] | |
1c39974b28c415eb6d3f895052348a4790754e61 | 9c61b955cbee4509c3e89a02e6d0420e60a3d1c6 | /D4 Insertion Sort/insertionSort.py | 7031ea5d24e88a6bbed196c9ef134938c2582052 | [] | no_license | ErmiyasG/CompititveProgramming | 53f3a9fc2b65a51addde9f339d7ee95740ec71c0 | 927b0a0115c4baf59c62ea7a17280afc303c4cb8 | refs/heads/master | 2020-09-15T07:59:27.365879 | 2019-11-27T00:52:54 | 2019-11-27T00:52:54 | 223,387,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import random
def insertionSort():
numbers = [i for i in range(1, 10001)]
numbers = random.sample(numbers, len(numbers))
for i in range(len(numbers)):
for j in range(len(numbers)):
if numbers[i] < numbers[j]:
numbers[i], numbers[j] = numbers[j], numbers[i]
print(numbers)
return
insertionSort()
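# Note: the nested loop above is a swap/exchange-style sort (it does end up
# sorting ascending), not a textbook insertion sort. A minimal insertion-sort
# sketch is added below for comparison; the function name is a new addition.
def insertion_sort(numbers):
    # Shift each element left until it sits just after the last smaller element.
    for i in range(1, len(numbers)):
        key = numbers[i]
        j = i - 1
        while j >= 0 and numbers[j] > key:
            numbers[j + 1] = numbers[j]
            j -= 1
        numbers[j + 1] = key
    return numbers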
| [
"[email protected]"
] | |
9323eda82837b3b74d35a54415c599d65c1b11d8 | 451d80ff37337a13e9d8cb3500057acedda10ffd | /script.py | 73ec82f3f62731a3a128e7a635780a15dbf2c529 | [] | no_license | cbfsocial/Python-Cisco-mac-ip-paring | f30f617529476a60f97b5c68851f4d791b776983 | 8508f4ceb064bb4118f9bb5652612b992201bd04 | refs/heads/main | 2023-03-01T00:49:04.381991 | 2021-01-29T10:44:11 | 2021-01-29T10:44:11 | 323,436,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | import paramiko
ip = []  # list of all device IPs to poll, e.g. ["192.168.1.10", "192.168.1.11"]
usr = ""
pwd = ""
def hostname_parser(host):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=host, username=usr, password=pwd, look_for_keys=False, allow_agent=False)
stdin, stdout, stderr = client.exec_command('sh run | i hostname')
return stdout.readlines()[0].replace('hostname ', '').replace('\r\n', '')
def arp_line_parser(line):
spl = list(filter(lambda s: s != '', line.split(' ')))
return spl[1], spl[3]
def arp_parser(host):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=host, username=usr, password=pwd, look_for_keys=False, allow_agent=False)
stdin, stdout, stderr = client.exec_command('show arp')
lines = stdout.readlines()[4:]
return list(map(arp_line_parser, lines))
def line_parser(line):
return line.split(' ')[1], line.split(' ')[-1].replace('\r\n', '')
def port_parser(host, port):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=host, username=usr, password=pwd, look_for_keys=False, allow_agent=False)
stdin, stdout, stderr = client.exec_command("show mac address-table int Fa0/" + str(port))
lines = stdout.readlines()
    client.close()
    if len(lines) > 5:
        return list(map(line_parser, lines[5:-1]))
arp_table = arp_parser('CORE IP where all arp collected')
for host in ip:
hostname = hostname_parser(host)
f = open("hostname_done.txt", 'a')
for port in range(1, 49):
parsed = port_parser(host, port)
if parsed is not None:
for parsed_elem in parsed:
arp = list(filter(lambda a: a[1] == parsed_elem[0], arp_table))
if len(arp) > 0:
f.write(hostname + ',' + parsed_elem[1] + ',' + parsed_elem[0] + ',' + arp[0][0] + '\n')
f.close()
| [
"[email protected]"
] | |
67d2259610e25b0d69794e33f601aaf958ab1d9b | ef68592777e356bd268410803195a4bc45f3c976 | /NN.py | 780afb1efde5e7fe85b06679b9d37f34b6e03035 | [] | no_license | msieb1/multilayer-perceptron | a2ce7cc417d9d87f8067ddcad078fc75def0d0c7 | 9e0f8b5a490a9221dc352cdea3078a9d5b29e6ab | refs/heads/master | 2021-08-08T03:33:19.458366 | 2017-11-09T13:45:20 | 2017-11-09T13:45:20 | 109,531,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,026 | py | import math
import random
import numpy as np
import csv
import re
import matplotlib.pyplot as plt
class NN:
"""
    create a neural network (multilayer perceptron) as a class object
"""
def __init__(self, n_nodes, inputs):
"""
:param
n_nodes: array with number of nodes in each layer.
                 len(n_nodes) - 2 thus equals the number of hidden layers.
first entry is number of input nodes, last entry output nodes
inputs: Array of dim=784 vectors
"""
############## Hyper parameters ##################
self.l_rate = 0.1
self.beta_1 = 0.9
self.beta_2 = 0.98
self.eps = 0.0000001
self.lambd = 0.1
self.batch_size = 100
#######################################
"""
Here, the following can be specified:
- activation function (and derivative)
- output function
- optimizer (Standard, Momentum, Adam)
- batch normalization on or off
"""
self.activation_fun = self.sigmoid
self.activation_fun_g = self.d_sigmoid
self.output_fun = self.softmax
#Specify optimizer: Adam, Standard or Momentum (Standard with Momentum)
self.optimization_function = 'Standard'
self.batchnorm_on = False
self.lrate_decay_on = False
######################################
n = self.batch_size
self.layers = np.size(n_nodes)-1
#Parameters W
self.W = {0: np.zeros(n_nodes[0])}
self.W_g = {0: np.zeros(n_nodes[0])}
#bias b
self.b = {0: np.zeros(n_nodes[0])}
self.b_g = {0: np.zeros(n_nodes[0])}
# batch norm parameters gamma and beta
self.gamma = {0: np.zeros(n_nodes[0])}
self.gamma_g = {0: np.zeros(n_nodes[0])}
self.beta = {0: np.zeros(n_nodes[0])}
self.beta_g = {0: np.zeros(n_nodes[0])}
#activations a
self.a = {0: np.zeros((n_nodes[0],n))}
self.a_g = {0: np.zeros(n_nodes[0])} # gradients just defined for one sample as of now
#hidden layers h (h[self.layers] := output layer)
self.h = {0: np.zeros((n_nodes[0],n))}
self.h_g = {0: np.zeros(n_nodes[0])}
# batch norm layer including gradient, output, mean and variance of batch
self.bn = {0: np.zeros((n_nodes[0],n))}
self.bn_g = {0: np.zeros(n_nodes[0])}
self.bn_mean = {0: np.zeros(n_nodes[0])}
self.bn_var = {0: np.zeros(n_nodes[0])}
# params for early stopping
self.best_params = {'W': 0, 'b': 0, 'gamma': 0, 'beta': 0, 'best_ii': 0}
self.best_ii = 0
for i in np.arange(1, self.layers+1):
"""
init weights
"""
self.W[i] = np.zeros((n_nodes[i],n_nodes[i-1]))
self.W_g[i] = np.zeros((n_nodes[i],n_nodes[i-1]))
self.b[i] = np.zeros((n_nodes[i],1))
self.b_g[i] = np.zeros(n_nodes[i])
self.gamma[i] = np.zeros((n_nodes[i], 1))
self.beta[i] = np.zeros((n_nodes[i], 1))
self.gamma_g[i] = np.zeros(n_nodes[i])
self.beta_g[i] = np.zeros(n_nodes[i])
self.a[i] = np.zeros((n_nodes[i], n))
self.bn[i] = np.zeros((n_nodes[i], n))
self.bn_mean[i] = np.zeros(n_nodes[i])
self.bn_var[i] = np.zeros(n_nodes[i])
self.h[i] = np.zeros((n_nodes[i], n))
self.a_g[i] = np.zeros(n_nodes[i])
self.bn_g[i] = np.zeros(n_nodes[i])
self.h_g[i] = np.zeros(n_nodes[i])
con = np.sqrt(6)/np.sqrt(len(self.h[i])+len(self.h[i-1]))
row, col = self.W[i].shape
np.random.seed()
self.W[i][:, :] = np.reshape(np.random.uniform(-con, con, row*col), [row,col])
self.b[i][:] = 0.1
self.gamma[i] = np.zeros((n_nodes[i], 2))
self.beta[i] = np.zeros((n_nodes[i], 2))
if i == 1:
self.W[1] = np.load('W_RBM200.npy')
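    # Example construction (a sketch; the layer sizes are assumptions, not taken
    # from the original author): n_nodes = [784, 200, 10] builds a network with
    # 784 inputs, one hidden layer of 200 units and 10 outputs. Note that
    # __init__ loads pretrained first-layer weights from 'W_RBM200.npy', so the
    # first hidden layer has to match that file's shape.
    #   net = NN([784, 200, 10], x_train)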
def set_hyperparams(self, l_rate, beta_1, beta_2, eps, lambd, batch_size, act, act_g, out, opt, batch_on, lrate_decay_on):
self.l_rate = l_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.lambd = lambd
self.batch_size = batch_size
self.activation_fun = act
self.activation_fun_g = act_g
self.output_fun = out
self.optimization_function = opt
self.batchnorm_on = batch_on
self.lrate_decay_on = lrate_decay_on
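    # Example configuration call (hypothetical values; the activation/output
    # functions and the optimizer name are the ones defined by this class):
    #   net.set_hyperparams(l_rate=0.1, beta_1=0.9, beta_2=0.98, eps=1e-7,
    #                       lambd=0.1, batch_size=100,
    #                       act=net.sigmoid, act_g=net.d_sigmoid, out=net.softmax,
    #                       opt='Adam', batch_on=False, lrate_decay_on=False)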
def forward_activations(self, inputs):
"""
        :param inputs: array of input column vectors, shape (input_dim, batch_size)
        :return: activations of the output layer, i.e. self.h[self.layers]
"""
        if not self.batchnorm_on:
n_layers = self.layers
self.h[0] = inputs
self.a[1] = np.dot(self.W[1], inputs)+ self.b[1]
self.h[1] = self.activation_fun(self.a[1])
for i in np.arange(2, n_layers):
self.a[i] = np.dot(self.W[i], self.h[i-1])+self.b[i]
self.h[i] = self.activation_fun(self.a[i])
self.a[n_layers] = np.dot(self.W[n_layers], self.h[n_layers-1])+self.b[n_layers]
self.h[n_layers] = self.output_fun(self.a[n_layers])
return self.h[n_layers]
else:
n_layers = self.layers
self.h[0] = inputs
self.a[1] = np.dot(self.W[1], inputs) + self.b[1]
self.bn[1], self.bn_mean[1], self.bn_var[1] = self.batchnorm(self.a[1], 1)
self.h[1] = self.activation_fun(self.bn[1])
for i in np.arange(2, n_layers):
self.a[i] = np.dot(self.W[i], self.h[i - 1]) + self.b[i]
self.bn[i], self.bn_mean[i], self.bn_var[i] = self.batchnorm(self.a[i], i)
self.h[i] = self.activation_fun(self.bn[i])
self.a[n_layers] = np.dot(self.W[n_layers], self.h[n_layers - 1]) + self.b[n_layers]
self.bn[n_layers], self.bn_mean[n_layers], self.bn_var[n_layers] = self.batchnorm(self.a[n_layers], n_layers)
self.h[n_layers] = self.output_fun(self.bn[n_layers])
return self.h[n_layers]
    def calculate_loss(self, x_train, y_train):
        """
        Mean cross-entropy loss over the given samples:
        -(1/n) * sum over samples of log(predicted probability of the true class).
        """
try:
dim, n = x_train.shape
except:
dim = len(x_train)
n = 1
loss = 0
weight_loss = 0
self.forward_activations(x_train)
output = self.h[self.layers]
#for i in range(1, self.layers+1):
# weight_loss += np.sum(self.W[i]) + np.sum(self.b[i])
for i in range(0, n):
ind = np.where(y_train[:,i]==1)
f = output[ind, i]
if f < 0.0001:
f = 0.0001
loss -= np.log(f)
return loss/n
def backprop(self, x_data, y_data):
"""
        :param x_data: mini-batch of inputs, shape (input_dim, batch_size)
        :param y_data: one-hot labels of the mini-batch, shape (n_classes, batch_size)
        :return: nothing; the batch-averaged gradients are stored in self.W_g and self.b_g
                 (and in the batch-norm gradients when batch normalization is enabled)
"""
batch_size = x_data.shape[1]
        if not self.batchnorm_on:
n_layers = self.layers
output = self.h[self.layers]
e = y_data #serves as indicator function
W_g = {**self.W_g}
b_g = {**self.b_g}
for j in np.arange(batch_size):
self.a_g[n_layers] = (-(e-output))[:, j]
for i in np.arange(self.layers, 0, -1):
self.W_g[i] = np.outer(self.a_g[i], self.h[i-1][:, j])
self.b_g[i] = self.a_g[i]
self.h_g[i-1] = np.dot(self.W[i].T, self.a_g[i])
self.a_g[i-1] = self.h_g[i-1] * self.activation_fun_g(self.a[i-1][:, j])
W_g[i] += self.W_g[i]/batch_size
b_g[i] += self.b_g[i]/batch_size
self.W_g = {**W_g}
self.b_g = {**b_g}
return
else:
n_layers = self.layers
output = self.h[self.layers]
e = y_data #serves as indicator function
W_g = {**self.W_g}
b_g = {**self.b_g}
for j in np.arange(batch_size):
self.bn_g[n_layers] = (-(e-output))[:, j]
self.a_g[n_layers], self.gamma_g[n_layers], self.beta_g[n_layers] = self.batchnorm_backward(self.bn_g[n_layers], n_layers)
for i in np.arange(self.layers, 0, -1):
self.W_g[i] = np.outer(self.a_g[i], self.h[i-1][:, j])
self.b_g[i] = self.a_g[i]
self.h_g[i-1] = np.dot(self.W[i].T, self.a_g[i])
self.bn_g[i-1] = self.h_g[i-1] * self.activation_fun_g(self.bn[i-1][:, j])
self.a_g[i-1], self.gamma_g[i-1], self.beta_g[i-1] = self.batchnorm_backward(self.bn_g[i-1], i-1)
W_g[i] += self.W_g[i]/batch_size
b_g[i] += self.b_g[i]/batch_size
self.W_g = {**W_g}
self.b_g = {**b_g}
return
def update_params(self, m_prev, v_prev, opt_fun):
beta_1 = self.beta_1
beta_2 = self.beta_2
eps = self.eps
lambd = self.lambd
l_rate = self.l_rate
if opt_fun == 'Standard':
for i in np.arange(1, self.layers+1):
grad = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T, self.gamma_g[i], self.beta_g[i]])
self.W[i] -= l_rate*self.W_g[i]
self.b[i] -= l_rate*np.atleast_2d(self.b_g[i]).T
self.gamma[i] -= l_rate*self.gamma_g[i]
self.beta[i] -= l_rate*self.beta_g[i]
return m_prev, v_prev
if opt_fun == 'Momentum':
for i in np.arange(1, self.layers+1):
grad = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T,np.atleast_2d(self.gamma_g[i]).T,np.atleast_2d(self.beta_g[i]).T])
delta = grad - beta_1*m_prev[i]
m_prev[i] = delta
self.W[i] -= l_rate*(delta[:, :-3] + self.lambd*self.W[i])
self.b[i] -= l_rate*(np.atleast_2d(delta[:, -3]).T + self.lambd*self.b[i])
self.gamma[i] -= l_rate*(np.atleast_2d(delta[:, -2]).T + self.lambd*self.gamma[i])
self.beta[i] -= l_rate*(np.atleast_2d(delta[:, -1]).T + self.lambd*self.beta[i])
return m_prev, v_prev
# works
# if opt_fun == 'Adam':
# for i in np.arange(1, self.layers+1):
# grad = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T])
# rows, cols = grad.shape
# m = beta_1*m_prev + (1-beta_1)*grad
# #m = delta
# v = beta_2*v_prev + (1-beta_2)*grad**2
# #m = m / (1-beta_1)
# #v = v / (1-beta_2)
# self.W[i] -= l_rate*m[:, :-1]/(np.sqrt(v[:, :-1]) + eps)
# self.b[i] -= np.atleast_2d(l_rate*(m[:, -1])/(np.sqrt((v[:, -1])) + eps)).T
# return m, v
if opt_fun == 'Adam':
for i in np.arange(1, self.layers+1):
grad = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T,np.atleast_2d(self.gamma_g[i]).T,np.atleast_2d(self.beta_g[i]).T])
delta = -beta_1*m_prev[i] + (1-beta_1)*grad
m_prev[i] = delta
v_prev[i] = beta_2*v_prev[i] + (1-beta_2)*grad**2
m_prev[i] = m_prev[i] * (1-beta_1) #inquire those
v_prev[i] = v_prev[i] * (1-beta_2)
self.W[i] -= l_rate*(m_prev[i][:, :-3]/(np.sqrt(v_prev[i][:, :-3]) + eps)+self.lambd*self.W[i])
self.b[i] -= l_rate*(np.atleast_2d(m_prev[i][:, -3])/(np.sqrt(v_prev[i][:, -3]) + eps).T + self.lambd*self.b[i])
                self.gamma[i] -= l_rate*(np.atleast_2d(delta[:, -2]).T/(np.sqrt(v_prev[i][:, -2]) + eps).T + self.lambd*self.gamma[i])
                self.beta[i] -= l_rate*(np.atleast_2d(delta[:, -1]).T/(np.sqrt(v_prev[i][:, -1]) + eps).T + self.lambd*self.beta[i])
            return m_prev, v_prev
def train(self, n_epochs, data):
"""
train the network with backprop
:param l_rate:
:param n_epochs:
:return:
"""
opt_fun = self.optimization_function
l_rate = self.l_rate
batch_size = self.batch_size
        # m stores the first-moment (gradient) estimate, v the second-moment estimate
        # (copying W_g just sets up one dictionary entry per layer); each entry is then
        # filled with the stacked gradients w.r.t. W, b, gamma and beta
m = {**self.W_g}
v = {**self.W_g}
for i in range(1, self.layers + 1):
m[i] = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T,np.atleast_2d(self.gamma_g[i]).T,np.atleast_2d(self.beta_g[i]).T ])*0
v[i] = np.hstack([self.W_g[i], np.atleast_2d(self.b_g[i]).T,np.atleast_2d(self.gamma_g[i]).T,np.atleast_2d(self.beta_g[i]).T]) * 0
x_train = data['x_train']
y_train = data['y_train']
x_valid = data['x_valid']
y_valid = data['y_valid']
dim, n_samples = x_train.shape
####### TRACK LOSS ##############
loss_tracker = {'train': np.zeros((n_epochs+1, 1)), 'valid': np.zeros((n_epochs+1, 1))}
loss_tracker['train'][0, 0] = self.calculate_loss(x_train, y_train)
loss_tracker['valid'][0, 0] = self.calculate_loss(x_valid, y_valid)
######################
###### TRACK CLASSIFICATION ERROR ##############
error_tracker = {'train': np.zeros((n_epochs+1, 1)), 'valid': np.zeros((n_epochs+1, 1))}
self.forward_activations(x_train)
y_hat = self.h[self.layers]
error_tracker['train'][0,0] = classification_error(y_hat, y_train)
self.forward_activations(x_valid)
y_hat2 = self.h[self.layers]
error_tracker['valid'][0,0] = classification_error(y_hat2, y_valid)
###########################
        ### EARLY STOPPING PARAMS ###
ii = 0
jj = 0
vv = 100000
pp = 10000
nn = 10
for epoch in np.arange(n_epochs):
randomize = np.arange(x_train.shape[1])
np.random.shuffle(randomize)
x_train = x_train[:, randomize][:, :]
y_train = y_train[:, randomize][:, :]
for j in np.arange(int(n_samples/batch_size)):
#get output for current batch
self.forward_activations((x_train[:, j*batch_size:(j+1)*batch_size])) #Make 784,1 instead of 784,
self.backprop(x_train[:, j*batch_size:(j+1)*batch_size], y_train[:, j*batch_size:(j+1)*batch_size]) #fix index passing
m, v = self.update_params(m, v, opt_fun)
## EARLY STOPPING
ii += 1
if (int(ii / nn) == 1) and (jj != pp):
v_prime = self.calculate_loss(x_valid, y_valid)
if v_prime < vv:
jj = 0
self.best_params['W'] = self.W
self.best_params['b'] = self.b
self.best_params['gamma'] = self.gamma
self.best_params['beta'] = self.beta
self.best_ii = ii
self.best_params['best_ii'] = self.best_ii
vv = v_prime
else:
jj += 1
##################################
if (n_samples -int(n_samples/batch_size)*batch_size) != 0:
# get data if batch does not divide perfectly sample set
self.forward_activations((x_train[:, int(n_samples/batch_size)*batch_size:])) # Make 784,1 instead of 784,
self.backprop(x_train[:, int(n_samples/batch_size)*batch_size:],
y_train[:, int(n_samples/batch_size)*batch_size:])
m, v = self.update_params(m, v, opt_fun)
### EARLY STOPPING ###
ii += 1
if (int(ii / nn) == 1) and (jj != pp):
ii += nn
v_prime = self.calculate_loss(x_valid, y_valid)
if v_prime < vv:
jj = 0
self.best_params['W'] = self.W
self.best_params['b'] = self.b
self.best_params['gamma'] = self.gamma
self.best_params['beta'] = self.beta
self.best_ii = ii
self.best_params['best_ii'] = self.best_ii
vv = v_prime
else:
jj += 1
#################
#update error and loss dictionaries
loss_tracker['train'][epoch+1, 0] = self.calculate_loss(x_train, y_train)
loss_tracker['valid'][epoch+1, 0] = self.calculate_loss(x_valid, y_valid)
y_hat = self.forward_activations(x_valid)
error_tracker['valid'][epoch+1, 0] = classification_error(y_hat, y_valid)
y_hat2 = self.forward_activations(x_train)
error_tracker['train'][epoch + 1, 0] = classification_error(y_hat2, y_train)
##################################
print("Epoch %s: error is %s" % (epoch + 1, loss_tracker['train'][epoch+1, 0]))
#decrease learning rate if hitting a plateau
if (np.abs(error_tracker['valid'][epoch+1, 0] - error_tracker['valid'][epoch, 0]) < 0.002) and self.lrate_decay_on:
self.l_rate /= 1.5
return loss_tracker, error_tracker
def batchnorm(self, batch, curr_layer):
i = curr_layer
eps = 0.00000001
dim, n = batch.shape
        m = np.mean(batch, axis=1, keepdims=True)
        var = np.var(batch, axis=1, keepdims=True)
x_hat = (batch - m)/(np.sqrt(var + eps))
y = self.gamma[i]*x_hat + self.beta[i]
return y, m, var
def batchnorm_backward(self, dout, curr_layer):
eps = self.eps
i = curr_layer
# get the dimensions of the input/output
dim, n = dout.shape
# step9
dbeta = np.sum(dout, 1)
dgammax = dout # not necessary, but more understandable
# step8
dgamma = np.sum(dgammax * self.bn[i], 1)
dxhat = dgammax * self.gamma[i]
# step7
divar = np.sum(dxhat * self.bn_mean[i], 1)
dxmu1 = dxhat * 1/self.bn_var[i]
# step6
dsqrtvar = -1. / (np.sqrt(self.bn_var[i] + eps) ** 2) * divar
# step5
dvar = 0.5 * 1. / np.sqrt(self.bn_var[i] + eps) * dsqrtvar
# step4
dsq = 1. / n * np.ones((dim , n)) * dvar
# step3
dxmu2 = 2 * self.bn[i] * dsq
# step2
dx1 = (dxmu1 + dxmu2)
dmu = -1 * np.sum(dxmu1 + dxmu2, 1)
# step1
dx2 = 1. / n * np.ones((dim, n)) * dmu
# step0
dx = dx1 + dx2
return dx, dgamma, dbeta
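    # For reference (not part of the original assignment code): the usual closed-form
    # BatchNorm backward pass, with x_hat the normalized activations cached in the
    # forward pass and all means/sums taken over the batch axis, is
    #   dbeta  = sum(dout)
    #   dgamma = sum(dout * x_hat)
    #   dx     = (gamma / sqrt(var + eps)) * (dout - mean(dout) - x_hat * mean(dout * x_hat))
    # It can be used to sanity-check the step-by-step derivation above.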
#### Activation and Output functions ####
def sigmoid(self, a):
        a = np.clip(a, -300, 300)  # clip to avoid overflow in exp
return 1/(1+np.exp(-a))
def d_sigmoid(self, a):
return self.sigmoid(a)*(1-self.sigmoid(a))
def relu(self, a):
buff = np.copy(a)
return np.maximum(a, buff*0)
def d_relu(self, a):
res = np.copy(a)
res[np.where(a > 0)] = 1
res[np.where(a <= 0)] = 0
return res
def tanh(self, a):
return np.tanh(a)
def d_tanh(self, a):
return 1 - np.tanh(a) ** 2
def softmax(self, a):
m = np.max(a,0)
out = np.exp(a - m - np.log(np.sum(np.exp(a - m),0)))
return out
#########################################
def check_gradient(self, eps, layer, ind1, ind2, x_train, y_train):
W_o = self.W
b_o = self.b
real_grad = self.W_g[layer][ind1, ind2]
self.W[layer][ind1,ind2] += eps
loss_plus = self.calculate_loss(x_train, y_train)
self.W[layer][ind1,ind2] -= 2*eps
loss_minus = self.calculate_loss(x_train, y_train)
grad = (loss_plus - loss_minus)/(2*eps)
error = np.abs(grad-real_grad)
if real_grad > 0.1:
a = 0
return error, grad
##########################
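# A minimal way to exercise check_gradient above (hypothetical usage, not part of the
# original script; assumes `net`, `x_train` and `y_train` exist as in the __main__
# block below, and that one forward/backward pass has filled net.W_g):
#   net.forward_activations(x_train[:, :1])
#   net.backprop(x_train[:, :1], y_train[:, :1])
#   err, fd_grad = net.check_gradient(1e-5, 1, 0, 0, x_train[:, :1], y_train[:, :1])
#   # err should be small if the analytic gradient matches the finite difference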
def load_data():
with open('/home/max/PyCharm/PycharmProjects/10-707/hw1/data/digitstrain.txt') as f:
reader = csv.reader(f, delimiter=" ")
d = list(reader)
x_train = np.zeros((784,3000))
y_train = np.zeros((10,3000))
for i in range(0, len(d)):
s = d[i][0]
p = re.compile(r'\d+\.\d+') # Compile a pattern to capture float values
data = [float(i) for i in p.findall(s)] # Convert strings to float
p = re.compile(r'\d+') # Compile a pattern to capture float values
label = [int(i) for i in p.findall(s)]
x_train[:,i] = data
y_train[label[-1],i] = 1
np.save('x_train.npy', x_train)
np.save('y_train.npy', y_train)
with open('/home/max/PyCharm/PycharmProjects/10-707/hw1/data/digitstest.txt') as f:
reader = csv.reader(f, delimiter=" ")
d = list(reader)
x_test = np.zeros((784, 3000))
y_test = np.zeros((10, 3000))
for i in range(0, len(d)):
s = d[i][0]
p = re.compile(r'\d+\.\d+') # Compile a pattern to capture float values
data = [float(i) for i in p.findall(s)] # Convert strings to float
p = re.compile(r'\d+') # Compile a pattern to capture float values
label = [int(i) for i in p.findall(s)]
x_test[:, i] = data
y_test[label[-1], i] = 1
np.save('x_test.npy', x_test)
np.save('y_test.npy', y_test)
with open('/home/max/PyCharm/PycharmProjects/10-707/hw1/data/digitsvalid.txt') as f:
reader = csv.reader(f, delimiter=" ")
d = list(reader)
x_valid = np.zeros((784, 1000))
y_valid = np.zeros((10, 1000))
for i in range(0, len(d)):
s = d[i][0]
p = re.compile(r'\d+\.\d+') # Compile a pattern to capture float values
data = [float(i) for i in p.findall(s)] # Convert strings to float
p = re.compile(r'\d+') # Compile a pattern to capture float values
label = [int(i) for i in p.findall(s)]
x_valid[:, i] = data
y_valid[label[-1], i] = 1
np.save('x_valid.npy', x_valid)
np.save('y_valid.npy', y_valid)
dat = {'x_train': x_train}
dat['y_train'] = y_train
dat['x_test'] = x_test
dat['y_test'] = y_test
dat['x_valid'] = x_valid
dat['y_valid'] = y_valid
return dat
def plotting(path, save_to_file=False):
path_to_save = path
plt.figure(1)
plt.clf()
plt.plot(np.arange(1,n_epochs+1),loss_tracker['train'][1:], 'ro-', markersize=4, label='train')
plt.plot(np.arange(1,n_epochs+1),loss_tracker['valid'][1:], 'yo-', markersize=4, label='valid')
plt.xlabel('epoch')
plt.ylabel('loss')
#plt.title('cross-entropy')
plt.legend(loc='upper left')
if save_to_file:
name = 'graph_loss_RBM200'
path = path_to_save + name
plt.savefig(path)
plt.figure(2)
plt.clf()
plt.plot(np.arange(1,n_epochs+1),error_tracker['train'][1:], 'bo-', markersize=4, label='train')
plt.plot(np.arange(1,n_epochs+1),error_tracker['valid'][1:], 'go-', markersize=4, label='valid')
plt.xlabel('epoch')
plt.ylabel('error')
#plt.title('classification error')
plt.legend(loc='upper left')
if save_to_file:
name = 'graph_error_RBM200'
path = path_to_save + name
plt.savefig(path)
#plt.show()
def visualize_params(net):
#make sure you got good params!
plt.figure(1)
plt.clf()
for i in range(0, 100):
plt.subplot(10, 10, i+1)
buff = np.reshape(net.W[1][i, :], [28, 28])
plt.imshow(buff, cmap='gray')
plt.tick_params(axis='both', left='off', right='off', which='both', bottom='off', top='off', labelbottom='off', labeltop='off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
name = 'visualized_params2'
path_to_save = '/home/max/PyCharm/PycharmProjects/10-707/hw1/figures/'
path = path_to_save + name
plt.savefig(path)
an = 0
def normalize(arr):
delta = 0.00000001
arr_norm = (arr - np.array([np.mean(arr, 1)]).T) / (np.array([np.std(arr, 1)]).T + delta) # normalize data
return arr_norm
def classification_error(prediction, label):
try:
n = label.shape[1]
except:
n = 1
row, col = prediction.shape
prediction += np.reshape(np.random.uniform(-0.0001, 0.0001, row * col), [row, col])
def array_row_intersection(a, b):
tmp = np.prod(np.swapaxes(a[:, :, None], 1, 2) == b, axis=2)
return a[np.sum(np.cumsum(tmp, axis=0) * tmp == 1, axis=1).astype(bool)]
ind_pred = np.argwhere(prediction == np.max(prediction, axis=0))
ind_corr = np.argwhere(label)
n_correct = array_row_intersection(ind_pred, ind_corr).shape[0]
return (n-n_correct)/n
####################################
if __name__ == '__main__':
# ONLY LOAD IF .npy files not stored!!
# data = load_data()
#normalize if needed
x_train = (np.load('x_train.npy'))
y_train = (np.load('y_train.npy'))
x_valid = (np.load('x_valid.npy'))
y_valid = (np.load('y_valid.npy'))
x_test = (np.load('x_test.npy'))
y_test = (np.load('y_test.npy'))
# store data in a dictionary
    data = {'x_train': x_train, 'y_train': y_train, 'x_valid': x_valid, 'y_valid': y_valid, 'x_test': x_test, 'y_test': y_test}
######################################
for i in range(8,9):
#specifiy net architecture: .e.g.: 784 inputs, first hidden layer 500 nodes, second one 300 nodes, output 10
net = NN([784,100,10], x_train)
# specify initial learning rate
l_rate = 0.1
# momentum parameter
beta_1 = 0.9
# RMSProp/Adam parameter
beta_2 = 0.98
# variance square root regularizer
eps = 0.0000001
# regularizer
lambd = 0.1
# batch sample size
batch_size = 64
# activation function: net.sigmoid, net.relu, net.tanh
activation_fun = net.sigmoid
# activation function derivative: net.d_sigmoid, net.d_relu, net.d_tanh
activation_fun_g = net.d_sigmoid
# output function: net.softmax
output_fun = net.softmax
# Specify optimizer: Adam, Standard or Momentum (Standard with Momentum)
optimization_function = 'Momentum'
# batch normalization on or off
batchnorm_on = False
# learning rate decay on or off (divides main learning rate by 1.5 if validation error stays constant
lrate_decay_on = True
# number of training epochs
n_epochs = 100
# path to save figure and data
path = '/home/max/PyCharm/PycharmProjects/10-707/hw2/figures/pretraining/'
####################################
# initialize network and execute training
net.set_hyperparams(l_rate, beta_1, beta_2, eps, lambd, batch_size, activation_fun, activation_fun_g, output_fun,
optimization_function, batchnorm_on, lrate_decay_on)
loss_tracker, error_tracker = net.train(n_epochs, data)
# calculate loss on test set
# loss_train = net.calculate_loss(x_train, y_train)
# out = net.h[net.layers]
# err_train = classification_error(out, y_train)
# loss_valid = net.calculate_loss(x_valid, y_valid)
# out = net.h[net.layers]
# err_valid = classification_error(out, y_valid)
# loss_test = net.calculate_loss(x_test, y_test)
# out = net.h[net.layers]
# err_test = classification_error(out, y_test)
#
# np.save(path + 'err_train.npy', err_train)
# np.save(path + 'loss_train.npy', loss_train)
# np.save(path + 'err_valid.npy', err_valid)
# np.save(path + 'loss_valid.npy', loss_valid)
# np.save(path + 'err_test.npy', err_test)
# np.save(path + 'loss_test.npy', loss_test)
# np.save(path + 'W.npy', net.W[1])
plotting(path)
# visualize_params(net)
plt.show()
| [
"[email protected]"
] | |
165dab7e57e2a352300f0576717c1cdae8927d4b | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/es/syntax_iterators.py | 869f404e040edf4e143bf6e80dab2eaac4390688 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 1,695 | py | # coding: utf8
from __future__ import unicode_literals
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
def noun_chunks(obj):
doc = obj.doc
np_label = doc.vocab.strings.add('NP')
    left_labels = ['det', 'fixed', 'neg'] # ['nummod', 'det', 'appos', 'fixed']
right_labels = ['flat', 'fixed', 'compound', 'neg']
stop_labels = ['punct']
np_left_deps = [doc.vocab.strings[label] for label in left_labels]
np_right_deps = [doc.vocab.strings[label] for label in right_labels]
stop_deps = [doc.vocab.strings[label] for label in stop_labels]
def noun_bounds(root):
left_bound = root
for token in reversed(list(root.lefts)):
if token.dep in np_left_deps:
left_bound = token
right_bound = root
for token in root.rights:
if (token.dep in np_right_deps):
left, right = noun_bounds(token)
if list(filter(lambda t: is_verb_token(t) or t.dep in stop_deps,
doc[left_bound.i: right.i])):
break
else:
right_bound = right
return left_bound, right_bound
token = doc[0]
while token and token.i < len(doc):
if token.pos in [PROPN, NOUN, PRON]:
left, right = noun_bounds(token)
yield left.i, right.i+1, np_label
token = right
token = next_token(token)
def is_verb_token(token):
return token.pos in [VERB, AUX]
def next_token(token):
try:
return token.nbor()
    except IndexError:  # nbor() raises IndexError at the end of the doc
return None
SYNTAX_ITERATORS = {
'noun_chunks': noun_chunks
}
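# Sketch of how this iterator could be exercised directly (not part of the original
# module; assumes a Spanish pipeline with a dependency parser, e.g. es_core_news_sm):
#   import spacy
#   nlp = spacy.load('es_core_news_sm')
#   doc = nlp(u'El perro negro corre por el parque')
#   for start, end, label in noun_chunks(doc):
#       print(doc[start:end].text)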
| [
"[email protected]"
] | |
5f187621e0c1861126cb19a1ffcd3ae9705e5b59 | ea9e1222a128df7f0652f331065533d61798ae16 | /unit4/4.5.improving_the_index.py | 0726f55cf9399b8dc45155c0638820b7ed27534c | [] | no_license | AK88-RM/Udacity-CS101 | 5ff11d32a2da9bb80987d2e4078a40cb64c95b71 | 2e71f204e982aa6831824d9bab343db629a967cc | refs/heads/master | 2020-04-06T06:38:29.659575 | 2014-08-28T13:07:09 | 2014-08-28T13:07:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | def add_to_index(index, keyword, url):
for entry in index:
if entry[0] == keyword:
if url not in entry[1]:
entry[1].append(url)
return
# not found, add new keyword to index
index.append([keyword, [url]])
def get_page(url):
try:
if url == "http://www.udacity.com/cs101x/index.html":
return '''<html> <body> This is a test page for learning to crawl!
<p> It is a good idea to
<a href="http://www.udacity.com/cs101x/crawling.html">
learn to crawl</a> before you try to
<a href="http://www.udacity.com/cs101x/walking.html">walk</a> or
<a href="http://www.udacity.com/cs101x/flying.html">fly</a>.</p></body>
</html>'''
elif url == "http://www.udacity.com/cs101x/crawling.html":
return '''<html> <body> I have not learned to crawl yet, but I am
quite good at <a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.
</body> </html>'''
elif url == "http://www.udacity.com/cs101x/walking.html":
return '''<html> <body> I cant get enough
<a href="http://www.udacity.com/cs101x/index.html">crawling</a></body></html>'''
elif url == "http://www.udacity.com/cs101x/flying.html":
return '''<html>
<body>The magic words are Squeamish Ossifrage!</body></html>'''
except:
return ""
return ""
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def crawl_web(seed):
tocrawl = [seed]
crawled = []
index = []
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
union(tocrawl, get_all_links(content))
crawled.append(page)
return index
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def lookup(index, keyword):
for entry in index:
if entry[0] == keyword:
return entry[1]
return None
index = crawl_web("http://www.udacity.com/cs101x/index.html")
print lookup(index,"is")
#>>> ['http://www.udacity.com/cs101x/index.html']
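# A small illustration of add_to_index/lookup on a toy index (hypothetical, not part
# of the original exercise):
#   toy_index = []
#   add_to_index(toy_index, 'crawl', 'http://example.com/a')
#   add_to_index(toy_index, 'crawl', 'http://example.com/b')
#   lookup(toy_index, 'crawl')    #>>> ['http://example.com/a', 'http://example.com/b']
#   lookup(toy_index, 'missing')  #>>> None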
| [
"[email protected]"
] | |
c1376eb883c5a0451dce7d372ae147efe9b2a0d9 | 4d2942cb8b7a6c15c1d09c36e4a881cd8d54b981 | /삼성 SW 역량 테스트 기출 문제/17837_새로운게임2.py | 297d12891eb3d9b4099c498c743fd3381587bdf3 | [] | no_license | Novicett/codingtest_with_python | 4ebbceedce42ea5c27bebbacaec0046a7fc7cce8 | 9cfb1a1be81acd69bf73d2f3698145c74e305dc0 | refs/heads/master | 2023-05-30T02:03:08.620929 | 2021-06-20T03:22:24 | 2021-06-20T03:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | import sys
from collections import defaultdict
dx = [0, 0, 0, -1, 1]
dy = [0, 1, -1, 0, 0]
reverse = {1:2, 2:1, 3:4, 4:3}
input = sys.stdin.readline
N, k = map(int, input().split())
board_color = []
for _ in range(N):
board_color.append(list(map(int, input().split())))
horses = defaultdict()
horse_on_board = [[[] for _ in range(N)] for _ in range(N)]
for i in range(k):
x, y, d = map(int, input().split())
horses[i] = [x-1, y-1, d]
horse_on_board[x-1][y-1].append(i)
def in_bound(x,y):
if x in range(N) and y in range(N):
return True
return False
def move_white(h):
x, y, d = horses[h]
nx, ny = x+dx[d], y+dy[d]
idx = horse_on_board[x][y].index(h)
if len(horse_on_board[nx][ny]) + len(horse_on_board[x][y][idx:]) >= 4:
return False
for moving in horse_on_board[x][y][idx:]:
horses[moving][0] = nx
horses[moving][1] = ny
horse_on_board[nx][ny] += horse_on_board[x][y][idx:]
horse_on_board[x][y] = horse_on_board[x][y][:idx]
return True
def move_red(h):
x, y, d = horses[h]
nx, ny = x+dx[d], y+dy[d]
idx = horse_on_board[x][y].index(h)
if len(horse_on_board[nx][ny]) + len(horse_on_board[x][y][idx:]) >= 4:
return False
for moving in horse_on_board[x][y][idx:]:
horses[moving][0] = nx
horses[moving][1] = ny
horse_on_board[nx][ny] += list(reversed(horse_on_board[x][y][idx:]))
horse_on_board[x][y] = horse_on_board[x][y][:idx]
return True
def move_blue(h):
flag = True
x, y, d = horses[h]
horses[h][2] = reverse[d]
nx, ny = x+dx[horses[h][2]], y+dy[horses[h][2]]
if in_bound(nx, ny) and board_color[nx][ny] != 2:
        # if the destination cell is on the board and is not blue
if board_color[nx][ny] == 0:
flag = move_white(h)
else:
flag = move_red(h)
return flag
count = 1
while True:
flag = True
if count > 1000:
break
for h in horses.keys():
        # move every piece, one by one
x, y, d = horses[h]
nx, ny = x+dx[d], y+dy[d]
if in_bound(nx, ny):
if board_color[nx][ny] == 0:
                # white cell
if move_white(h) == False:
flag = False
break
elif board_color[nx][ny] == 1:
                # red cell
if move_red(h) == False:
flag = False
break
else:
                # blue cell
if move_blue(h) == False:
flag = False
break
else:
            # reverse direction and move one cell
if move_blue(h) == False:
flag = False
break
if flag == False:
break
count += 1
if count > 1000:
print(-1)
else:
print(count) | [
"[email protected]"
] | |
4450db57e64db6586c682bfbdf846ffb456d9e4e | 4d718292ec9f90444eeda13d18febb10757da894 | /mission 11/classement.py | e50dc9fbba32e99394191730b85603be4aa7080f | [] | no_license | rverschuren/Info | b40fb04a6260dacfc95d12e63c99abd82b140e06 | c9aa0bdc1b026c8ba8134b878b5fae7d49d75e19 | refs/heads/master | 2020-04-16T07:29:49.847812 | 2019-01-14T14:50:18 | 2019-01-14T14:50:18 | 165,389,281 | 1 | 2 | null | 2019-01-12T18:56:01 | 2019-01-12T13:12:46 | Python | UTF-8 | Python | false | false | 3,812 | py | class Classement :
"""
Une implémentation primitive de classement, non ordonnée et de capacité fixe.
@author Kim Mens
@version 02 Décembre 2018
"""
__maxcapacity = 10
def __init__(self):
"""
@pre: -
@post: un classement vide de taille 0 a été créé
"""
self.__resultats = {} # dictionnaire de résultats actuelle (clé = coureur; valeur = résultat)
self.__size = 0 # nombre de résultats actuel (initialement 0, maximum __maxcapacity)
def size(self):
"""
Méthode accesseur.
Retourne la taille de ce classement.
@pre: -
@post: Le nombre de résultats actuellement stockés dans ce classement a été retourné.
"""
return self.__size
def add(self,r):
"""
Ajoute un résultat r dans ce classement.
@pre: r est une instance de la classe Resultat
@post: Le résultat r a été inséré selon l'ordre du classement.
En cas d'ex-aequo, r est inséré après les autres résultats de même ordre.
ATTENTION : L'implémentation actuelle ne respecte pas encore la post-condition!
Le résultat est simplement ajouté à la dictionnaire, sans tenir compte de l'ordre.
Une dictionnaire ne donne pas de garanties sur l'ordre des éléments.
"""
if self.size() >= self.__maxcapacity :
raise Error("Capacity of classement exceeded")
else :
self.__size += 1
self.__resultats[r.coureur()] = r
def get(self,c):
"""
Retourne le résultat d'un coureur donné.
@pre c est un Coureur
@post retourne le premier (meilleur) Resultat r du coureur c dans le
classement. Retourne None si le coureur ne figure pas (encore)
dans le classement.
"""
return self.__resultats.get(c)
def get_position(self,c):
"""
Retourne la meilleure position d'un coureur dans ce classement.
@pre c est un Coureur
@post retourne un entier représentant la position du coureur c dans ce classement,
à partir de 1 pour la tête de ce classement. Si le coureur figure plusieurs fois
dans le classement, la première (meilleure) position est retournée.
Retourne -1 si le coureur ne figure pas dans le classement.
ATTENTION : L'implémentation actuelle ne respecte pas encore la post-condition!
Etant donné que la dictionnaire de résultats ne connaît pas de position,
pour le moment cette méthode retourne toujours "***position inconnue***".
A vous de la corriger en utilisant une liste chaînée ordonnée
comme structure de données, plutôt qu'une simple dictionnaire.
"""
return "***position inconnue***"
def remove(self,c):
"""
Retire un résultat du classement.
@pre c est un Coureur
@post retire le premier (meilleur) résultat pour le coureur c du classement.
c est comparé au sens de __eq__. Retourne c si un résultat a été retiré,
of False si c n'est pas trouvé dans la liste.
"""
        removed = self.__resultats.pop(c, False)
        if removed is not False:
            self.__size -= 1
        return removed
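    # Sketch of the intended use of this class (illustrative only; Coureur and
    # Resultat are assumed to be defined elsewhere in the project):
    #   cl = Classement()
    #   cl.add(r)               # r: a Resultat instance; stored under r.coureur()
    #   cl.get(c)               # best Resultat of runner c, or None
    #   cl.get_position(c)      # 1-based rank once the ordered implementation is done
    #   cl.remove(c)            # removes and returns the runner's best result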
def __str__(self):
"""
Méthode magique
Retourne une représentation string de cet objet.
@pre: -
@post: Retourne une représentation de ce classement sous forme d'un string,
avec une ligne par résultat.
"""
s = ""
d = self.__resultats
for c in d:
s += " " + str(self.get_position(c)) + " > " + str(d[c]) + "\n"
return s | [
"[email protected]"
] | |
2c97ff281efd8d8c1aa5ca18d5eaf702ae7ce0d6 | b47309ab12aecb683797b3c5490e8f926818ec90 | /plugin.video.movie25/resources/libs/live/nhl.py | bfc51a813385908a599ab7bae51391dcb3030680 | [] | no_license | alejusar/starthere | fb41d631afa1a7e610c22999b721db2fd5260729 | a12c17369a15dacac06fd0b9c348435587058c02 | refs/heads/master | 2021-01-10T14:23:32.197067 | 2015-11-01T00:21:55 | 2015-11-01T00:21:55 | 44,849,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,303 | py | import urllib,urllib2,re,cookielib,os,sys
import xbmc, xbmcgui, xbmcaddon, xbmcplugin,time
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
def MAINNHL(murl):
source_media = {}
from datetime import datetime
datex=datetime.now().strftime('%Y%m%d')
xml='http://live.nhl.com/GameData/SeasonSchedule-20142015.json'
link=main.OPENURL(xml)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
main.addDir('[COLOR red]Archived Games[/COLOR]','Archived',394,art+'/nhl.png')
if 'Archived' not in murl:
main.addLink("[COLOR red]Live Games Windows Only, Requires some modifications to get working visit forum.[/COLOR]",'','')
match=re.compile('{"id":(.+?),"est":"(.+?)","a":"(.+?)","h":"(.+?)"}',re.DOTALL).findall(link)
for id,timed,ateam,hteam in match:
split= re.search('(.+?)\s(\d+:\d+):\d+',timed)
split1=str(split.group(1))
split2=str(split.group(2))
if 'Archived' in murl:
if int(split1)<=int(datex):
dates= re.search('(\d{4})(\d{2})(\d{2})',split1)
date=str(dates.group(2))+"/"+str(dates.group(3))+"/"+str(dates.group(1))
timed = time.strftime("%I:%M %p", time.strptime(split2, "%H:%M"))
main.addDir(ateam+' at '+hteam+' [COLOR red]('+timed+')[/COLOR] [COLOR blue]('+date+')[/COLOR]',id,395,art+'/nhl.png')
else:
if datex == split1:
dates= re.search('(\d{4})(\d{2})(\d{2})',split1)
date=str(dates.group(2))+"/"+str(dates.group(3))+"/"+str(dates.group(1))
timed = time.strftime("%I:%M %p", time.strptime(split2, "%H:%M"))
main.addDir(ateam+' at '+hteam+' [COLOR red]('+timed+')[/COLOR] [COLOR blue]('+date+')[/COLOR]',id,395,art+'/nhl.png')
def LISTSTREAMS(mname,murl):
mname=main.removeColoredText(mname)
id= re.search('(\d{4})(\d{2})(\d{4})',murl)
xml='http://smb.cdnak.neulion.com/fs/nhl/mobile/feed_new/data/streams/'+str(id.group(1))+'/ipad/'+str(id.group(2))+'_'+str(id.group(3))+'.json'
link=main.OPENURL(xml)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
match=re.compile('"vod-condensed":{"bitrate0":"([^"]+)"},"vod-continuous":{"bitrate0":"([^"]+)","image":"([^"]+)"},"vod-whole":{"bitrate0":"([^"]+)"}',re.DOTALL).findall(link)
for cond,cont,thumb,whole in match:
if '_h_condensed' in cond:
main.addPlayc(mname+' [COLOR blue]Home Condensed[/COLOR]',cond,396,thumb,'','','','','')
else:
main.addPlayc(mname+' [COLOR blue]Away Condensed[/COLOR]',cond,396,thumb,'','','','','')
if '_h_continuous' in cont:
main.addPlayc(mname+' [COLOR blue]Home Continuous[/COLOR]',cont,396,thumb,'','','','','')
else:
main.addPlayc(mname+' [COLOR blue]Away Continuous[/COLOR]',cont,396,thumb,'','','','','')
if '_h_whole' in whole:
main.addPlayc(mname+' [COLOR blue]Home Whole[/COLOR]',whole,396,thumb,'','','','','')
else:
main.addPlayc(mname+' [COLOR blue]Away Whole[/COLOR]',whole,396,thumb,'','','','','')
match2=re.compile('"away".+?"live":{"bitrate0":"([^"]+)"},.+?"image":"([^"]+)"',re.DOTALL).findall(link)
for live,thumb in match2:
main.addPlayc(mname+' [COLOR blue]Away Live[/COLOR]',live+'x0xe'+str(murl),396,thumb,'','','','','')
match3=re.compile('"home".+?"live":{"bitrate0":"([^"]+)"},.+?"image":"([^"]+)"',re.DOTALL).findall(link)
for live,thumb in match3:
main.addPlayc(mname+' [COLOR blue]Home LIVE[/COLOR]',live+'x0xe'+str(murl),396,thumb,'','','','','')
def LINK(mname,murl,thumb):
#main.GA(mname,"Watched")
ok=True
namelist=[]
urllist=[]
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
if '_whole' in murl:
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
part= re.findall('/([^/]+)ipad.mp4.m3u8',murl)[0]
match=re.compile('BANDWIDTH=.+?'+part+'(.+?)_ipad.mp4.m3u8',re.DOTALL).findall(link)
for band in sorted(match):
namelist.append(band)
dialog = xbmcgui.Dialog()
answer =dialog.select("Pick A Bandwidth", namelist)
if answer != -1:
nurl=murl.split('ipad.mp4.m3u8')[0]
stream_url=nurl+namelist[int(answer)]+'_ipad.mp4.m3u8'+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)'
else:
return
elif '/live/' in murl:
import subprocess
jarfile = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/libs/live/FuckNeulionV2.jar')
if 'Home' in mname:
Side='home'
if 'Away' in mname:
Side='away'
SelectGame=murl.split('x0xe')[1]
murl=murl.split('x0xe')[0]
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
command=['java','-jar',jarfile,SelectGame,Side]
proxy_hack_process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
xbmc.sleep(1000)
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
part= re.findall('/([^/]+)ipad.m3u8',murl)[0]
match=re.compile('BANDWIDTH=.+?'+part+'(.+?)_ipad.m3u8',re.DOTALL).findall(link)
for band in sorted(match):
namelist.append(band)
dialog = xbmcgui.Dialog()
answer =dialog.select("Pick A Bandwidth", namelist)
if answer != -1:
nurl=murl.split('ipad.m3u8')[0]
stream_url=nurl+namelist[int(answer)]+'_ipad.m3u8'+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)'
else:
return
else:
stream_url = murl+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)'
listitem = xbmcgui.ListItem(thumbnailImage=thumb)
infoL={'Title': mname, 'Genre': 'Live'}
from resources.universal import playbackengine
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='movie', title=mname,season='', episode='', year='',img=thumb,infolabels=infoL, watchedCallbackwithParams='',imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
from resources.universal import watchhistory
wh = watchhistory.WatchHistory('plugin.video.movie25')
wh.add_item(mname+' '+'[COLOR green]NHL[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
return ok
| [
"[email protected]"
] | |
9a658ef90f5b3a387e050f4f3798f53757a8de54 | fca4b400cc9c08b028ee427e5bb66fc1441d226d | /vatsava/Gautham/get_data_test.py | 46b9ebb23cfe406baecf69d639c2bfbbf915c9ee | [] | no_license | vatsava-rac/RPibackup | 29db15864f97d7f7e10fcdbbb1b893a8522982d8 | db19ca031e99d7e797b384eab72d752489a1d3ac | refs/heads/master | 2022-04-24T17:48:03.866461 | 2020-04-21T11:01:31 | 2020-04-21T11:01:31 | 257,565,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | import tkinter as tk # python 3
from tkinter import font as tkfont # python 3
from tkinter import *
class BSS_App(Tk):
def __init__(self):
Tk.__init__(self)
container = tk.Frame(self)
self.app_data = {"UUID": StringVar(),
"Connection_status": StringVar()
}
container.pack(side="top", fill="both", expand = True)
self.frames = {}
for F in (PageOne, PageTwo):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky = NSEW)
self.show_frame(PageOne)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class PageOne(tk.Frame):
def __init__(self, parent, controller):
self.controller = controller
tk.Frame.__init__(self, parent)
tk.Label(self, text='PageOne').grid(padx=(20,20), pady=(20,20))
self.make_widget(controller)
def make_widget(self, controller):
        self.some_input = StringVar()  # note: the Entry below is bound to controller.app_data["UUID"]
self.some_entry = tk.Entry(self, textvariable=self.controller.app_data["UUID"], width=8)
self.some_entry.grid()
button1 = tk.Button(self, text='Next Page',
command=lambda: controller.show_frame(PageTwo))
button1.grid()
class PageTwo(tk.Frame):
def __init__(self, parent, controller):
self.controller = controller
tk.Frame.__init__(self, parent)
tk.Label(self, text='PageTwo').grid(padx=(20,20), pady=(20,20))
button1 = tk.Button(self, text='Previous Page',
command=lambda: controller.show_frame(PageOne))
button1.grid()
button2 = tk.Button(self, text='press to print', command=self.print_it)
button2.grid()
def print_it(self):
value = self.controller.app_data["UUID"].get()
tk.Label(self, text="%s"% value).grid(padx=(30,30), pady=(30,30))
print(value)
        # The value typed on PageOne is shared via controller.app_data["UUID"],
        # which is how print_it() above reads it from PageTwo.
app = BSS_App()
app.title('Multi-Page Test App')
app.mainloop() | [
"“[email protected]”"
] | |
923853a7f7ac4c46fa45ea951fc1ffaed37f58f2 | b667ef7f22f8141898917533ec0a3305dc461e6c | /wsgi_server/dynamic/mini_frame.py | 98463d55659246a3c6b1387d4e2facb220576fdc | [] | no_license | supermouse123/python-network_frame | 6c2b30cecf85fec7ac92371f3346678ffa7e84b2 | a9023051459c1ab845618ecb1fbe0c9b68eb87af | refs/heads/main | 2023-02-26T01:43:46.753288 | 2021-01-27T05:52:14 | 2021-01-27T05:52:14 | 333,292,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # @Time : 2021/1/27
# @Author : sunyingqiang
# @Email : [email protected]
import re
URL_DICT = dict()
def route(url):
def set_func(func):
URL_DICT[url] = func
def wapper(*arg, **kwargs):
return func(*arg, **kwargs)
return wapper
return set_func
@route('/index.html')
def index():
    return 'This is the home page'
@route('/login.html')
def login():
with open('./templates/qunee_test.html') as f:
conetent = f.read()
return conetent
@route(r'/add/\d+\.html')
def add():
return 'add ok .....'
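# Example (hypothetical request): with PATH_INFO = '/add/123.html',
# re.match(r'/add/\d+\.html', '/add/123.html') succeeds, so application() below
# dispatches the request to add() and the body 'add ok .....' is returned.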
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html; charset=UTF-8')])
file_name = env['PATH_INFO']
try:
for url, func in URL_DICT.items():
ret = re.match(url, file_name)
if ret:
return func()
        # func = URL_DICT[file_name]
        # return func()
        return '404 NOT FOUND'
    except Exception:
        return '404 NOT FOUND'
"[email protected]"
] | |
9551d519b20dfcc4061ff956e357f5bdb2481c6d | 7cd8ee14711eaf33cee0d9e06e78a974fc579242 | /Linkedin/Linkedin/spiders/linkedin_distinct_12logincount.py | 73e600bb84ee61402c91c2f94e2db905b9cf883c | [] | no_license | Chandler-Song/pi | c618117dfdd9a7496a57c69f029851e94787f591 | aebc6d65b79ed43c66e7e1bf16d6d9f31b470372 | refs/heads/master | 2022-03-13T02:44:30.452673 | 2019-02-19T09:38:45 | 2019-02-19T09:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | from linkedin_voyager_functions import *
class Companylidsurls(object):
def __init__(self, *args, **kwargs):
self.con, self.cur = get_mysql_connection(DB_HOST, 'FACEBOOK', '')
#self.qu1 = 'select profile_sk , connections_profile_url from linkedin_connections where date(modified_at)>"2017-04-17" and date(modified_at)<"2017-08-21" and member_id = "%s"'
self.qu1 = "select distinct member_id from linkedin_meta where date(modified_at) < '2017-08-20'"
self.qu2 = "select distinct member_id from linkedin_connections where date(modified_at) > '2017-08-20'"
self.query2 = "select connections_profile_url, member_id, sk from FACEBOOK.linkedin_connections where date(modified_at) >= '2017-08-20'"
self.excel_file_name = 'linkedin_connections_profiles_%s.csv'%str(datetime.datetime.now().date())
if os.path.isfile(self.excel_file_name):
os.system('rm %s'%self.excel_file_name)
oupf = open(self.excel_file_name, 'ab+')
self.todays_excel_file = csv.writer(oupf)
self.headers = ['Linkedin_Profile_url', 'member_id']
self.todays_excel_file.writerow(self.headers)
def main(self):
"""with open('duplicate_members', 'r') as f:
rows = f.readlines()
for inde, row in enumerate(rows):
row = row.strip('\n')
one_ = fetchmany(self.cur, self.qu1 % row)
pf_sk = '<>'.join([i[0] for i in one_])
pf_url = '<>'.join([i[0] for i in one_])
file("duplicate_member_info","ab+").write("%s, %s, %s\n" % (row, pf_sk, pf_url))"""
re1 = fetchall(self.cur, self.qu1)
re2 = fetchall(self.cur, self.qu2)
re2 = [str(i[0]) for i in re2]
re1 = [str(i[0]) for i in re1]
new_list = []
for i in re1:
if i in re2:
new_list.append(i)
print len(new_list)
total_distinct_list = []
total_connection_records = fetchall(self.cur, self.query2)
for tocr in total_connection_records:
linkedin_profilef, member_id, connection_sk = tocr
if member_id in new_list:
continue
total_distinct_list.append((linkedin_profilef, member_id))
print len(total_distinct_list), 'total_length'
print len(set(total_distinct_list)), 'total_distinct_lenth'
total_distinct_list = set(total_distinct_list)
for tdl in total_distinct_list:
lk_url, mem_id = tdl
values = [lk_url, mem_id]
values = [normalize(i) for i in values]
self.todays_excel_file.writerow(values)
if __name__ == '__main__':
Companylidsurls().main()
| [
"[email protected]"
] | |
da11800eb90e67c80528836a5601e3bda8546b4b | 7dcf487ae09659e3a711e08bd7a9057b1abc43b4 | /newcolumn.py | c9c8f80706e6b5c747358f9fc0ccca9964dc302b | [] | no_license | vaibhavbhat123/Reservoir-characterization | 6d769ce2b3e3067b5cd04df070b91df01ffddf10 | 5e61f4a57a6561106b8989574eff8ce58a580875 | refs/heads/master | 2020-05-18T17:45:22.638535 | 2019-06-05T16:33:18 | 2019-06-05T16:33:18 | 184,565,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | import pandas as pd
import numpy as np
data = pd.read_csv("E:/file7.csv")
print(data.head())
data['Probability'] = np.where(data['POR']>6,'yes','no')
print(data.head())
data.to_csv("E:/file97.csv")
| [
"[email protected]"
] | |
2ec6f9f43065ad0d28c012eb412b7a9fe4bbb343 | 5cc6203602d2d0dc89b9b1204e6f1e0ce4e17825 | /lafopafo/confirmed/6/prediction.py | 2f387dfe8f595a4441df5bf7857c6bb1f12d97c5 | [] | no_license | arezooh/COVID-19-prediction | 200dd520626fd280bd69d2ba7799d593207b3d67 | eb3edd1494b87abaf39c19e5fc0cdb10604e12e3 | refs/heads/main | 2023-04-15T06:17:23.817219 | 2021-04-26T21:49:46 | 2021-04-26T21:49:46 | 313,310,295 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 87,103 | py | from makeHistoricalData import makeHistoricalData
from models import GBM, GLM, KNN, NN, MM_GLM, GBM_grid_search, NN_grid_search, MM_NN
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
import seaborn as sns
from matplotlib import colors as mcolors
from pexecute.process import ProcessLoom
import time
from sys import argv
import sys
from math import floor, sqrt
import os
# import dill
import glob
import shutil
import zipfile
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import subprocess as cmd
import shelve
import matplotlib.pyplot as plt
import random
import datetime
import statistics
import tensorflow as tf
from numpy.random import seed
seed(1)
tf.random.set_seed(1)
plt.rcParams.update({'figure.max_open_warning': 0})
pivot = 'country'
r = 6 * 7 # the following day to predict
numberOfSelectedCounties = -1
target_mode = 'weeklyaverage'
spatial_mode = 'country'
numberOfSelectedCountiesname = 1535
push_flag = 0
# set the size of test set. validation and train sets will have 30/70 proportion from the remaining days (optional),
# the default values are |test_set| = |val_set| = r, |train_set| = the remaining days
test_size = 21
# maxHistory = 2 * 7
maxHistory = min((19 * 7 - ((2*r) -7) - ((int(argv[1]) - 6) * 7)), 5 * 7)
maxC = 100 # maximum number of covariates to be considered
data_address = (os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))+'/data/').replace('\\','/')
######################################################### split data to train, val, test
def splitData(numberOfCounties, main_data, target, spatial_mode, mode):
numberOfCounties = len(main_data['county_fips'].unique())
main_data = main_data.sort_values(by=['date of day t', 'county_fips'])
target = target.sort_values(by=['date of day t', 'county_fips'])
    # we set the base number of days to the minimum number of days available across the counties
    # and then compute the validation size for the non-default state.
baseNumberOfDays = (main_data.groupby(['county_fips']).size()).min()
test_size = r
if target_mode == 'weeklyaverage':
test_size = 1
val_size = 1
# val_size = round(0.3 * (baseNumberOfDays - test_size))
else:
test_size = 21 #
val_size = 1
# val_size = round(0.3 * (baseNumberOfDays - test_size))
if mode == 'val':
# if not future_mode: # the default state
# X_train_train = main_data.iloc[:-2 * (r * numberOfCounties), :].sort_values(
# by=['county_fips', 'date of day t'])
# X_train_val = main_data.iloc[-2 * (r * numberOfCounties):-(r * numberOfCounties), :].sort_values(
# by=['county_fips', 'date of day t'])
# X_test = main_data.tail(r * numberOfCounties).sort_values(by=['county_fips', 'date of day t'])
# y_train_train = target.iloc[:-2 * (r * numberOfCounties), :].sort_values(
# by=['county_fips', 'date of day t'])
# y_train_val = target.iloc[-2 * (r * numberOfCounties):-(r * numberOfCounties), :].sort_values(
# by=['county_fips', 'date of day t'])
# y_test = target.tail(r * numberOfCounties).sort_values(by=['county_fips', 'date of day t'])
# else:
X_test = main_data.tail(test_size * numberOfCounties).copy()
X_train_val = main_data.iloc[:-((test_size + r-1) * numberOfCounties)].tail(val_size * numberOfCounties).copy()
X_train_train = main_data.iloc[:-((val_size + test_size + r-1) * numberOfCounties)].copy()
y_test = target.tail(test_size * numberOfCounties).copy()
y_train_val = target.iloc[:-((test_size + r-1) * numberOfCounties)].tail(val_size * numberOfCounties).copy()
y_train_train = target.iloc[:-((val_size + test_size + r-1) * numberOfCounties)].copy()
return X_train_train, X_train_val, X_test, y_train_train, y_train_val, y_test
if mode == 'test':
# if not future_mode:
# X_train = main_data.iloc[:-(r * numberOfCounties), :].sort_values(by=['county_fips', 'date of day t'])
# X_test = main_data.tail(r * numberOfCounties).sort_values(by=['county_fips', 'date of day t'])
# y_train = target.iloc[:-(r * numberOfCounties), :].sort_values(by=['county_fips', 'date of day t'])
# y_test = target.tail(r * numberOfCounties).sort_values(by=['county_fips', 'date of day t'])
# else:
X_test = main_data.tail(test_size * numberOfCounties).copy()
X_train = main_data.iloc[:-((test_size + r-1) * numberOfCounties)].copy()
y_test = target.tail(test_size * numberOfCounties).copy()
y_train = target.iloc[:-((test_size + r-1) * numberOfCounties)]
return X_train, X_test, y_train, y_test
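# Shape sketch of the split above (illustrative numbers only): with 3 counties,
# test_size = 1 and r = 2 (weekly mode), the test set is the last 1*3 rows and the
# training set drops the trailing (1 + 2 - 1)*3 = 6 rows, which keeps an r-1 step
# gap between the last training target and the first test target.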
########################################################### clean data
def clean_data(data, numberOfSelectedCounties, spatial_mode):
global numberOfDays
data = data.sort_values(by=['county_fips', 'date of day t'])
# select the number of counties we want to use
# numberOfSelectedCounties = numberOfCounties
if numberOfSelectedCounties == -1:
numberOfSelectedCounties = len(data['county_fips'].unique())
using_data = data[(data['county_fips'] <= data['county_fips'].unique()[numberOfSelectedCounties - 1])]
using_data = using_data.reset_index(drop=True)
if (spatial_mode == 'county') or (spatial_mode == 'country'):
if pivot == 'county':
main_data = using_data.drop(['county_name', 'state_fips', 'state_name'],
axis=1) # , 'date of day t'
elif pivot == 'state':
main_data = using_data.drop(['county_name'],
axis=1) # , 'date of day t'
elif pivot == 'country':
main_data = using_data
elif (spatial_mode == 'state'):
main_data = using_data.drop(['county_name', 'state_name'],
axis=1)
numberOfDays = len(using_data['date of day t'].unique())
return main_data
########################################################### preprocess
def preprocess(main_data, spatial_mode, validationFlag):
if spatial_mode == 'state':
target = pd.DataFrame(main_data[['date of day t', 'county_fips', 'state_fips', 'Target']])
else:
target = pd.DataFrame(main_data[['date of day t', 'county_fips', 'Target']])
main_data = main_data.drop(['Target'], axis=1)
# produce train, validation and test data
if validationFlag: # validationFlag is 1 if we want to have a validation set and 0 otherwise
X_train_train, X_train_val, X_test, y_train_train, y_train_val, y_test = splitData(numberOfSelectedCounties,
main_data, target,
spatial_mode, 'val')
return X_train_train, X_train_val, X_test, y_train_train, y_train_val, y_test
else:
X_train, X_test, y_train, y_test = splitData(numberOfSelectedCounties, main_data, target, spatial_mode, 'test')
return X_train, X_test, y_train, y_test
################################ MASE_denominator
def mase_denominator(r, h, data, target_name, target_mode, numberOfSelectedCounties, spatial_mode):
if numberOfSelectedCounties == -1:
numberOfSelectedCounties = len(data['county_fips'].unique())
X_train_train, X_train_val, X_test, y_train_train_date, y_train_val_date, y_test_date = preprocess(data,
spatial_mode, 1)
train_val_df = (y_train_train_date.append(y_train_val_date).reset_index(drop=True)).sort_values(by=['date of day t', 'county_fips'])
val_test_df = (train_val_df.append(y_test_date).reset_index(drop=True)).sort_values(by=['date of day t', 'county_fips'])
train_val = train_val_df.tail(len(y_train_train_date))
train_train = train_val_df.iloc[:-r,:].tail(len(y_train_train_date))
test = val_test_df.tail(len(y_test_date))
train = val_test_df.iloc[:-r,:].tail(len(y_test_date))
train = train.tail(len(test)).rename(columns={'Target': 'train-Target', 'date of day t': 'train-date'})
train_train = train_train.tail(len(train_val)).rename(
columns={'Target': 'train-Target', 'date of day t': 'train-date'})
train_val = train_val.rename(columns={'Target': 'val-Target', 'date of day t': 'val-date'})
test = test.rename(columns={'Target': 'test-Target', 'date of day t': 'test-date'})
df_for_train_val_MASE_denominator = pd.concat(
[train_train.reset_index(drop=True), train_val.reset_index(drop=True)], axis=1)
df_for_train_val_MASE_denominator['absolute-error'] = abs(df_for_train_val_MASE_denominator['val-Target'] -
df_for_train_val_MASE_denominator['train-Target'])
df_for_val_test_MASE_denominator = pd.concat([train.reset_index(drop=True), test.reset_index(drop=True)], axis=1)
df_for_val_test_MASE_denominator['absolute-error'] = abs(df_for_val_test_MASE_denominator['test-Target'] -
df_for_val_test_MASE_denominator['train-Target'])
train_val_MASE_denominator = df_for_train_val_MASE_denominator['absolute-error'].mean()
val_test_MASE_denominator = df_for_val_test_MASE_denominator['absolute-error'].mean()
# we need to have mase denominator based on target values for whole country (sum of target for all counties)
# this will be used for calculation of country error
df_for_train_val_MASE_denominator_country = df_for_train_val_MASE_denominator.groupby(['val-date']).sum()
df_for_train_val_MASE_denominator_country['absolute-error'] = abs(
df_for_train_val_MASE_denominator_country['val-Target'] -
df_for_train_val_MASE_denominator_country['train-Target'])
df_for_val_test_MASE_denominator_country = df_for_val_test_MASE_denominator.groupby(['test-date']).sum()
df_for_val_test_MASE_denominator_country['absolute-error'] = abs(
df_for_val_test_MASE_denominator_country['test-Target'] -
df_for_val_test_MASE_denominator_country['train-Target'])
train_val_MASE_denominator_country = df_for_train_val_MASE_denominator_country['absolute-error'].mean()
val_test_MASE_denominator_country = df_for_val_test_MASE_denominator_country['absolute-error'].mean()
return train_val_MASE_denominator, val_test_MASE_denominator, train_val_MASE_denominator_country, val_test_MASE_denominator_country
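# For reference, the denominators computed above feed the Mean Absolute Scaled Error:
#   MASE = mean(|y_t - yhat_t|) / mean(|y_t - y_(t-r)|)
# i.e. the model's MAE scaled by the MAE of a naive forecast that repeats the value
# observed r steps earlier, computed once for county-level targets and once for the
# country-level (summed) targets.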
########################################################### run non-mixed methods in parallel
def parallel_run(method, X_train_train, X_train_val, y_train_train, y_train_val, best_loss, c):
y_prediction, y_prediction_train = None, None
if method == 'GBM':
y_prediction, y_prediction_train = GBM(X_train_train, X_train_val, y_train_train, best_loss['GBM'])
elif method == 'GLM':
y_prediction, y_prediction_train = GLM(X_train_train, X_train_val, y_train_train)
elif method == 'KNN':
y_prediction, y_prediction_train = KNN(X_train_train, X_train_val, y_train_train)
elif method == 'NN':
y_prediction, y_prediction_train = NN(X_train_train, X_train_val, y_train_train, y_train_val, best_loss['NN'])
return y_prediction, y_prediction_train
########################################################### run mixed methods in parallel
def mixed_parallel_run(method, X_train, X_test, y_train, y_test, best_loss):
y_prediction, y_prediction_train = None, None
if method == 'MM_GLM':
y_prediction, y_prediction_train = MM_GLM(X_train, X_test, y_train)
elif method == 'MM_NN':
y_prediction, y_prediction_train = MM_NN(X_train, X_test, y_train, y_test, best_loss[method])
return y_prediction, y_prediction_train
########################################################### run algorithms in parallel except mixed models
def run_algorithms(X_train_dict, X_val_dict, y_train_dict, y_val_dict, best_loss, c, spatial_mode, county_fips):
from models import GBM, GLM, KNN, NN
t1 = time.time()
methods = ['GBM', 'GLM', 'KNN', 'NN']
X_train = {method: None for method in methods}
X_val = {method: None for method in methods}
y_train = {method: None for method in methods}
y_val = {method: None for method in methods}
loom = ProcessLoom(max_runner_cap=4)
# add the functions to the multiprocessing object, loom
if spatial_mode == 'country':
for method in methods:
X_train[method] = X_train_dict[method].drop(['county_fips', 'date of day t'], axis=1)
X_val[method] = X_val_dict[method].drop(['county_fips', 'date of day t'], axis=1)
y_train[method] = np.array(y_train_dict[method]['Target']).reshape(-1)
y_val[method] = np.array(y_val_dict[method]['Target']).reshape(-1)
loom.add_function(GBM, [X_train['GBM'], X_val['GBM'], y_train['GBM'], best_loss['GBM']], {})
loom.add_function(GLM, [X_train['GLM'], X_val['GLM'], y_train['GLM']], {})
loom.add_function(KNN, [X_train['KNN'], X_val['KNN'], y_train['KNN']], {})
loom.add_function(NN, [X_train['NN'], X_val['NN'], y_train['NN'], y_val['NN'], best_loss['NN']], {})
if spatial_mode == 'county':
for method in methods:
X_train[method] = X_train_dict[method]
X_train[method] = X_train[method][X_train[method]['county_fips'] == county_fips].drop(
['county_fips', 'date of day t'], axis=1)
X_val[method] = X_val_dict[method]
X_val[method] = X_val[method][X_val[method]['county_fips'] == county_fips].drop(
['county_fips', 'date of day t'], axis=1)
y_train[method] = y_train_dict[method]
y_train[method] = y_train[method][y_train[method]['county_fips'] == county_fips].drop(
['county_fips', 'date of day t'], axis=1)
y_val[method] = y_val_dict[method]
y_val[method] = y_val[method][y_val[method]['county_fips'] == county_fips].drop(
['county_fips', 'date of day t'], axis=1)
y_train[method] = np.array(y_train[method]['Target']).reshape(-1)
y_val[method] = np.array(y_val[method]['Target']).reshape(-1)
loom.add_function(GBM, [X_train['GBM'], X_val['GBM'], y_train['GBM'], best_loss['GBM']], {})
loom.add_function(GLM, [X_train['GLM'], X_val['GLM'], y_train['GLM']], {})
loom.add_function(KNN, [X_train['KNN'], X_val['KNN'], y_train['KNN']], {})
loom.add_function(NN, [X_train['NN'], X_val['NN'], y_train['NN'], y_val['NN'], best_loss['NN']], {})
# run the processes in parallel
output = loom.execute()
t2 = time.time()
print('total time - run algorithms: ', t2 - t1)
return output[0]['output'], output[1]['output'], output[2]['output'], output[3]['output']
########################################################### run mixed models in parallel
def run_mixed_models(X_train_MM, X_test_MM, y_train_MM, y_test_MM, best_loss):
from models import GBM, GLM, KNN, NN, MM_GLM
t1 = time.time()
loom = ProcessLoom(max_runner_cap=2)
# add the functions to the multiprocessing object, loom
loom.add_function(MM_GLM, [X_train_MM['MM_GLM'], X_test_MM['MM_GLM'], y_train_MM['MM_GLM']], {})
loom.add_function(MM_NN, [X_train_MM['MM_NN'], X_test_MM['MM_NN'], y_train_MM['MM_NN'], y_test_MM['MM_NN'],
best_loss['MM_NN']], {})
# run the processes in parallel
output = loom.execute()
t2 = time.time()
print('total time - run mixed models: ', t2 - t1)
return output[0]['output'], output[1]['output']
####################################################################### update best loss
def update_best_loss(model_type, spatial_mode, county_fips, best_loss, X_train_train_to_use, X_train_val_to_use,
y_train_train, \
y_train_val, y_prediction_train, y_prediction, covariates, \
numberOfCovariates, max_c):
h = 1
if model_type == 'mixed_model':
loom = ProcessLoom(max_runner_cap=1)
c = numberOfCovariates
if numberOfCovariates > max_c:
c = max_c
y_predictions_test, y_predictions_train = [], []
if spatial_mode == 'county':
# Construct the outputs for the testing dataset of the 'MM' methods
y_predictions_test.extend(
[y_prediction[county_fips]['GBM'][(h, c)], y_prediction[county_fips]['GLM'][(h, c)],
y_prediction[county_fips]['KNN'][(h, c)], y_prediction[county_fips]['NN'][(h, c)]])
elif spatial_mode == 'country':
y_predictions_test.extend([y_prediction['GBM'][(h, c)], y_prediction['GLM'][(h, c)],
y_prediction['KNN'][(h, c)], y_prediction['NN'][(h, c)]])
y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
if spatial_mode == 'county':
# Construct the outputs for the training dataset of the 'MM' methods
y_predictions_train.extend(
[y_prediction_train[county_fips]['GBM'][(h, c)], y_prediction_train[county_fips]['GLM'][(h, c)],
y_prediction_train[county_fips]['KNN'][(h, c)], y_prediction_train[county_fips]['NN'][(h, c)]])
elif spatial_mode == 'country':
y_predictions_train.extend([y_prediction_train['GBM'][(h, c)], y_prediction_train['GLM'][(h, c)],
y_prediction_train['KNN'][(h, c)], y_prediction_train['NN'][(h, c)]])
y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
loom.add_function(NN_grid_search, [X_train_mixedModel, y_train_train, X_test_mixedModel, y_train_val], {})
best_loss_output = loom.execute()
best_loss['MM_NN'] = best_loss_output[0]['output']
if model_type == 'none_mixed_model':
print('check 292')
loom = ProcessLoom(max_runner_cap=2)
if spatial_mode == 'country':
loom.add_function(GBM_grid_search, [X_train_train_to_use['GBM'][covariates],
y_train_train, X_train_val_to_use['GBM'][covariates],
y_train_val], {})
loom.add_function(NN_grid_search, [X_train_train_to_use['NN'][covariates],
y_train_train, X_train_val_to_use['NN'][covariates],
y_train_val], {})
if spatial_mode == 'county':
loom.add_function(GBM_grid_search, [X_train_train_to_use[county_fips][h]['GBM'][covariates],
y_train_train, X_train_val_to_use[county_fips][h]['GBM'][covariates],
y_train_val], {})
loom.add_function(NN_grid_search, [X_train_train_to_use[county_fips][h]['NN'][covariates],
y_train_train, X_train_val_to_use[county_fips][h]['NN'][covariates],
y_train_val], {})
best_loss_output = loom.execute()
best_loss['GBM'], best_loss['NN'] = best_loss_output[0]['output'], best_loss_output[1]['output']
return best_loss
###########################################################
def get_best_loss_mode(counties_best_loss_list):
methods_with_loss = ['GBM', 'NN', 'MM_NN']
best_loss = {method: None for method in methods_with_loss}
for method in methods_with_loss:
counties_best_loss_array = np.array(counties_best_loss_list[method])
        # when number_of_selected_counties is smaller than the number of distinct losses,
        # it is sometimes not possible to find a mode
if len(np.unique(counties_best_loss_array)) == len(counties_best_loss_array):
best_loss[method] = random.choice(counties_best_loss_list[method])
else:
best_loss[method] = statistics.mode(counties_best_loss_list[method])
return (best_loss)
########################################################### generate data for best h and c
def generate_data(h, numberOfCovariates, covariates_names, numberOfSelectedCounties):
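    # Rebuild the historical dataset for the given h and keep only the numberOfCovariates highest-ranked covariates
    # (all of their historical copies), plus the forced features, 'county_fips' and 'date of day t'.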
data = makeHistoricalData(h, r, test_size, 'confirmed', 'mrmr', spatial_mode, target_mode, data_address, future_features, pivot, int(argv[1]))
data = clean_data(data, numberOfSelectedCounties, spatial_mode)
X_train, X_test, y_train, y_test = preprocess(data, spatial_mode, 0)
covariates = [covariates_names[i] for i in range(numberOfCovariates)]
best_covariates = force_features.copy()
indx_c = 0
for covar in covariates: # iterate through sorted covariates
indx_c += 1
for covariate in data.columns: # add all historical covariates of this covariate and create a feature
pure_name = covar.split(' ')[0]
cov_temp = covariate.split(' ')[0]
if pure_name == cov_temp and pure_name not in force_features:
best_covariates.append(covariate)
best_covariates += ['county_fips',
'date of day t'] # we add this two columns to use when we want break data to county_data
X_train = X_train[best_covariates]
X_test = X_test[best_covariates]
return X_train, X_test, y_train, y_test
########################################################### plot validation results
def plot_results(row, col, numberOfCovariates, methods, history, errors, mode):
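    # For each method, plot the chosen validation error against the number of covariates, with one curve per history length h.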
mpl.style.use('seaborn')
plt.rc('font', size=20)
fig, ax = plt.subplots(row, col, figsize=(40, 40))
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
sorted_names = [name for hsv, name in by_hsv]
colorset = set(sorted_names[::-1])
for item in colorset:
if ('white' in item) or ('light' in item):
colorset = colorset - {item}
colors = list(colorset - {'lavenderblush', 'aliceblue', 'lavender', 'azure',
'mintcream', 'honeydew', 'beige', 'ivory', 'snow', 'w'})
# colors = ['tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan',
# 'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple']
ind = 0
for i in range(row):
for j in range(col):
color = 0
for h in history:
errors_h = []
# x label: covariates
covariates_list = [c for c in range(1, numberOfCovariates + 1)][:maxC]
# y label: errors
for c in range(1, numberOfCovariates + 1):
errors_h.append(errors[methods[ind]][(h, c)])
if c == maxC:
break
ax[i, j].plot(covariates_list, errors_h, colors[color * 2], label="h = " + str(h))
ax[i, j].set_xlabel("Number Of Covariates")
ax[i, j].set_ylabel(mode)
ax[i, j].set_title(str(methods[ind]))
ax[i, j].legend()
ax[i, j].set_xticks(covariates_list)
color += 1
ind += 1
address = validation_address + 'plots_of_errors/'
if not os.path.exists(address):
os.makedirs(address)
plt.savefig(address + str(mode) + '.pdf')
########################################################### plot table for final results
def plot_table(table_data, col_labels, row_labels, name, mode):
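    # Render a results table both as a PDF figure and as a CSV file in the validation or test output folder.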
fig = plt.figure() # dpi=50 figsize=(30, 10)
ax = fig.add_subplot(111)
colWidths = [0.1, 0.1, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]
address = ''
if mode == 'val':
# colWidths.pop()
address = validation_address + 'tables/'
if not os.path.exists(address):
os.makedirs(address)
else:
address = test_address + 'tables/'
if not os.path.exists(address):
os.makedirs(address)
the_table = plt.table(cellText=table_data,
colWidths=colWidths,
rowLabels=row_labels,
colLabels=col_labels,
loc='center',
cellLoc='center')
the_table.auto_set_font_size(False)
the_table.set_fontsize(9)
the_table.scale(1.5, 1.5)
ax.axis('off')
plt.savefig(address + name + '.pdf', bbox_inches='tight')
csv_table = pd.DataFrame(table_data, columns=col_labels)
csv_table['method'] = list(row_labels)
csv_table.to_csv(address + name +'.csv', index = False)
########################################################### plotting mean errors (first error)
def plot_targets(method, x_axis, df, main_address):
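    # Plot the daily average of the real targets against the daily average of the predictions for one method.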
mpl.style.use('default')
plt.rc('font', size=40)
fig, ax = plt.subplots(figsize=(60, 20))
ax.plot(x_axis, df['average of targets'], label='Target')
ax.plot(x_axis, df['average of predictions'], label='Prediction')
ax.set_xlabel('date', fontsize=40)
ax.set_ylabel('real and predicted targets for ' + str(method), fontsize=40)
ax.legend()
address = main_address + 'procedure_of_prediction/'
if not os.path.exists(address):
os.makedirs(address)
plt.savefig(address + 'procedure_' + str(method) + '.pdf')
########################################################### box plots and violin plots
def box_violin_plot(X, Y, figsizes, fontsizes, name, address):
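    # Save a box plot and a violin plot of Y grouped by X (used for the per-day error distributions).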
mpl.style.use('default')
# box plot
fig = plt.figure(figsize=figsizes['box'])
plt.rc('font', size=fontsizes['box'])
plt.locator_params(axis='y', nbins=20)
sns.boxplot(x=X, y=Y)
plt.savefig(address + str(name) + 'boxplot.pdf')
plt.close()
# violin plot
fig = plt.figure(figsize=figsizes['violin'])
plt.rc('font', size=fontsizes['violin'])
plt.locator_params(axis='y', nbins=20)
sns.violinplot(x=X, y=Y)
plt.savefig(address + str(name) + 'violinplot.pdf')
plt.close()
########################################################### plot prediction and real values
def real_prediction_plot(df, r, test_size, target_name, target_mode, best_h, maxHistory, spatial_mode, methods,
future_mode, numberOfSelectedCounties):
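    # Plot real vs. predicted target values over time for a few sample counties (New York plus two others) for every method.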
address = test_address + 'plots_of_real_prediction_values/'
if not os.path.exists(address):
os.makedirs(address)
if target_mode == 'weeklyaverage':
label_prefix = 'Weekly averaged \n n'
elif target_mode == 'weeklymovingaverage':
label_prefix = 'weekly moving averaged \n n'
elif target_mode == 'differential':
label_prefix = 'differential \n n'
elif target_mode == 'logarithmic':
label_prefix = 'logarithmic \n n'
elif target_mode == 'cumulative':
label_prefix = 'cumulative \n n'
else:
label_prefix = 'N'
if target_name == 'confirmed':
        label_suffix = ' cases'  # leading space so the label reads "... confirmed cases"
else:
label_suffix = 's'
for method in methods:
method_prediction_df = df[method] # this df contain real and predicted target values
if pivot == 'county':
county_name_df = pd.read_csv(data_address + 'fixed-data.csv')[
['county_fips', 'county_name']] # we need county names for plot label
elif pivot == 'state':
county_name_df = pd.read_csv(data_address + 'fixed-data.csv')[
['state_fips', 'state_name']] # we need county names for plot label
county_name_df.rename(columns={'state_fips': 'county_fips', 'state_name': 'county_name'},
inplace=True)
county_name_df.drop_duplicates(subset=["county_fips", "county_name"],
keep='first', inplace=True)
df_for_plot = pd.merge(method_prediction_df, county_name_df, how='left')
if target_mode != 'weeklyaverage':
df_for_plot['date'] = df_for_plot['date of day t'].apply(
lambda x: datetime.datetime.strptime(x, '%m/%d/%y') + datetime.timedelta(days=r))
df_for_plot['weekday'] = df_for_plot['date'].apply(lambda x: x.weekday())
df_for_plot['date'] = df_for_plot['date'].apply(lambda x: datetime.datetime.strftime(x, '%m/%d/%y'))
else:
df_for_plot['date'] = df_for_plot['date of day t'].apply(lambda x: 'week ' + str(x + r))
df_for_plot.loc[df_for_plot['prediction'] < 0, 'prediction'] = 0
counties = []
for i in [36061, 40117, 51059]: # newyork + two random county
if len(df_for_plot[df_for_plot['county_fips'] == i]) > 0:
counties.append(i)
else:
counties = counties + random.sample(df_for_plot['county_fips'].unique().tolist(), 1)
length = list()
for county in counties:
length.append(len(df_for_plot[df_for_plot['county_fips'] == county]))
        plot_width = max(length) + 20
        fig, ax = plt.subplots(figsize=(plot_width, 75))
mpl.style.use('default')
plt.rc('font', size=45)
for index, county in enumerate(counties):
plt.subplot(311 + index)
county_df_for_plot = df_for_plot.loc[df_for_plot['county_fips'] == county]
plt.plot(county_df_for_plot['date'][:-(r - 1)],
county_df_for_plot['prediction'].round()[:-(r - 1)],
label='Train prediction', color='forestgreen', linewidth=2.0)
plt.plot(county_df_for_plot['date'][-r:],
county_df_for_plot['prediction'].round()[-r:],
label='Test prediction', color='dodgerblue', linewidth=2.0)
plt.plot(county_df_for_plot['date'],
county_df_for_plot['Target'].round(), label='Real values',
color='black', linewidth=2.0)
# if target_mode != 'cumulative':
# plt.plot(county_df_for_plot['date'][-r:],county_df_for_plot['Target'].round()[-(2*r):-r],'-.',color='gray',label='Naive prediction',linewidth=2.0)
if target_mode != 'weeklyaverage':
county_df_for_plot = county_df_for_plot.reset_index(drop=True)
weekend_index = county_df_for_plot[county_df_for_plot['weekday'].isin([5, 6])].index
for i in weekend_index:
plt.gca().get_xticklabels()[i].set_color("red")
plt.xticks(rotation=65)
fig.subplots_adjust(hspace=0.4)
plt.ylabel(label_prefix + 'umber of ' + target_name + label_suffix)
            countyname = df_for_plot.loc[df_for_plot['county_fips'] == county, 'county_name'].unique()[0]
            plt.title(countyname)
plt.legend()
plt.xlabel('Date')
plt.savefig(address + str(method) + ' real_prediction_values.pdf')
plt.close()
########################################################### get errors for each model in each h and c
def get_errors(h, c, method, y_prediction, y_prediction_train, y_test_date, y_train_date, regular_data,
MASE_denominator, numberOfSelectedCounties, target_name, mode):
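    # Compute county-level and whole-country error metrics (MAE, MAPE, adjusted R2, a signed 'second error' and MASE)
    # for one method at a given (h, c); predictions are first converted back to the regular target scale if needed.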
    # y_test_date and y_train_date are dataframes with columns ['date of day t', 'county_fips', 'Target']
# set negative predictions to zero
y_prediction[y_prediction < 0] = 0
# country_errors show error for prediction of target variable for whole country
country_errors = {error: None for error in
['meanAbsoluteError', 'percentageOfAbsoluteError', 'adj_r_squared', 'second_error', 'MASE']}
# state_errors = {state_fips: {error: None
# for error in ['meanAbsoluteError', 'percentageOfAbsoluteError', 'adj_r_squared', 'second_error', 'MASE']}
# for state_fips in [1, 2]}
    # the next 8 lines sort y_prediction and y_prediction_train like the output of the preprocess function
    # we need to sort the predictions because in county and state mode their order may be scrambled
y_train_date['prediction'] = y_prediction_train
y_train_date = y_train_date.sort_values(by=['county_fips', 'date of day t'])
y_prediction_train = list(y_train_date['prediction'])
y_train_date = y_train_date.drop(['prediction'], axis=1)
y_test_date['prediction'] = y_prediction
y_test_date = y_test_date.sort_values(by=['county_fips', 'date of day t'])
y_prediction = list(y_test_date['prediction'])
y_test_date = y_test_date.drop(['prediction'], axis=1)
y_test = np.array(y_test_date['Target']).reshape(-1)
if numberOfSelectedCounties == -1:
numberOfSelectedCounties = len(y_test_date['county_fips'].unique())
# we need data with regular target to return modified target to its original state
    # in validation mode the regular data is read in the main function and passed to get_errors to avoid redundancy,
    # but in test mode that is not possible because each method has a different best h
if mode == 'test':
regular_data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, 'regular', data_address,
future_features, pivot, int(argv[1]))
regular_data = clean_data(regular_data, numberOfSelectedCounties, spatial_mode)
temp_1, temp_2, regular_y_train_date, regular_y_test_date = preprocess(regular_data, spatial_mode, 0)
if mode == 'val':
temp_1, temp_2, temp_3, regular_y_train_date, regular_y_test_date, temp_4 = preprocess(regular_data,
spatial_mode, 1)
# if target mode is cumulative we need to return the target variable to its original state
if target_mode == 'cumulative':
cumulative_data = y_train_date.append(y_test_date)
cumulative_data['prediction'] = list(y_train_date['Target']) + list(y_prediction)
cumulative_data = cumulative_data.sort_values(by=['date of day t', 'county_fips'])
reverse_dates = cumulative_data['date of day t'].unique()[-(r + 1):][::-1]
for index in range(len(reverse_dates)):
date = reverse_dates[index]
past_date = reverse_dates[index + 1]
cumulative_data.loc[cumulative_data['date of day t'] == date, 'Target'] = list(
np.array(cumulative_data.loc[cumulative_data['date of day t'] == date, 'Target']) - np.array(
cumulative_data.loc[cumulative_data['date of day t'] == past_date, 'Target']))
cumulative_data.loc[cumulative_data['date of day t'] == date, 'prediction'] = list(
np.array(cumulative_data.loc[cumulative_data['date of day t'] == date, 'prediction']) - np.array(
cumulative_data.loc[cumulative_data['date of day t'] == past_date, 'prediction']))
if index == len(reverse_dates) - 2:
break
cumulative_data = cumulative_data.sort_values(by=['date of day t', 'county_fips'])
y_test_date = cumulative_data.tail(r * numberOfSelectedCounties)
y_test = np.array(y_test_date['Target']).reshape(-1)
y_prediction = np.array(cumulative_data.tail(r * numberOfSelectedCounties)['prediction']).reshape(-1)
# if target mode is logarithmic we need to return the target variable to its original state
if target_mode == 'logarithmic':
y_test = np.array(np.round(np.exp(y_test) - 1)).reshape(-1)
y_test_date['Target'] = list(np.round(np.exp(y_test_date['Target']) - 1))
y_prediction = np.array(np.exp(y_prediction) - 1).reshape(-1)
# if target mode is moving average we need to return the target variable to its original state
if target_mode == 'weeklymovingaverage':
        # past target values used to convert the weeklymovingaverage (predicted) target
        # back to its original scale before computing errors
regular_real_predicted_target = regular_y_train_date.append(
regular_y_test_date) # dataframe with columns ['date of day t', 'county_fips', 'Target']
regular_real_predicted_target['prediction'] = list(regular_y_train_date['Target']) + list(y_prediction)
regular_real_predicted_target = regular_real_predicted_target.sort_values(by=['date of day t', 'county_fips'])
regular_real_predicted_target = regular_real_predicted_target.tail((r + 6) * numberOfSelectedCounties)
dates = regular_real_predicted_target['date of day t'].unique()
for index in range(len(dates)):
ind = index + 6
date = dates[ind]
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(7 * np.array(
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction']))
for i in range(6):
past_date = dates[ind - (i + 1)]
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(np.array(
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction']) - np.array(
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == past_date, 'prediction']))
if ind == len(dates) - 1:
break
y_test_date = regular_real_predicted_target.tail(r * numberOfSelectedCounties)
y_test = np.array(y_test_date['Target']).reshape(-1)
y_prediction = np.array(regular_real_predicted_target.tail(r * numberOfSelectedCounties)['prediction']).reshape(
-1)
# if target mode is differential we need to return the target variable to its original state
if target_mode == 'differential':
        # past target values used to convert the differential (predicted) target
        # back to its original scale before computing errors
regular_real_predicted_target = regular_y_train_date.append(
regular_y_test_date) # dataframe with columns ['date of day t', 'county_fips', 'Target']
regular_real_predicted_target['prediction'] = list(regular_y_train_date['Target']) + list(y_prediction)
regular_real_predicted_target = regular_real_predicted_target.sort_values(by=['date of day t', 'county_fips'])
regular_real_predicted_target = regular_real_predicted_target.tail((r + 1) * numberOfSelectedCounties)
dates = regular_real_predicted_target['date of day t'].unique()
for index in range(len(dates)):
date = dates[index + 1]
past_date = dates[index]
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(np.array(
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == date, 'prediction']) + np.array(
regular_real_predicted_target.loc[
regular_real_predicted_target['date of day t'] == past_date, 'prediction']))
if index == len(dates) - 2:
break
y_test_date = regular_real_predicted_target.tail(r * numberOfSelectedCounties)
y_test = np.array(y_test_date['Target']).reshape(-1)
y_prediction = np.array(regular_real_predicted_target.tail(r * numberOfSelectedCounties)['prediction']).reshape(
-1)
# make predictions rounded to their closest number
y_prediction = np.array(y_prediction)
if target_mode != 'weeklyaverage':
y_prediction = np.round(y_prediction)
    # for calculating the country error we must sum up all counties' target values to get the country-level target
y_test_date['prediction'] = y_prediction
# y_test_date['state_fips'] = y_test_date['county_fips'].map(lambda x: int(str(x)[:2]) if len(str(x)) == 5 else int(str(x)[:1]))
# print(150 * '*')
# print(y_test_date.shape)
# print(y_test_date.columns.values)
# print(y_test_date.tail())
# print(150 * '*')
y_test_date_country = y_test_date.groupby(['date of day t']).sum()
y_test_country = np.array(y_test_date_country['Target']).reshape(-1)
y_prediction_country = np.array(y_test_date_country['prediction']).reshape(-1)
#############################################################
# write outputs into a file
orig_stdout = sys.stdout
f = open(env_address + 'out.txt', 'a')
sys.stdout = f
meanAbsoluteError = mean_absolute_error(y_test, y_prediction)
print("Mean Absolute Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % meanAbsoluteError)
sumOfAbsoluteError = sum(abs(y_test - y_prediction))
percentageOfAbsoluteError = np.mean((abs(y_test - y_prediction)/y_test)*100)
#(sumOfAbsoluteError / sum(y_test)) * 100
# we change zero targets into 1 and add 1 to their predictions
y_test_temp = y_test.copy()
y_test_temp[y_test == 0] = 1
y_prediction_temp = y_prediction.copy()
y_prediction_temp[y_test == 0] += 1
# meanPercentageOfAbsoluteError = sum((abs(y_prediction_temp - y_test_temp) / y_test_temp) * 100) / len(y_test)
print("Percentage of Absolute Error of ", method, " for h =", h, "and #covariates =", c,
": %.2f" % percentageOfAbsoluteError)
rootMeanSquaredError = sqrt(mean_squared_error(y_test, y_prediction))
print("Root Mean Squared Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % rootMeanSquaredError)
second_error = sum(abs(y_prediction - y_test))
### compute adjusted R squared error
# SS_Residual = sum((y_test - y_prediction.reshape(-1)) ** 2)
# SS_Total = sum((y_test - np.mean(y_test)) ** 2)
# r_squared = 1 - (float(SS_Residual)) / SS_Total
adj_r_squared = 1# - (1 - r_squared) * (len(y_test) - 1) / (len(y_test) - c - 1)
print("Adjusted R Squared Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % adj_r_squared)
MASE_numerator = sum(abs(y_prediction_temp - y_test_temp)) / len(y_test)
MASE = MASE_numerator / MASE_denominator['county']
print("MASE Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % MASE)
print("-----------------------------------------------------------------------------------------")
# calculate whole country error
country_errors['meanAbsoluteError'] = mean_absolute_error(y_test_country, y_prediction_country)
sumOfAbsoluteError = sum(abs(y_test_country - y_prediction_country))
    country_errors['percentageOfAbsoluteError'] = np.mean((abs(y_test_country - y_prediction_country) / y_test_country) * 100)
#(sumOfAbsoluteError / sum(y_test_country)) * 100
y_test_temp_country = y_test_country.copy()
y_test_temp_country[y_test_country == 0] = 1
y_prediction_temp_country = y_prediction_country.copy()
y_prediction_temp_country[y_test_country == 0] += 1
# meanPercentageOfAbsoluteError = sum((abs(y_prediction_temp - y_test_temp) / y_test_temp) * 100) / len(y_test)
### compute adjusted R squared error
# SS_Residual = sum((y_test_country - y_prediction_country.reshape(-1)) ** 2)
# SS_Total = sum((y_test_country - np.mean(y_test_country)) ** 2)
# r_squared = 1 - (float(SS_Residual)) / SS_Total
if len(y_test_country) - c - 1 > 0:
country_errors['adj_r_squared'] = 1 # - (1 - r_squared) * (len(y_test_country) - 1) / (
#len(y_test_country) - c - 1)
else:
country_errors['adj_r_squared'] = 1
MASE_numerator = sum(abs(y_prediction_temp_country - y_test_temp_country)) / len(y_test_country)
country_errors['MASE'] = MASE_numerator / MASE_denominator['country']
country_errors['second_error'] = (sum(y_prediction_country - y_test_country) / sum(y_test_country)) * 100
# save outputs in 'out.txt'
sys.stdout = orig_stdout
f.close()
# for the test mode we compute some additional errors, we need 'date of day t' column so we use the main dataframe
# we add our prediction, the difference between prediction and target ('error' column),
# the absolute difference between prediction and target ('absolute_error' column),
    # the percentage of this difference ('percentage_error' column -> we change zero targets into 1 and add 1 to their predictions),
# and second_error as follows and save these in 'all_errors' file
# then we compute the average of percentage_errors (and other values) in each day and save them in
# 'first_error' file
if mode == 'test':
# write outputs into a file
orig_stdout = sys.stdout
f = open(env_address + 'out.txt', 'a')
sys.stdout = f
first_error_address = test_address + 'averages_of_errors_in_each_day/'
all_errors_address = test_address + 'all_errors/' + str(method) + '/'
if not os.path.exists(first_error_address):
os.makedirs(first_error_address)
if not os.path.exists(all_errors_address):
os.makedirs(all_errors_address)
dataframe = pd.DataFrame(y_test_date, copy=True)
dataframe['prediction'] = y_prediction
dataframe['error'] = y_prediction - y_test
        dataframe['absolute_error'] = abs(y_prediction - y_test)
y_test_temp = y_test.copy()
y_test_temp[y_test == 0] = 1
y_prediction_temp = y_prediction.copy()
y_prediction_temp[y_test == 0] += 1
dataframe['percentage_error'] = ((abs(y_prediction_temp - y_test_temp)) / y_test_temp) * 100
second_error = (sum(dataframe['error']) / sum(y_test)) * 100
dataframe.to_csv(all_errors_address + 'all_errors_' + str(method) + '.csv')
box_violin_plot(dataframe['date of day t'], dataframe['percentage_error'],
figsizes={'box': (60, 30), 'violin': (100, 50)},
fontsizes={'box': 40, 'violin': 60}, name=str(method) + '_percentage_errors_in_each_day_',
address=all_errors_address)
box_violin_plot(dataframe['date of day t'], dataframe['error'], figsizes={'box': (20, 10), 'violin': (50, 30)},
fontsizes={'box': 15, 'violin': 30}, name=str(method) + '_pure_errors_in_each_day_',
address=all_errors_address)
dataframe['county_fips'] = dataframe['county_fips'].astype(float)
if numberOfSelectedCounties == -1:
numberOfSelectedCounties = len(dataframe['county_fips'])
first_error = pd.DataFrame((dataframe.groupby(['date of day t']).sum() / numberOfSelectedCounties))
        first_error.columns = ['fips', 'average of targets', 'average of predictions', 'average of errors',
                               'average of absolute_errors', 'average of percentage_errors']
first_error = first_error.drop(['fips'], axis=1)
first_error.to_csv(first_error_address + 'first_error_' + str(method) + '.csv')
plot_targets(method, first_error.index, first_error, first_error_address)
# save outputs in 'out.txt'
sys.stdout = orig_stdout
f.close()
return meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, MASE, country_errors
########################################################### push results to github
def push(message):
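    # Commit and push the current results to the git repository (only when push_flag is enabled).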
if push_flag == 1:
try:
cmd.run("git pull", check=True, shell=True)
print("everything has been pulled")
cmd.run("git add .", check=True, shell=True)
cmd.run(f"git commit -m '{message}'", check=True, shell=True)
cmd.run("git push", check=True, shell=True)
print('pushed.')
except:
print('could not push')
########################################################### zip some of the results
def make_zip(selected_for_email, subject):
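    # Copy the PDF outputs of the selected result folders into the mail folder and zip them for e-mailing.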
for source_root in selected_for_email:
for i in [x[0] for x in os.walk(source_root)]:
address = mail_address + '//' + '/'.join(i.split('/')[3:])
# print(address)
if not os.path.exists(address):
os.makedirs(address)
for pdffile in glob.iglob(os.path.join(i, "*.pdf")):
shutil.copy(pdffile, address)
shutil.make_archive(subject, 'zip', mail_address)
########################################################### mail some of the results
def send_email(*attachments):
subject = "Server results"
body = " "
sender_email = "[email protected]"
receiver_email = ["[email protected]"] # ,"[email protected]"
CC_email = [] # "[email protected]"
password = "S.123456.S"
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = ','.join(receiver_email) # receiver_email
message["Subject"] = subject
message["CC"] = ','.join(CC_email) # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
# Add attachments
for file_name in attachments:
f = open(file_name, 'rb')
ctype, encoding = mimetypes.guess_type(file_name)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
# in case of a text file
if maintype == 'text':
            part = MIMEText(f.read().decode('utf-8'), _subtype=subtype)  # the file was opened in binary mode, so decode first
# any other file
else:
part = MIMEBase(maintype, subtype)
part.set_payload(f.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(file_name))
message.attach(part)
f.close()
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email + CC_email, text)
########################################################## flatten
def flatten(data=None, h=None, c=None, method=None, covariates_list=None, state=1):
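    # Flatten per-county dictionaries into a single list/array/DataFrame; 'state' selects the nesting layout of the input
    # (e.g. state 1: data[county][method][(h, c)], state 5: data[county] concatenated into a numpy array).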
if state == 1:
result = []
for county_fips in data:
result += list(data[county_fips][method][(h, c)])
elif state == 2:
result = []
for county_fips in data:
result += list(data[county_fips][(h, c)])
elif state == 3:
result = pd.DataFrame(columns=covariates_list)
for county_fips in data:
result = pd.concat([result, data[county_fips][h][method][covariates_list]], ignore_index=True)
elif state == 4:
for county_fips in data:
result = pd.DataFrame(columns=data[county_fips].columns.values)
break
for county_fips in data:
result = pd.concat([result, data[county_fips]], ignore_index=True)
elif state == 5:
result = []
for county_fips in data:
result += list(data[county_fips])
result = np.array(result)
elif state == 6:
result = []
for county_fips in data:
result += list(data[county_fips][method])
return result
############################################################
# test_process is defined as a function so it can be called early, when h reaches half of maxHistory or when none of the models have improved at the current h
def test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
val_test_MASE_denominator, future_mode, test_address, env_address, mail_address):
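    # Retrain every method on the whole training set with its best (h, c), compute the test errors for counties and
    # for the whole country, build the result tables and plots, and prepare the files that are pushed/e-mailed.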
columns_table_t = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
'adjusted R squared error',
'second error', 'mean absolute scaled error']
columns_table = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
'adjusted R squared error',
'sum of absolute error', 'mean absolute scaled error']
methods = ['GBM', 'GLM', 'KNN', 'NN', 'MM_GLM', 'MM_NN']
none_mixed_methods = ['GBM', 'GLM', 'KNN', 'NN']
mixed_methods = ['MM_GLM', 'MM_NN']
df_for_prediction_plot = {method: None for method in methods}
y_prediction = {}
y_prediction_train = {}
# run non-mixed methods on the whole training set with their best h and c
X_train_dict, X_test_dict, y_train_dict, y_test_dict = {}, {}, {}, {}
GBM, GLM, KNN, NN = run_algorithms(historical_X_train, historical_X_test, historical_y_train_date,
historical_y_test_date, best_loss, 0, spatial_mode, None)
y_prediction['GBM'], y_prediction_train['GBM'] = GBM
y_prediction['GLM'], y_prediction_train['GLM'] = GLM
y_prediction['KNN'], y_prediction_train['KNN'] = KNN
y_prediction['NN'], y_prediction_train['NN'] = NN
table_data = []
country_table_data = []
for method in none_mixed_methods:
meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, meanAbsoluteScaledError, country_errors = get_errors(
best_h[method]['MAPE'],
best_c[method]['MAPE'], method, y_prediction[method], y_prediction_train[method],
historical_y_test_date[method], historical_y_train_date[method],
None, val_test_MASE_denominator[best_h[method]['MAPE']], numberOfSelectedCounties, target_name, mode='test')
table_data.append([best_h[method]['MAPE'], best_c[method]['MAPE'], round(meanAbsoluteError, 2),
round(percentageOfAbsoluteError, 2), round(adj_r_squared, 2), round(second_error, 2),
round(meanAbsoluteScaledError, 2)])
country_table_data.append(
[best_h[method]['MAPE'], best_c[method]['MAPE'], round(country_errors['meanAbsoluteError'], 2),
round(country_errors['percentageOfAbsoluteError'], 2), round(country_errors['adj_r_squared'], 2),
round(country_errors['second_error'], 2), round(country_errors['MASE'], 2)])
push('a new table added')
for method in none_mixed_methods:
method_real_pred_df = historical_y_train_date[method].append(historical_y_test_date[method])
prediction = list(y_prediction_train[method]) + list(y_prediction[method])
method_real_pred_df['prediction'] = prediction
df_for_prediction_plot[method] = method_real_pred_df
# generate data for non-mixed methods with the best h and c of mixed models and fit mixed models on them
# (with the whole training set)
y_predictions = {'MM_GLM': [], 'MM_NN': []}
y_prediction = {}
# table_data = []
X_train_MM_dict, X_test_MM_dict, y_train_MM_dict, y_test_MM_dict = {}, {}, {}, {}
y_train, y_test = {}, {}
y_test_date = {}
for mixed_method in mixed_methods:
X_train, X_test, y_train_date, y_test_date[mixed_method] = generate_data(best_h[mixed_method]['MAPE'],
best_c[mixed_method]['MAPE'],
covariates_names,
numberOfSelectedCounties)
y_test_date_temp = y_test_date[mixed_method]
y_train[mixed_method] = y_train_date
y_test[mixed_method] = y_test_date_temp
mixed_model_covariates_names = list(X_train.columns)
X_train_to_use = {method: None for method in methods}
X_test_to_use = {method: None for method in methods}
for method in none_mixed_methods:
X_train_to_use[method] = X_train.copy()
X_test_to_use[method] = X_test.copy()
if method in models_to_log:
# make temporal and some fixed covariates logarithmic
negative_features = ['temperature', 'Retail', 'Grocery', 'Parks', 'Transit', 'Workplace', 'Residential']
for covar in mixed_model_covariates_names:
if (' t' in covar) and (covar.split(' ')[0] not in negative_features) and (
covar not in ['county_fips', 'date of day t']):
X_train_to_use[method][covar] = np.log((X_train_to_use[method][covar] + 1).astype(float))
X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
fix_log_list = ['total_population', 'population_density', 'area', 'median_household_income',
'houses_density', 'airport_distance', 'deaths_per_100000']
for covar in fix_log_list:
if covar in mixed_model_covariates_names:
X_train_to_use[method][covar] = np.log((X_train_to_use[method][covar] + 1).astype(float))
X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
X_train_dict[method] = X_train_to_use[method]
X_test_dict[method] = X_test_to_use[method]
y_train_dict[method] = y_train[mixed_method]
y_test_dict[method] = y_test[mixed_method]
GBM, GLM, KNN, NN = run_algorithms(X_train_dict, X_test_dict, y_train_dict, y_test_dict, best_loss, 0,
spatial_mode, None)
y_prediction['GBM'], y_prediction_train['GBM'] = GBM
y_prediction['GLM'], y_prediction_train['GLM'] = GLM
y_prediction['KNN'], y_prediction_train['KNN'] = KNN
y_prediction['NN'], y_prediction_train['NN'] = NN
y_predictions_test, y_predictions_train = [], []
# Construct the outputs for the testing dataset of the 'MM' methods
y_predictions_test.extend([y_prediction['GBM'], y_prediction['GLM'], y_prediction['KNN'], y_prediction['NN']])
y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
# Construct the outputs for the training dataset of the 'MM' methods
y_predictions_train.extend(
[y_prediction_train['GBM'], y_prediction_train['GLM'], y_prediction_train['KNN'], y_prediction_train['NN']])
y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
X_train_MM_dict[mixed_method] = X_train_mixedModel
X_test_MM_dict[mixed_method] = X_test_mixedModel
y_train_MM_dict[mixed_method] = y_train[mixed_method]
y_test_MM_dict[mixed_method] = y_test[mixed_method]
y_test_MM_dict[mixed_method] = np.array(y_test_MM_dict[mixed_method]['Target']).reshape(-1)
y_train_MM_dict[mixed_method] = np.array(y_train_MM_dict[mixed_method]['Target']).reshape(-1)
# # save the entire session
# filename = env_address + 'test.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
# mixed model with linear regression and neural network
MM_GLM, MM_NN = run_mixed_models(X_train_MM_dict, X_test_MM_dict, y_train_MM_dict, y_test_MM_dict, best_loss)
y_prediction['MM_GLM'], y_prediction_train['MM_GLM'] = MM_GLM
y_prediction['MM_NN'], y_prediction_train['MM_NN'] = MM_NN
for mixed_method in mixed_methods:
meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, meanAbsoluteScaledError, country_errors = get_errors(
best_h[mixed_method]['MAPE'],
best_c[mixed_method]['MAPE'], mixed_method, y_prediction[mixed_method], y_prediction_train[mixed_method],
y_test_date[mixed_method], y_train[mixed_method], None,
val_test_MASE_denominator[best_h[mixed_method]['MAPE']],
numberOfSelectedCounties, target_name, mode='test')
table_data.append([best_h[mixed_method]['MAPE'], best_c[mixed_method]['MAPE'], round(meanAbsoluteError, 2),
round(percentageOfAbsoluteError, 2),
round(adj_r_squared, 2), round(second_error, 2), round(meanAbsoluteScaledError, 2)])
country_table_data.append(
[best_h[mixed_method]['MAPE'], best_c[mixed_method]['MAPE'], round(country_errors['meanAbsoluteError'], 2),
round(country_errors['percentageOfAbsoluteError'], 2), round(country_errors['adj_r_squared'], 2),
round(country_errors['second_error'], 2), round(country_errors['MASE'], 2)])
table_name = 'table_of_best_test_results'
plot_table(table_data, columns_table_t, methods, table_name, mode='test')
table_name = 'table_of_country_best_test_results'
plot_table(country_table_data, columns_table_t, methods, table_name, mode='test')
push('a new table added')
for method in mixed_methods:
method_real_pred_df = y_train[method].append(y_test[method])
prediction = list(y_prediction_train[method]) + list(y_prediction[method])
method_real_pred_df['prediction'] = prediction
df_for_prediction_plot[method] = method_real_pred_df
if pivot != 'country' :
real_prediction_plot(df_for_prediction_plot, r, test_size, target_name, target_mode, best_h, maxHistory,
spatial_mode, methods, future_mode, numberOfSelectedCounties)
# mail the test results
selected_for_email = [test_address + '/tables', test_address + '/all_errors/NN', test_address + '/all_errors/KNN',
test_address + '/plots_of_real_prediction_values']
# zip_file_name = 'test results for h =' + str(maxHistory) + ' #counties=' + str(numberOfSelectedCountiesname)
# make_zip(selected_for_email, zip_file_name)
# # send_email(zip_file_name + '.zip')
# # save the entire session
# filename = env_address + 'test.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
########################################################### main
def main(maxHistory):
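    # Validation loop: for every history length h and every number of covariates c, train the base and mixed models,
    # track the best (h, c) per method and per error metric, and finally run the test process with those settings.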
print("main started")
history = [i for i in range(1, maxHistory + 1)]
print("history: ", history)
methods = ['GBM', 'GLM', 'KNN', 'NN', 'MM_GLM', 'MM_NN']
none_mixed_methods = ['GBM', 'GLM', 'KNN', 'NN']
# none_mixed_methods = ['GBM']
mixed_methods = ['MM_GLM', 'MM_NN']
target_name = 'confirmed'
base_data = makeHistoricalData(0, r, test_size, target_name, 'mrmr', spatial_mode, target_mode, data_address,
future_features, pivot, int(argv[1]))
print("base data before clean shape: ", base_data.shape)
base_data_before_clean_columns = base_data.columns.values
base_data = clean_data(base_data, numberOfSelectedCounties, spatial_mode)
print("base data after clean shape: ", base_data.shape)
print("base data cleaned columns: ",
[c for c in base_data_before_clean_columns if c not in base_data.columns.values])
covariates_names = list(base_data.columns)
covariates_names.remove('Target')
covariates_names.remove('date of day t')
covariates_names.remove('county_fips')
# covariates_names.remove('daily-country-test-per-1000 t')
numberOfCovariates = len(covariates_names)
print('number of covariates: ', numberOfCovariates)
# print(covariates_names)
y_prediction = {'GBM': {}, 'GLM': {}, 'KNN': {}, 'NN': {}, 'MM_GLM': {}, 'MM_NN': {}}
y_prediction_train = {'GBM': {}, 'GLM': {}, 'KNN': {}, 'NN': {}, 'MM_GLM': {}, 'MM_NN': {}}
error_names = ['MAPE', 'MAE', 'adj-R2', 'sec', 'MASE']
complete_error_names = {'MAPE': 'Percentage Of Absolute Error', 'MAE': 'Mean Absolute Error',
'MASE': 'Mean Absolute Scaled Error', 'adj-R2': 'Adjusted R Squared Error',
'sec': 'Sum Of Absolute Error'}
validation_errors = {error: {method: {} for method in methods} for error in error_names}
minError = {method: {error: int(1e10) for error in error_names} for method in methods}
best_h = {method: {error: 0 for error in error_names} for method in methods}
best_c = {method: {error: 0 for error in error_names} for method in methods}
# best_loss = {method: None for method in ['GBM', 'NN', 'MM_NN']}
best_loss = {'GBM': 'poisson', 'MM_NN': 'MeanAbsoluteError', 'NN': 'poisson'}
columns_table_t = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
'adjusted R squared error',
'second error', 'mean absolute scaled error'] # table columns names
columns_table = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
'adjusted R squared error',
'sum of absolute error', 'mean absolute scaled error'] # table columns names
train_val_MASE_denominator = {h: {key: None for key in ['county', 'country']} for h in history}
val_test_MASE_denominator = {h: {key: None for key in ['county', 'country']} for h in history}
historical_X_train = {} # X_train for best h and c
historical_X_test = {} # X_test for best h and c
historical_y_train = {} # y_train for best h and c
historical_y_test = {} # y_test for best h and c
historical_y_train_date = {} # y_train for best h and c with dates info
historical_y_test_date = {} # y_test for best h and c with dates info
parallel_outputs = {}
for h in history:
print(100 * "#")
print("h =", h)
data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, target_mode, data_address,
future_features, pivot, int(argv[1]))
print("data before clean shape:", data.shape)
# pre-process and split the data, 'date's have dates info
data = clean_data(data, numberOfSelectedCounties, spatial_mode)
print("data after clean shape:", data.shape)
X_train_train_to_use = {method: None for method in methods}
X_train_val_to_use = {method: None for method in methods}
X_test_to_use = {method: None for method in methods}
X_train_train, X_train_val, X_test, y_train_train_date, y_train_val_date, y_test_date = preprocess(data,
spatial_mode,
1)
# print([c for c in data.columns.values if c not in X_train_train])
# print("X_train_train shape:", X_train_train.shape)
# print("X_train_val shape:", X_train_val.shape)
# print("X_test shape:", X_test.shape)
# print("y_train_train_date shape:", y_train_train_date.shape)
# print("y_train_val_date shape:", y_train_val_date.shape)
# print("y_test_date shape:", y_test_date.shape)
# print("y columns:", y_test_date.columns.values)
if target_mode not in ['regular',
'weeklyaverage']: # we need regular data to return predicted values to first state
regular_data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, 'regular', data_address,
future_features, pivot, int(argv[1]))
regular_data = clean_data(regular_data, numberOfSelectedCounties, spatial_mode)
else:
regular_data = data
print("regular_data shape:", regular_data.shape)
train_val_MASE_denominator[h]['county'], val_test_MASE_denominator[h]['county'], train_val_MASE_denominator[h][
'country'], val_test_MASE_denominator[h]['country'] = mase_denominator(r, h,
regular_data, target_name,
target_mode,
numberOfSelectedCounties,
spatial_mode)
# print(train_val_MASE_denominator)
for method in methods:
X_train_train_to_use[method] = X_train_train.copy()
X_train_val_to_use[method] = X_train_val.copy()
X_test_to_use[method] = X_test.copy()
if method in models_to_log:
# make temporal and some fixed covariates logarithmic
negative_features = ['temperature', 'Retail', 'Grocery', 'Parks', 'Transit', 'Workplace', 'Residential']
for covar in covariates_names:
if (' t' in covar) and (covar.split(' ')[0] not in negative_features):
# print(covar)
X_train_train_to_use[method][covar] = np.log(
(X_train_train_to_use[method][covar] + 1).astype(float))
X_train_val_to_use[method][covar] = np.log(
(X_train_val_to_use[method][covar] + 1).astype(float))
X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
fix_log_list = ['total_population', 'population_density', 'area', 'median_household_income',
'houses_density', 'airport_distance', 'deaths_per_100000']
for covar in fix_log_list:
if covar in covariates_names:
X_train_train_to_use[method][covar] = np.log(
(X_train_train_to_use[method][covar] + 1).astype(float))
X_train_val_to_use[method][covar] = np.log(
(X_train_val_to_use[method][covar] + 1).astype(float))
X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
y_train_date = (pd.DataFrame(y_train_train_date).append(pd.DataFrame(y_train_val_date))).reset_index(drop=True)
y_train_train = np.array(y_train_train_date['Target']).reshape(-1)
y_train_val = np.array(y_train_val_date['Target']).reshape(-1)
y_test = np.array(y_test_date['Target']).reshape(-1)
y_train = np.array(
(pd.DataFrame(y_train_train).append(pd.DataFrame(y_train_val))).reset_index(drop=True)).reshape(-1)
print("y_train shape:", y_train.shape)
print("y_test shape:", y_test.shape)
# find best loss
# print(best_loss)
# if (h == 1):
# best_loss = update_best_loss('none_mixed_model', spatial_mode, None, best_loss, X_train_train_to_use,
# X_train_val_to_use, \
# y_train_train, y_train_val, None, None,
# data.columns.drop(['Target', 'date of day t', 'county_fips']), \
# numberOfCovariates, maxC)
print(best_loss)
print('force_features len: ', len(force_features))
covariates_list = []
covariates_list = force_features.copy()
print('covariates_list len:', len(covariates_list))
# covariates are sorted by their correlation with Target. We start from the first important covariate and
# in each loop we add the next important one
loom = ProcessLoom(max_runner_cap=len(base_data.columns) * len(none_mixed_methods) + 5)
indx_c = 0
for c in covariates_names: # iterate through sorted covariates
indx_c += 1
# print('h=', h, ' c=', indx_c)
for covariate in data.columns: # add all historical covariates of this covariate and create a feature
pure_name = c.split(' ')[0]
cov_temp = covariate.split(' ')[0]
if pure_name == cov_temp and pure_name not in force_features:
covariates_list.append(covariate)
# print('covariates_list:', covariates_list)
for method in none_mixed_methods:
X_train_train_temp = X_train_train_to_use[method][covariates_list]
X_train_val_temp = X_train_val_to_use[method][covariates_list]
# print(X_train_train_temp.columns.values)
# print('X_train_train_temp shape:', X_train_train_temp.shape)
# print('X_train_val_temp shape:', X_train_val_temp.shape)
# print('y_train_train shape:', y_train_train.shape)
# print('y_train_val shape:', y_train_val.shape)
loom.add_function(parallel_run,
[method, X_train_train_temp, X_train_val_temp, y_train_train, y_train_val, best_loss,
indx_c])
if indx_c >= maxC:
break
print('covariates_list len:', len(covariates_list))
print('covariates_list:', covariates_list)
# run the processes in parallel
parallel_outputs['non_mixed'] = loom.execute()
ind = 0
for c in range(1, numberOfCovariates + 1):
for method in none_mixed_methods:
y_prediction[method][(h, c)], y_prediction_train[method][(h, c)] = parallel_outputs['non_mixed'][ind][
'output']
ind += 1
if c == maxC:
break
# for method in none_mixed_methods:
# print(y_prediction[method].keys())
# print(np.isnan(y_prediction[method][(h, 4)]))
# print(y_prediction[method][(h, 4)].shape)
# # save the entire session for each h and c
# filename = env_address + 'validation.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
# find best loss
# if h == 1:
# best_loss = update_best_loss('mixed_model', spatial_mode, None, best_loss, None, None, y_train_train, \
# y_train_val, y_prediction_train, y_prediction, None, \
# numberOfCovariates, maxC)
print(best_loss)
# initiate loom for parallel processing
loom = ProcessLoom(max_runner_cap=len(base_data.columns) * len(mixed_methods) + 5)
indx_c = 0
for c in range(1, numberOfCovariates + 1):
indx_c += 1
for mixed_method in mixed_methods:
y_predictions_test, y_predictions_train = [], []
# Construct the outputs for the testing dataset of the 'MM' methods
y_predictions_test.extend([y_prediction['GBM'][(h, c)], y_prediction['GLM'][(h, c)],
y_prediction['KNN'][(h, c)], y_prediction['NN'][(h, c)]])
y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
# Construct the outputs for the training dataset of the 'MM' methods
y_predictions_train.extend([y_prediction_train['GBM'][(h, c)], y_prediction_train['GLM'][(h, c)],
y_prediction_train['KNN'][(h, c)], y_prediction_train['NN'][(h, c)]])
y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
loom.add_function(mixed_parallel_run,
[mixed_method, X_train_mixedModel, X_test_mixedModel, y_train_train, y_train_val,
best_loss])
if c == maxC:
break
# run the processes in parallel
parallel_outputs['mixed'] = loom.execute()
ind = 0
for c in range(1, numberOfCovariates + 1):
for mixed_method in mixed_methods:
y_prediction[mixed_method][(h, c)], y_prediction_train[mixed_method][(h, c)] = \
parallel_outputs['mixed'][ind]['output']
y_prediction[mixed_method][(h, c)] = np.array(y_prediction[mixed_method][(h, c)]).ravel()
y_prediction_train[mixed_method][(h, c)] = np.array(y_prediction_train[mixed_method][(h, c)]).ravel()
ind += 1
if c == maxC:
break
# for method in mixed_methods:
# print(y_prediction[method].keys())
# print(np.isnan(y_prediction[method][(h, 4)]))
# print(y_prediction[method][(h, 4)].shape)
# # save the entire session for each h and c
# filename = env_address + 'validation.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
        number_of_improved_methods = 0  # count improved methods so the test can be run early if no method has improved at the current h
indx_c = 0
print('force_features len: ', len(force_features))
covariates_list = []
covariates_list = force_features.copy()
covariates_list.append('county_fips')
covariates_list.append('date of day t')
print('covariates_list len: ', len(covariates_list))
# covariates_list = ['county_fips', 'date of day t']
# covariates_list.extend(force_features.copy())
for c in covariates_names: # iterate through sorted covariates
indx_c += 1
for covariate in data.columns: # add all historical covariates of this covariate and create a feature
pure_name = c.split(' ')[0]
cov_temp = covariate.split(' ')[0]
if pure_name == cov_temp and pure_name not in force_features:
covariates_list.append(covariate)
y_val = np.array(y_train_val_date['Target']).reshape(-1)
for method in methods:
X_train_train_temp = X_train_train_to_use[method][covariates_list]
X_train_val_temp = X_train_val_to_use[method][covariates_list]
X_test_temp = X_test_to_use[method][covariates_list]
validation_errors['MAE'][method][(h, indx_c)], validation_errors['MAPE'][method][(h, indx_c)], \
validation_errors['adj-R2'][method][(h, indx_c)], validation_errors['sec'][method][(h, indx_c)], \
validation_errors['MASE'][method][(h, indx_c)], country_errors = \
get_errors(h, indx_c, method, y_prediction[method][(h, indx_c)],
y_prediction_train[method][(h, indx_c)], y_train_val_date,
y_train_train_date, regular_data, train_val_MASE_denominator[h],
numberOfSelectedCounties, target_name, mode='val')
# find best errors
for error in error_names:
if validation_errors[error][method][(h, indx_c)] < minError[method][error]:
minError[method][error] = validation_errors[error][method][(h, indx_c)]
best_h[method][error] = h
# we should not consider force_features
best_c[method][error] = indx_c
if error == 'MAPE':
number_of_improved_methods += 1
if error == 'MAPE' and method != 'MM_GLM' and method != 'MM_NN':
historical_X_train[method] = (X_train_train_temp.append(X_train_val_temp)).reset_index(
drop=True)
historical_X_test[method] = X_test_temp
historical_y_train[method] = y_train
historical_y_test[method] = y_test
historical_y_train_date[method] = y_train_date
historical_y_test_date[method] = y_test_date
if indx_c == maxC:
break
# # save the entire session for each h and c
# filename = env_address + 'validation.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
# # save the entire session for each h
# filename = env_address + 'validation.out'
# my_shelf = shelve.open(filename, 'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = locals()[key]
# except:
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
# push the file of outputs
push('logs of h=' + str(h) + ' added')
        # we run the test if none of the models have improved at the current h or if we have passed half of maxHistory
if (number_of_improved_methods == -1): ###########################
print('jump to test process')
test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
val_test_MASE_denominator, future_mode, test_address, env_address, mail_address)
# plot table for best results
table_data = []
for method in methods:
table_data.append([best_h[method]['MAPE'], best_c[method]['MAPE'], round(minError[method]['MAE'], 2),
round(minError[method]['MAPE'], 2), round(minError[method]['adj-R2'], 2),
round(minError[method]['sec'], 2), round(minError[method]['MASE'], 2)])
    table_name = 'table_of_best_validation_results'
plot_table(table_data, columns_table, methods, table_name, mode='val')
# plot the results of methods on validation set
for error in error_names:
plot_results(3, 2, numberOfCovariates, methods, history, validation_errors[error], complete_error_names[error])
# mail the validation results
selected_for_email = [validation_address]
# zip_file_name = 'validation results for h =' + str(maxHistory) + ' #counties=' + str(numberOfSelectedCountiesname)
# make_zip(selected_for_email, zip_file_name)
# send_email(zip_file_name + '.zip')
push('plots added')
################################################################################################################# test zone
test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
val_test_MASE_denominator, future_mode, test_address, env_address, mail_address)
print(best_loss)
if __name__ == "__main__":
begin = time.time()
if maxHistory < 1:
print('Maximum History Must Be Positive!')
sys.exit()
future_mode = False
future_features = []
if r >= 28:
# future_mode = True
future_features = ['social-distancing-travel-distance-grade', 'social-distancing-encounters-grade',
'social-distancing-total-grade'] # sorted by their mrmr rank
if target_mode in ['weeklyaverage','augmentedweeklyaverage']:
r //= 7
maxHistory //= 7
test_size //= 7
force_features = []
    force_mode = 0  # force_mode determines how many of the future features must be forced (i.e. used in all iterations)
if future_mode:
for f in range(force_mode):
force_features.append('future-' + future_features[f])
# make directories for saving the results
validation_address = './' + str(argv[1]) + '/' + 'results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
maxHistory) + '/validation/'
test_address = './' + str(argv[1]) + '/' + 'results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
maxHistory) + '/test/'
env_address = './' + str(argv[1]) + '/' + 'results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
maxHistory) + '/session_parameters/'
mail_address = './' + str(argv[1]) + '/' + 'results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
maxHistory) + '/email'
if not os.path.exists(mail_address):
os.makedirs(mail_address)
if not os.path.exists(test_address):
os.makedirs(test_address)
if not os.path.exists(validation_address):
os.makedirs(validation_address)
if not os.path.exists(env_address):
os.makedirs(env_address)
push('new folders added')
models_to_log = ['NN', 'GLM', 'GBM'] # models we want to make the features logarithmic for them, we remove KNN
main(maxHistory)
end = time.time()
push('final results added')
print("The total time of execution in minutes: ", round((end - begin) / 60, 2))
| [
"[email protected]"
] | |
a95badfd54dbd2302101d0cf2b2569e911d8f07a | 2cb037e15ffd1c97a1c85acfb95dc63cebe1e961 | /MoEL/main.py | d4bb3c2e3a7e23c1b91e7572e013ea44cf54be11 | [
"MIT"
] | permissive | Ravikiran2402/MoEL | 4f399d6d1219782c79ae463f09f52558e3bbce6a | 9fde3a8a3744521b822b46915515a22de3d4c053 | refs/heads/master | 2021-05-17T12:51:00.019321 | 2020-04-20T12:49:40 | 2020-04-20T12:49:40 | 250,785,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,589 | py | from utils.data_loader import prepare_data_seq
from utils import config
from model.transformer import Transformer
from model.transformer_mulexpert import Transformer_experts
from model.common_layer import evaluate, count_parameters, make_infinite
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from copy import deepcopy
from tqdm import tqdm
import os
import time
import numpy as np
import math
from tensorboardX import SummaryWriter
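# fix the random seeds and make cuDNN deterministic so that runs are reproducible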
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
data_loader_tra, data_loader_val, data_loader_tst, vocab, program_number = prepare_data_seq(batch_size=config.batch_size)
#config.test = True
f=open("log.txt","w")
f.write("Iteration loss_val ppl_val bce_val acc_val bleu_g bleu_b\n")
f.close()
tf=open("train_log.txt","w")
tf.write("Iteration loss ppl bce_val acc_val\n")
tf.close()
if(config.test):
print("Test model",config.model)
if(config.model == "trs"):
model = Transformer(vocab,decoder_number=program_number, model_file_path=config.save_path, is_eval=True)
elif(config.model == "experts"):
model = Transformer_experts(vocab,decoder_number=program_number, model_file_path=config.save_path, is_eval=True)
if (config.USE_CUDA):
model.cuda()
model = model.eval()
#print(model.summary())
loss_test, ppl_test, bce_test, acc_test, bleu_score_g, bleu_score_b= evaluate(model, data_loader_tst ,ty="test", max_dec_step=50)
print("Exiting test loop")
exit(0)
if(config.model == "trs"):
model = Transformer(vocab,decoder_number=program_number)
for n, p in model.named_parameters():
if p.dim() > 1 and (n !="embedding.lut.weight" and config.pretrain_emb):
xavier_uniform_(p)
elif(config.model == "experts"):
model = Transformer_experts(vocab,decoder_number=program_number)
for n, p in model.named_parameters():
if p.dim() > 1 and (n !="embedding.lut.weight" and config.pretrain_emb):
xavier_uniform_(p)
print("MODEL USED",config.model)
print("TRAINABLE PARAMETERS",count_parameters(model))
print("Config.Oracle : ", config.oracle)
check_iter = 2000
try:
if (config.USE_CUDA):
model.cuda()
model = model.train()
best_ppl = 1000
patient = 0
writer = SummaryWriter(log_dir="save/log/")
weights_best = deepcopy(model.state_dict())
data_iter = make_infinite(data_loader_tra)
for n_iter in tqdm(range(1000000)):
loss, ppl, bce, acc = model.train_one_batch(next(data_iter),n_iter)
writer.add_scalars('loss', {'loss_train': loss}, n_iter)
writer.add_scalars('ppl', {'ppl_train': ppl}, n_iter)
writer.add_scalars('bce', {'bce_train': bce}, n_iter)
writer.add_scalars('accuracy', {'acc_train': acc}, n_iter)
tf=open("train_log.txt","a")
tf.write(str(n_iter)+" ")
tf.write(str(loss)+" ")
tf.write(str(ppl)+" ")
tf.write(str(bce)+" ")
tf.write(str(acc)+"\n")
tf.close()
if(config.noam):
writer.add_scalars('lr', {'learning_rate': model.optimizer._rate}, n_iter)
if((n_iter+1)%check_iter==0):
model = model.eval()
model.epoch = n_iter
model.__id__logger = 0
loss_val, ppl_val, bce_val, acc_val, bleu_score_g, bleu_score_b= evaluate(model, data_loader_val ,ty="valid", max_dec_step=50)
writer.add_scalars('loss', {'loss_valid': loss_val}, n_iter)
writer.add_scalars('ppl', {'ppl_valid': ppl_val}, n_iter)
writer.add_scalars('bce', {'bce_valid': bce_val}, n_iter)
writer.add_scalars('accuracy', {'acc_valid': acc_val}, n_iter)
model = model.train()
#torch.save(model, "saved_models_testing/saved_model{}_2603_0.3.pt".format(n_iter+1))
model.save_model(ppl_val,n_iter,0 ,0,bleu_score_g,bleu_score_b)
f=open("log.txt","a")
f.write(str(n_iter)+" ")
f.write(str(loss_val)+" ")
f.write(str(ppl_val)+" ")
f.write(str(bce_val)+" ")
f.write(str(acc_val)+" ")
f.write(str(bleu_score_g)+" ")
f.write(str(bleu_score_b)+"\n")
f.close()
#if (config.model == "experts" and n_iter<13000):
# continue
# if(ppl_val <= best_ppl):
# best_ppl = ppl_val
# patient = 0
# model.save_model(best_ppl,n_iter,0 ,0,bleu_score_g,bleu_score_b)
# weights_best = deepcopy(model.state_dict())
#else:
# patient += 1
# if(patient > 2): break
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
print("After the Exiting from training early\n")
## TESTING
model.load_state_dict({ name: weights_best[name] for name in weights_best })
print("After the load_state_dict \n\n")
model.eval()
print("After the eval \n\n")
model.epoch = 100
print("Data_loader_tst : ",data_loader_tst)
loss_test, ppl_test, bce_test, acc_test, bleu_score_g, bleu_score_b= evaluate(model, data_loader_tst ,ty="test", max_dec_step=50)
print("After the evaluate \n\n")
file_summary = "saved_models_testing/"+"summary.txt"
with open(file_summary, 'a+') as the_file:
the_file.write("EVAL\tLoss\tPPL\tAccuracy\tBleu_g\tBleu_b\n")
the_file.write("{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.2f}\t{:.2f}\n".format("test",loss_test,ppl_test, acc_test, bleu_score_g,bleu_score_b))
the_file.write("\n\n")
| [
"[email protected]"
] | |
23f3a9b619600c2c45f879384f3a51dda94f5c3e | 38466811d0e12a8f755bae58d7244622ef5f4d9b | /leetcode/200/141_linked_list_cycle.py | 9e5262a7110cf85407e3ce7e9183543e977219f0 | [] | no_license | andysitu/algo-problems | 4ab5a2b6591f0c0d84174b69598f30bc354ff8aa | 35c88dc747e7afa4fdd51d538bc80c4712eb1172 | refs/heads/master | 2023-06-24T15:55:39.019652 | 2021-02-26T20:31:07 | 2021-02-26T20:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def hasCycle(self, head: ListNode) -> bool:
if head == None:
return False
forward1 = head
forward2 = head
while True:
forward2 = forward2.next
if forward2 == None:
return False
forward2 = forward2.next
if forward2 == None:
return False
forward1 = forward1.next
            if forward1 == forward2:
                return True
"[email protected]"
] | |
12da0c9ef7c319623e61c8a07547828aeb19c71f | f9c2461c22ab458a8a6f2a4e2b0a81de3cf8a8eb | /Project/Code/target_generator.py | 11b541f2117fd0bee0c60e198f85c823bf709ca3 | [] | no_license | terrenceedmonds/BassGenerator | f368ac844e32dee6bd0c0ab0e9138f1930371d8d | 4df3ebd6e440ea5d8db73df012dc0fd657de3daa | refs/heads/master | 2021-06-06T03:14:55.996062 | 2016-08-16T21:39:25 | 2016-08-17T18:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | import pickle
import numpy as np
def X_y_split(split_number, matrix):
'''SPLIT NUMBER = 32: This splits the bass line in 2 equal halves:
The first 32 16ths determine the 33rd 16th note then moves down
for a total of 32 X, y pairs per bass line.'''
'''SPLIT NUMBER = 48: This splits the bass line in 3/4:
The first 48 16ths determine the 49th 16th note then moves down
for a total of 16 X, y pairs per bass line.'''
X_list = []
y_list = []
for line in matrix:
for i in xrange(64 - split_number):
X = line[i:i + split_number]
y = line[i + split_number]
X_list.append(X)
y_list.append(y)
X_array = np.array(X_list)
y_array = np.array(y_list)
return X_array, y_array
if __name__ == '__main__':
X, y = X_y_split(32, divide_features=True)
print len(X)
print len(y)
| [
"[email protected]"
] | |
8a06868b655739e92f853e34bd8de4d8bf56462c | ad04d0321197886ed2ccbf38e715181ee1b9bf64 | /tasks/201409_roadbed_centerline/time_test.py | 0b0891e2149663ee0704a6985d73f745e940cc37 | [] | no_license | nygeog/taxi | 20a4b525b46538bfd26807d03c3066de2739d8ef | f5b6de8455a25300624b87746460cb49ad942161 | refs/heads/master | 2016-09-11T06:59:52.782254 | 2015-04-28T19:42:54 | 2015-04-28T19:42:54 | 22,586,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import arcpy, time, datetime, csv, sys, traceback
from arcpy import env
env.overwriteOutput = True
import glob
from datetime import datetime
print str(datetime.now()) | [
"daniel.martin.sheehan@gmailcom"
] | daniel.martin.sheehan@gmailcom |
6ab964904330fa8d9bc2040184817fda24ed9179 | cda795c607c4b65027006e96ad79d3a0b34e0d95 | /imgdraw.py | 5a636135dce82aee155b08b3a8ad0866ef3f67ce | [] | no_license | kamaljahangir/10thmayML | 9b1638b6ca8d83921d23b28be80183b11b0e810d | ac1efa37c5c438ba62fd10ae1588b6d363aa2254 | refs/heads/master | 2020-06-05T10:19:15.217004 | 2018-06-21T02:17:21 | 2018-06-21T02:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | #!/usr/bin/python3
import cv2,time
import numpy as np
import matplotlib.pyplot as plt
# reading images
img=cv2.imread('dogs.jpg',cv2.IMREAD_COLOR)
#img=np.zeros((512,512,3),np.uint8)
#img=img[0:100,0:200]
print(img.shape)
time.sleep(1)
cv2.line(img,(10,20),(200,200),(255,0,0),5)
cv2.rectangle(img,(150,150),(350,350),(20,30,200),2)
cv2.circle(img,(400,200),100,(220,130,200),-1)
cv2.circle(img,(400,200),100,(220,130,200),-1)
font=cv2.FONT_HERSHEY_SIMPLEX
# start_point,font,font_size
cv2.putText(img,'GOOGLE',(10,150),font,3,(0,0,255),10,cv2.LINE_AA)
cv2.imshow('draw',img)
k=cv2.waitKey(0)
if k == 27 : # for esc button
cv2.destroyAllWindows()
elif k == ord('s') :
cv2.imwrite('save.jpg',img1)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
3be5af56db9452ae4f52966fd2ebab0bcda7af70 | 6c1ca203fef93643ea0e4d5ef11d19f54d041504 | /airflow_python_sdk/model/dag_collection_all_of.py | d8f0c3977266455d63558894d437e8944922bafc | [
"MIT"
] | permissive | marmikreal/airflow-python-sdk | 0ad03d7213101dc4d4a4dbbf654d8c1c4f78e395 | 70b5d08955a9e218fa6163b0612856e09eedce63 | refs/heads/master | 2023-03-26T19:53:21.083972 | 2021-04-02T16:31:18 | 2021-04-02T16:31:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,221 | py | """
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executing via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"name\": \"string\", \"slots\": 0, \"occupied_slots\": 0, \"used_slots\": 0, \"queued_slots\": 0, \"open_slots\": 0 } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. 
Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. # Summary of Changes | Airflow version | Description | |-|-| | v2.0 | Initial release | # Trying the API You can use a third party client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X POST 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backend` command as in the example below. ```bash $ airflow config get-value api auth_backend airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. ## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This client error response indicates that the server cannot find the requested resource. 
## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, meaning that the resource already exists ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from airflow_python_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from airflow_python_sdk.model.dag import DAG
globals()['DAG'] = DAG
class DAGCollectionAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'dags': ([DAG],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'dags': 'dags', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DAGCollectionAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
dags ([DAG]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"[email protected]"
] | |
bb6bf34f2ba9f5ee7aa802041d3c119fe086696b | b88bab8d5146f35c84b08c065f8d325b32c78421 | /Functions Advanced - Exercise/Min Max Sum.py | 0e86867375fdf297c90c7d5115b20621e3fd9ab7 | [
"MIT"
] | permissive | DiyanKalaydzhiev23/Advanced---Python | f929436b8a54e8a6e431882d7c818d152f01a01b | ed2c60bb887c49e5a87624719633e2b8432f6f6b | refs/heads/main | 2023-06-09T19:20:16.063345 | 2021-06-26T10:27:49 | 2021-06-26T10:27:49 | 357,164,435 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | nums = [int(n) for n in input().split()]
print(f"The minimum number is {min(nums)}")
print(f"The maximum number is {max(nums)}")
print(f"The sum number is: {sum(nums)}")
| [
"[email protected]"
] | |
a7e316b3e4294deab2c4be72af3994d2504b8d49 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_update_creative_response_wrapper.py | cca6a9dbe84cb7e4f9ee1164d8a578b819dd20ce | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 1,076 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.creative.model.update_creative_response_wrapper_body import UpdateCreativeResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['UpdateCreativeResponseWrapperBody'] = UpdateCreativeResponseWrapperBody
from baiduads.creative.model.update_creative_response_wrapper import UpdateCreativeResponseWrapper
class TestUpdateCreativeResponseWrapper(unittest.TestCase):
"""UpdateCreativeResponseWrapper unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateCreativeResponseWrapper(self):
"""Test UpdateCreativeResponseWrapper"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateCreativeResponseWrapper() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8eb8984f35bfb3da7d3dba72c285b6487d85fc09 | 5725113fd0e49a2ce139f61f933aee5321bb2ebe | /models/LanguageModels.py | d3c60d67467828a3c5ce73439654e36534eea0bc | [] | no_license | SaqibMamoon/VG-Bert | b3225088b5df9b5b689d6aa168943117d4b2c096 | 89a7a563096cf2729a1a8c8c75d8a36a822f56a1 | refs/heads/main | 2023-08-28T12:23:16.553825 | 2021-10-27T05:02:54 | 2021-10-27T05:02:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | import torch
import torch.nn as nn
from transformers import BertTokenizer, BertModel, BertForMaskedLM, BertConfig
from collections import OrderedDict
class Bert_base(nn.Module):
def __init__(self, dropout_rate=0.3):
super(Bert_base, self).__init__()
config = BertConfig.from_pretrained('bert-base-uncased')
config.hidden_dropout_prob = dropout_rate
config.attention_probs_dropout_prob = dropout_rate
seed_model = BertModel(config)
base_model = BertModel.from_pretrained('bert-base-uncased')
# load weight from base model to seed model
new_state_dict = OrderedDict()
for k, v in base_model.state_dict().items():
name = k # remove `module.`
new_state_dict[name] = v
# load params
seed_model.load_state_dict(new_state_dict)
self.language_model = seed_model
def forward(self, x, x_type, x_mask):
outputs = self.language_model(x, token_type_ids=x_type, attention_mask=x_mask)
encoded_layers = outputs[0]
return encoded_layers
class Bert_object(nn.Module):
def __init__(self, embedding_dim=768, dropout_rate=0.1):
super(Bert_object, self).__init__()
config = BertConfig.from_pretrained('bert-base-uncased')
config.hidden_dropout_prob = dropout_rate
config.attention_probs_dropout_prob = dropout_rate
seed_model = BertModel(config)
base_model = BertModel.from_pretrained('bert-base-uncased')
base_model.to('cpu')
# load weight from base model to seed model
new_state_dict = OrderedDict()
for k, v in base_model.state_dict().items():
name = k # remove `module.`
new_state_dict[name] = v
# load params
seed_model.load_state_dict(new_state_dict)
self.language_model = seed_model
self.embedding_dim = embedding_dim
def forward(self, x, x_segments_tensors, x_token_mask, x_mask):
batch_num = x.size()[0]
obj_num = x.size()[1]
seq_len = x.size()[2]
feature_size = self.embedding_dim
        # flatten batch input to make each phrase (attribute + object) an independent input sequence for Bert
x = x.view(batch_num*obj_num,seq_len)
x_segments_tensors = x_segments_tensors.view(batch_num*obj_num,seq_len)
x_token_mask = x_token_mask.view(batch_num*obj_num,seq_len)
outputs = self.language_model(input_ids=x, token_type_ids=x_segments_tensors, attention_mask=x_token_mask)
# pooling
outputs = outputs[0] * x_token_mask.unsqueeze(-1)
outputs = torch.mean(outputs,dim=1) # pool language representation as (attribute + object)
outputs = outputs.view(batch_num, obj_num, feature_size)
outputs = outputs * x_mask.unsqueeze(-1)
return outputs
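# Minimal usage sketch (assumptions: dummy tensor shapes only; the pretrained
# 'bert-base-uncased' weights are fetched from the Hugging Face hub on first use):
#
#     model = Bert_object(embedding_dim=768, dropout_rate=0.1)
#     ids = torch.zeros(2, 5, 8, dtype=torch.long)   # [batch, objects, seq_len] token ids
#     seg = torch.zeros_like(ids)                    # token type ids
#     tok_mask = torch.ones_like(ids)                # per-token attention mask
#     obj_mask = torch.ones(2, 5)                    # per-object mask
#     feats = model(ids, seg, tok_mask, obj_mask)    # -> [2, 5, 768]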
| [
"[email protected]"
] | |
0e5255d021aeda561c8b57f0a5c987ba0541f366 | 57a8b30e515239f0e4626425a757bc67d2ea1b57 | /py2neo/legacy/index.py | d30a293ad0052f2ffd55cbca7af332d83e85815b | [
"Apache-2.0"
] | permissive | devocasion/py2neo | adbf02d505ae8bcd0726f69db5e15642f7e107ae | 34d4cb1e4582f695734d55329f28a6c278d951ee | refs/heads/master | 2022-09-20T02:52:13.346363 | 2014-09-12T21:58:11 | 2014-09-12T22:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,934 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, unicode_literals
from py2neo.legacy.batch import LegacyWriteBatch
from py2neo.core import Service, Node, Relationship, Resource, ResourceTemplate
from py2neo.packages.jsonstream import assembled, grouped
from py2neo.packages.httpstream.numbers import CREATED
from py2neo.packages.httpstream.packages.urimagic import percent_encode, URI
__all__ = ["Index"]
class Index(Service):
""" Searchable database index which can contain either nodes or
relationships.
.. seealso:: :py:func:`Graph.get_or_create_index`
"""
__instances = {}
def __new__(cls, content_type, uri, name=None):
""" Fetch a cached instance if one is available, otherwise create,
cache and return a new instance.
:param uri: URI of the cached resource
:return: a resource instance
"""
inst = super(Index, cls).__new__(cls)
return cls.__instances.setdefault(uri, inst)
def __init__(self, content_type, uri, name=None):
Service.__init__(self)
self._content_type = content_type
key_value_pos = uri.find("/{key}/{value}")
if key_value_pos >= 0:
self._searcher = ResourceTemplate(uri)
self.bind(uri[:key_value_pos])
else:
self.bind(uri)
self._searcher = ResourceTemplate(uri.string + "/{key}/{value}")
uri = self.resource.uri
if self.graph.neo4j_version >= (1, 9):
self._create_or_fail = Resource(uri.resolve("?uniqueness=create_or_fail"))
self._get_or_create = Resource(uri.resolve("?uniqueness=get_or_create"))
else:
self._create_or_fail = None
self._get_or_create = Resource(uri.resolve("?unique"))
self._query_template = ResourceTemplate(uri.string + "{?query,order}")
self._name = name or uri.path.segments[-1]
self.__searcher_stem_cache = {}
def __repr__(self):
return "{0}({1}, {2})".format(
self.__class__.__name__,
self._content_type.__name__,
repr(self.uri.string)
)
def _searcher_stem_for_key(self, key):
if key not in self.__searcher_stem_cache:
stem = self._searcher.uri_template.string.partition("{key}")[0]
self.__searcher_stem_cache[key] = stem + percent_encode(key) + "/"
return self.__searcher_stem_cache[key]
def add(self, key, value, entity):
""" Add an entity to this index under the `key`:`value` pair supplied::
# create a node and obtain a reference to the "People" node index
alice, = graph.create({"name": "Alice Smith"})
people = graph.get_or_create_index(neo4j.Node, "People")
# add the node to the index
people.add("family_name", "Smith", alice)
Note that while Neo4j indexes allow multiple entities to be added under
a particular key:value, the same entity may only be represented once;
this method is therefore idempotent.
"""
self.resource.post({
"key": key,
"value": value,
"uri": entity.uri.string,
})
return entity
def add_if_none(self, key, value, entity):
""" Add an entity to this index under the `key`:`value` pair
supplied if no entry already exists at that point::
# obtain a reference to the "Rooms" node index and
# add node `alice` to room 100 if empty
rooms = graph.get_or_create_index(neo4j.Node, "Rooms")
rooms.add_if_none("room", 100, alice)
If added, this method returns the entity, otherwise :py:const:`None`
is returned.
"""
rs = self._get_or_create.post({
"key": key,
"value": value,
"uri": entity.uri.string,
})
if rs.status_code == CREATED:
return entity
else:
return None
@property
def content_type(self):
""" Return the type of entity contained within this index. Will return
either :py:class:`Node` or :py:class:`Relationship`.
"""
return self._content_type
@property
def name(self):
""" Return the name of this index.
"""
return self._name
def get(self, key, value):
""" Fetch a list of all entities from the index which are associated
with the `key`:`value` pair supplied::
# obtain a reference to the "People" node index and
# get all nodes where `family_name` equals "Smith"
people = graph.get_or_create_index(neo4j.Node, "People")
smiths = people.get("family_name", "Smith")
..
"""
return [
self.graph.hydrate(assembled(result))
for i, result in grouped(self._searcher.expand(key=key, value=value).get())
]
def create(self, key, value, abstract):
""" Create and index a new node or relationship using the abstract
provided.
"""
batch = LegacyWriteBatch(self.graph)
if self._content_type is Node:
batch.create(abstract)
batch.add_to_index(Node, self, key, value, 0)
elif self._content_type is Relationship:
batch.create(abstract)
batch.add_to_index(Relationship, self, key, value, 0)
else:
raise TypeError(self._content_type)
entity, index_entry = batch.submit()
return entity
def _create_unique(self, key, value, abstract):
""" Internal method to support `get_or_create` and `create_if_none`.
"""
if self._content_type is Node:
body = {
"key": key,
"value": value,
"properties": abstract
}
elif self._content_type is Relationship:
body = {
"key": key,
"value": value,
"start": abstract[0].uri.string,
"type": abstract[1],
"end": abstract[2].uri.string,
"properties": abstract[3] if len(abstract) > 3 else None
}
else:
raise TypeError(self._content_type)
return self._get_or_create.post(body)
def get_or_create(self, key, value, abstract):
""" Fetch a single entity from the index which is associated with the
`key`:`value` pair supplied, creating a new entity with the supplied
details if none exists::
# obtain a reference to the "Contacts" node index and
# ensure that Alice exists therein
contacts = graph.get_or_create_index(neo4j.Node, "Contacts")
alice = contacts.get_or_create("name", "SMITH, Alice", {
"given_name": "Alice Jane", "family_name": "Smith",
"phone": "01234 567 890", "mobile": "07890 123 456"
})
# obtain a reference to the "Friendships" relationship index and
# ensure that Alice and Bob's friendship is registered (`alice`
# and `bob` refer to existing nodes)
friendships = graph.get_or_create_index(neo4j.Relationship, "Friendships")
alice_and_bob = friendships.get_or_create(
"friends", "Alice & Bob", (alice, "KNOWS", bob)
)
..
"""
return self.graph.hydrate(assembled(self._create_unique(key, value, abstract)))
def create_if_none(self, key, value, abstract):
""" Create a new entity with the specified details within the current
index, under the `key`:`value` pair supplied, if no such entity already
exists. If creation occurs, the new entity will be returned, otherwise
:py:const:`None` will be returned::
# obtain a reference to the "Contacts" node index and
# create a node for Alice if one does not already exist
contacts = graph.get_or_create_index(neo4j.Node, "Contacts")
alice = contacts.create_if_none("name", "SMITH, Alice", {
"given_name": "Alice Jane", "family_name": "Smith",
"phone": "01234 567 890", "mobile": "07890 123 456"
})
..
"""
rs = self._create_unique(key, value, abstract)
if rs.status_code == CREATED:
return self.graph.hydrate(assembled(rs))
else:
return None
def remove(self, key=None, value=None, entity=None):
""" Remove any entries from the index which match the parameters
supplied. The allowed parameter combinations are:
`key`, `value`, `entity`
remove a specific entity indexed under a given key-value pair
`key`, `value`
remove all entities indexed under a given key-value pair
`key`, `entity`
remove a specific entity indexed against a given key but with
any value
`entity`
remove all occurrences of a specific entity regardless of
key and value
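
        For example, to remove a specific node entry from the "People" index
        (an illustrative sketch; `people` and `alice` are assumed to exist as
        in the examples above)::

            people.remove("family_name", "Smith", alice)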
"""
if key and value and entity:
t = ResourceTemplate(self.resource.uri.string + "/{key}/{value}/{entity}")
t.expand(key=key, value=value, entity=entity._id).delete()
elif key and value:
uris = [
URI(entity.resource.metadata["indexed"])
for entity in self.get(key, value)
]
batch = LegacyWriteBatch(self.graph)
for uri in uris:
batch.append_delete(uri)
batch.run()
elif key and entity:
t = ResourceTemplate(self.resource.uri.string + "/{key}/{entity}")
t.expand(key=key, entity=entity._id).delete()
elif entity:
t = ResourceTemplate(self.resource.uri.string + "/{entity}")
t.expand(entity=entity._id).delete()
else:
raise TypeError("Illegal parameter combination for index removal")
def query(self, query):
""" Query the index according to the supplied query criteria, returning
a list of matched entities::
# obtain a reference to the "People" node index and
# get all nodes where `family_name` equals "Smith"
people = graph.get_or_create_index(neo4j.Node, "People")
s_people = people.query("family_name:S*")
The query syntax used should be appropriate for the configuration of
the index being queried. For indexes with default configuration, this
should be Apache Lucene query syntax.
"""
resource = self._query_template.expand(query=query)
for i, result in grouped(resource.get()):
yield self.graph.hydrate(assembled(result))
def _query_with_score(self, query, order):
resource = self._query_template.expand(query=query, order=order)
for i, result in grouped(resource.get()):
meta = assembled(result)
yield self.graph.hydrate(meta), meta["score"]
def query_by_index(self, query):
return self._query_with_score(query, "index")
def query_by_relevance(self, query):
return self._query_with_score(query, "relevance")
def query_by_score(self, query):
return self._query_with_score(query, "score")
| [
"[email protected]"
] | |
54f9c26e1c1a8e2e8d1f6c35b715163db168de74 | ac3227ef8da077bfb871f8e515bda92ac96385f9 | /pressure17.py | 3631628a3c32772c9b545461956a5c1f062cb4a9 | [] | no_license | anlitsai/ngc2146-moment | 07dac25a981b404a55e00b5a17f314861324efd0 | 6060239f1919bb7c316dfe93465d1f5fa240bd56 | refs/heads/master | 2022-05-20T18:05:26.343243 | 2020-04-12T21:15:30 | 2020-04-12T21:15:30 | 255,170,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,365 | py | #!/usr/bin/env python
# NGC 2146
# --- import constant ------------------------- #
import math
#import pyfits
# --- constant -------------------------------- #
c=3.0e10 # light speed
pc=3.26*c*365*86400
kpc=pc*1.0e3
ev=1.6e-12
G=6.67e-8 # gravitational constant
k_B=1.38e-16 # boltzmann constant
Msun=1.99e33 # solar mass
Msun_pc2=Msun/pc**2
Lsun=3.9e33 # solar luminosity
N_A=6.02e23 # Avogadro constant
m_p=1.67e-24
E_ion_ev=13.6 # [eV]
E_bnd_ev=4.52 # [eV]
# --- parameter ------------------------------- #
print "============"
print "NGC 2146"
print "============"
print "parameters"
print "------------"
i_deg=70.0
i_pi=i_deg/180.0*math.pi
sin_i=math.sin(i_pi)
cos_i=math.cos(i_pi)
D_Mpc=17.2
X_CO2H2=1.4/3.0
H22mol=1.36
XH=X_CO2H2*H22mol
print "inclination",i_pi,sin_i,cos_i
v_motf_kms=200.0 # km/s (Tsai et al. 2009)
v_sound_kms=1.0e3
T_mol=30.0 # K (Tsai et al. 2009)
T_xotf=1.0e6 # K
EI=8.0e62 # cm^-3 (Inui et al. 2005)
R_xotf_pc=6.3*1000*4.8/3.5 # (Inui et al. 2005)
R_xotf=R_xotf_pc*pc
V_xotf=(4.0/3.0*math.pi)*R_xotf**3 # (Inui et al. 2005)
V_xotf_Inui=(4.0/3.0*math.pi)*(6.3e3*pc)**3 # (Inui et al. 2005)
fill=0.01 # (Inui et al. 2005)
print "filling factor =", fill
n_mol=100.0 # cm^-3 (Tsai et al. 2009)
rho_mol=n_mol/N_A
n_xotf=(EI/V_xotf/fill)**(0.5) # cm^-3 (Iuni et al. 2005)
print "n_xotf =", n_xotf, "[cm^-3]"
print "n_xotf (Inui) =", 5.1e-3*fill**(-0.5), "[cm^-3]"
print "n_mol =", n_mol, "[cm^-3]"
print "rho_mol", '%.2e' %(rho_mol), "[g cm^-3]"
kT_xotf_ev=0.5e3
kT_xotf=kT_xotf_ev*ev
print "kT [ev] of xray outflow =", kT_xotf_ev,"ev"
print "kT [erg] of xray outflow =", kT_xotf,"erg"
print "V_xotf =", V_xotf
print "V_xotf (Inui) =", V_xotf_Inui
M_xotf=n_xotf*m_p*V_xotf*fill
M_xotf_Msun=M_xotf/Msun
M_xotf_Msun_Inui=1.3e8*fill**0.5
M_xotf_Inui=M_xotf_Msun_Inui*Msun
M_galdsk_Msun=8.67e10
M_galdsk_Msun=2.18e11
M_galdsk=M_galdsk_Msun*Msun
L_xotf=1.3e40 # (Inui et al. 2005)
E_xotf=1.0e56 # (Inui et al. 2005)
print "L_xotf =", L_xotf, "[erg/s]"
print "E_xotf =", E_xotf, "[erg]"
effi_x_thrm=0.3 # (Strickland & Stevens 2000)
effi_x_mech=0.01 # (Strickland & Stevens 2000)
R_motf=2.0e3*pc # (Tsai et al. 2009)
SFR=10.0 # Msun/yr (Greve et al. 2000)
SNrate= 0.15 # yr^-1 (Tarchi et al. 2000)
print "SN rate =", SNrate, "[yr^-1]"
effi_mass2rad=0.001 # (Thompson et al. 2005)
as2pc=80.0
px2as=0.2
bm2as2=3.3*2.8
R_starburst_pc=700.0 # (@tau=1# Thompson et al. 2005)
# R_starburst_pc=1000.0 # (our data)
R_starburst_pc_greve=1250.0 # (Greve 2000)
R_conti_pc=2560.0/2 # (our data) (/iapetus/data/satoki_data/ngc2146/20100130.conti89GHz )
V_starburst_pc3_greve=2.0e10 # (Greve 2000)
z_starburst_pc=40.0 # (our calculation) (iapetus:/iapetus/thesis/phd/calculation/veldisp13.sh)
z_starburst_pc_greve=500.0 # (Greve 2000)
#z_starburst_pc_greve=500.0 # (Greve 2000)
tau=10.0
d_mpc=17.2 # Mpc
alpha=1.0 # (Chevalier 1985)
beta=1.0 # (Chevalier 1985)
a1=0.3 # Tsai 2009
b1=0.32 # Tsai 2009
c1=256.39 # Tsai 2009
timescale=1.0e7 # [yr] ; ourdata
v_rms_kms=11.16
v_rms=v_rms_kms*1.0e5
def surf_den_dyn(r_kpc):
v_kms=r_kpc*c1/(r_kpc**a1+r_kpc**(1-b1))/sin_i
v=v_kms*1.0e5
r=r_kpc*kpc
r_pc=r_kpc*1000
m_dyn=r*v**2/G # Orange book p.958
m_dyn_Msun=m_dyn/Msun
sd_dyn=m_dyn/(math.pi*r**2)
sd_dyn_Msunpc2=m_dyn_Msun/(math.pi*r_pc**2)
# sd_dyn_Msunpc2=sd_dyn/Msun_pc2
z_pc=(v_rms**2/(2*math.pi*G*sd_dyn))/pc
print '%.2f' %(sd_dyn_Msunpc2),"[Msun/pc2] (r <",r_kpc,"kpc ; z =",'%.2f' %(z_pc),"pc)",'%.2f' %v_kms,"[km/s]", '%.2e' %(m_dyn_Msun),"[Msun]"
return sd_dyn_Msunpc2
sd05=surf_den_dyn(0.5)
sd08=surf_den_dyn(0.8)
sd10=surf_den_dyn(1.0)
sd12=surf_den_dyn(1.2)
sd_dyn=sd12
sd15=surf_den_dyn(1.5)
sd24=surf_den_dyn(2.4)
sd28=surf_den_dyn(2.8)
sd30=surf_den_dyn(3.0)
sd32=surf_den_dyn(3.2)
#sd35=surf_den_dyn(3.5)
#sd50=surf_den_dyn(5)
#sd100=surf_den_dyn(10)
# --- gravitational pressure (Gunn & Gott 1972) ------- #
#def surf_den_kazushi(S_CO_JyBmMSpx,px,i_deg,D_Mpc):
def surf_den_kazushi(S_CO_JyBmMS,px,type):
n_as=px*px2as**2
n_bm=n_as/bm2as2
S_CO=S_CO_JyBmMS/1000*n_bm
I_CO=S_CO/n_as
sd_Msunpc2=(5.0e2*cos_i*I_CO)*XH
M_gas_Msun=(1.18e4*D_Mpc**2*S_CO)*XH#*cos_i
M_gas=M_gas_Msun*Msun
print '%.2f' %I_CO,"[Jy km/s as-2]", '%.2e' %S_CO,"[Jy km/s]",'%.0f' %n_as,"[as2]",'%.0f' %n_bm,"[beam]",'%.2f' %sd_Msunpc2,"[Msun/pc2]",'%.2e' %M_gas_Msun,"[Msun]",type
return sd_Msunpc2,M_gas
sd=surf_den_kazushi(4.1472E+02,11022,"motf")
sd=surf_den_kazushi(1.429E+03,84370,"motf")
sd=surf_den_kazushi(7.481E+02,46830,"motf")
sd=surf_den_kazushi(1.327E+03,69990,"motf")
sd=surf_den_kazushi(6.669E+02,57300,"motf")
sd_motf_Msunpc2=sd[0]
M_motf=sd[1]
sd=surf_den_kazushi(1.1243E+04,38685,"unkown")
sd=surf_den_kazushi(1.3559E+04,30802,"45as")
sd=surf_den_kazushi(1.3182E+04,31100,"mdsk")
sd=surf_den_kazushi(2.054E+04,186790,"mdsk")
sd=surf_den_kazushi(2.111E+04,180400,"mdsk")
sd=surf_den_kazushi(3.730E+03,53030,"mbbl")
sd=surf_den_kazushi(3.356E+04,64050,"0.8kpc")
sd=surf_den_kazushi(3.652E+04,54540,"0.8kpc")
sd=surf_den_kazushi(3.049E+04,93940,"1.2kpc")
sd=surf_den_kazushi(2.431E+04,128520,"1.2kpc")
sd=surf_den_kazushi(2.139E+04,166460,"1.6kpc")
sd=surf_den_kazushi(1.723E+04,214640,"1.6kpc")
sd=surf_den_kazushi(1.579E+04,256930,"2.4kpc")
sd=surf_den_kazushi(1.381E+04,301520,"2.4kpc")
sd=surf_den_kazushi(1.466E+04,281700,"2.4kpc")
sd=surf_den_kazushi(1.364E+04,306950,"2.8kpc")
sd=surf_den_kazushi(1.364E+04,306950,"tvbox;30as")
sd_motf=sd_motf_Msunpc2*Msun_pc2
sd_xotf=sd_motf*(n_xotf/n_mol)*(M_xotf/M_motf)
sd_xotf_Msunpc2=sd_xotf*Msun_pc2
print "------------"
print "calculation results"
print "------------"
print "+[ grav P = 2*pi*G*(surf_den_xry)*(surf_den_dynamical_mass) ]+"
def p_grav(sd,type):
gravP=2*math.pi*G*sd*sd_dyn*Msun_pc2
print " o", type, "grav P:", '%.2e' %(gravP)
return gravP
p_grav_motf=p_grav(sd_motf,"mol. outflow")
p_grav_xotf=p_grav(sd_xotf, "xray outflow")
# unit checked
# --- ram pressure (Gunn & Gott 1972) ----------------- #
print "------------"
print "+[ ram P = rho*(relative velocity)^2 ]+"
def v_esc(M_Msun,R_kpc):
v=math.sqrt(2*G*M_Msun*Msun/(R_kpc*1.0e3*pc))/1.0e5
print "escape velocity",v,"[km/s]"
return v
v_2kpc=v_esc(8.67e10,2)
v_1kpc=v_esc(8.67e10,1)
#E_bnd_ion_particle=(E_bnd_ev+E_ion_ev)*ev
#E_bnd_ion_mass=E_bnd_ion_particle*N_A*M_xotf_g
def ramP(effi_x1,effi_x2,type1,type2):
print " ", type1, effi_x1
print " ", type2, effi_x2
effi_x=effi_x1*effi_x2
E_ttl=E_xotf/effi_x
print " total Energy =",'%.2f' %(E_ttl), "[erg]"
E_mech=E_ttl*(1-effi_x)
v_xry_kms=math.sqrt(E_mech*2/M_xotf)/1.0e5
print " v_xry = ", '%.2f' %(v_xry_kms), "[km/s]"
v_rel_kms=v_xry_kms-v_motf_kms
v_rel=v_rel_kms*1.0e5
p=rho_mol*v_rel**2
p_K=p/k_B
print " v_rel = ", '%.2f' %(v_rel_kms), "[km/s]"
print " o ram P: ", '%.2e' %(p),"[dyne cm^-2]"
print " o ram P: ", '%.2e' %(p_K), "[K cm^-3]"
return p
# unit checked
type2="Lx increasing factor"
print "the total energy released by SNe and stellar winds (doesn't include the K.E)"
p_ram1=ramP(0.1,1,"thermalization efficiency (lower)",type2)
p_ram1=ramP(0.3,1,"thermalization efficiency",type2)
p_ram1=ramP(0.5,1,"thermalization efficiency",type2)
p_ram1=ramP(1,1,"thermalization efficiency (upper)",type2)
print "radiating the mechanical energy supplied by the starburst"
p_ram1=ramP(0.01,10,"starburst energy injection rate (thin-disk)",type2)
p_ram1=ramP(0.05,3,"starburst energy injection rate (thick-disk, lower)",type2)
p_ram1=ramP(0.2,3,"starburst energy injection rate (thick-disk, upper)",type2)
# --- SN explosions pressure effect on hot ISM (Thompson et al 2005) -- #
print "------------"
print "+[ shock P = 10^-12*E_xry^(17/14)*n_mol^(-4/7)*SNrate/Vol ]+"
def A_pc2(R):
A=math.pi*R**2
# print "Starburst Area = ", '%.2e' %(A) , "[pc^2]"
return A
def V_pc3(R,z):
V=math.pi*R**2*z
return V
# SNrate_V=SNrate/V_starburst
V_conti=V_pc3(R_conti_pc,z_starburst_pc)
V_greve1=V_pc3(R_starburst_pc_greve,z_starburst_pc_greve)
V_greve2=2.0e10 # (Greve 2000)
E_init=E_xotf
n_ambi=n_mol
def p_SNhot(V,type):
rate_V=SNrate/V
P=1.0e-12*(E_init/1.0e51)**(17.0/14)*(n_ambi/0.01)**(-4.0/7)*(rate_V/1.0e-13) # [erg cm^-3]
print " data from", type
print " starburst Volume = ", '%.2e' %(V), "[pc^3]"
print " o SN shock-heated P: ", '%.2e' %(P)
# print "SN explosion P (shock-heated hot ISM): ", P
return P
# E_init [erg]
# n_ambi [cm^-3]
# SNrate_V [yr^-1 pc^-3]
# SNrate_V=SNrateate per volume
p_SNhot_conti=p_SNhot(V_conti,"89GHz Continuum")
p_SNhot_greve1=p_SNhot(V_greve1,"Greve 2000 (1)")
p_SNhot_greve2=p_SNhot(V_greve2, "Greve 2000 (2)")
# unit checked
# --- thermal pressure -------------------------------- #
print "------------"
print "+[ thermal P = n*k*T ]+"
p_thm_mol=n_mol*k_B*T_mol
p1=1000*k_B*100
p2=100*k_B*10
print "pppp",p1,p2
p_thm_xotf=2*n_xotf*kT_xotf
p_thm_xotf_inui=4.9e-12*fill**(-0.5)
print " o molecular gas thermal P: ", p_thm_mol
print " o ionized gas thermal P: ", '%.2e' %(p_thm_xotf)
print " o ionized gas thermal P (Inui): ", p_thm_xotf_inui
# unit checked
# --- radiation pressure (Thompson et al 2005) -------- #
print "------------"
print "+[ radiation P = effic*SFR/Area*c ]+"
# SFR_A=SFR/(A_starburst)
A_starburst=A_pc2(R_conti_pc)*pc**2
SFR_A=SFR/(A_starburst)
# when tau << 1
p_rad_thin=c*effi_mass2rad*SFR_A
# when tau >= 1
p_rad_thick=c*effi_mass2rad*SFR_A*(1.0/2)*tau
#print " o optically thin radiation P: ", '%.2e' %(p_rad_thin)
print " o optically thick radiation P: ", '%.2e' %(p_rad_thick)
# unit checked
# --- SN explosion pressure effect on cold ISM (Thompson et al 2005) -- #
print "------------"
print "+[ SN explosion P = 5*n_mol^(-1/4)*E_xry^(13/14)*P_rad ]+"
# E_init=E_xotf
# n_ambi=n_mol
p_SN_cold=5*(n_ambi/1.0)**(-1.0/4)*(E_init/1.0e51)**(13.0/14)*p_rad_thick
print " o SN explosion P (cold ISM): ", '%.2e' %(p_SN_cold)
print "------------"
# unit checked
# --------------------------------------------- #
# --- reference ------------------------------- #
# Chevalier et al. 2001, ApJ, 558, L27
# Chevalier et al. 1985, Nature, 317, 44
# Greve et al. 2000, AA, 364, 409
# Gunn & Gott 1972, ApJ, 176, 1
# Inui et al. 2005, PASJ, 57, 135
# Sakamoto et al. 1995, AJ, 110, 2075
# Strickland & Stevens 2000, MNRAS, 314, 511
# Tarchi et al. 2000, 358, 95
# Tsai et al. 2009, PASJ, 61, 237
# Thompson et al. 2005, ApJ, 630, 167
# Vollmer et al. 2001, ApJ, 561, 708
# Vollmer et al. 2005, AA, 441, 473
exit
| [
"[email protected]"
] | |
228b011f6c42454451196d21cac044fbf57031d5 | 9df267922befa6405ee4bca2066763507703f50f | /luckBalance.py | 710bc3b3f80dd9a93e6ed17a9973c67d6d761a69 | [] | no_license | sujiea/hackerrank-codility | 6b807506edc8cec46bb1cc9ffc922caa41435f69 | d8a8d903227ecf892ba2a7aba497245812e0de62 | refs/heads/main | 2023-04-01T11:41:04.453250 | 2021-04-02T14:04:59 | 2021-04-02T14:04:59 | 345,200,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | def luckBalance(k, contests):
loss = 0
contests.sort(key=lambda a: a[0])
lossnum = 0
for i in range(len(contests)-1, -1, -1):
curl = contests[i][0]
if contests[i][1] == 1:
if lossnum < k:
lossnum += 1
loss += curl
else:
loss -= curl
else:
loss += curl
return loss
print(luckBalance(5,[[13,1],[10,1],[9,1],[8,1],
[13,1],
[12,1],
[18,1],
[13,1]]))
print(luckBalance(3,[[5,1], [2,1], [1,1], [8,1], [10,0], [5,0]]))
| [
"[email protected]"
] | |
f38c1c3c095e858982af91a7da7943c0b9f6fb8a | 91a35132e1aa7235615b5735899fa5522647369a | /visualization/assign.py | c1696f9fef42cc09c58d44f73f33a4b7a057a427 | [] | no_license | bana513/TextSummarization | ceb7857d9d3eeebb061d2feab5a5efe03dcc2300 | 80a6da328095eb4381361b1afb52074b2bc60383 | refs/heads/master | 2020-12-31T10:20:06.845664 | 2020-05-23T22:25:05 | 2020-05-23T22:25:05 | 238,997,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | from langdetect import detect
from transformers import BertTokenizer
import pandas as pd
def assign_lang(x):
try:
res = detect(x)
except Exception as e:
res = "unk"
return res
def contains_ekezet(x):
if 'á' in x:
return True
elif 'é' in x:
return True
elif 'í' in x:
return True
elif 'ó' in x:
return True
elif 'ú' in x:
return True
elif 'ü' in x:
return True
    elif 'ű' in x:
return True
elif 'ö' in x:
return True
elif 'ő' in x:
return True
return False
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
def tokenize(x):
content, summary = x
if pd.isna(content) or pd.isna(summary): return None, None
content = tokenizer.encode(content)
if 20 < len(content) <= 512:
summary = tokenizer.encode(summary)
if 8 < len(summary) <= len(content) // 2:
return content, summary
return None, None | [
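# Usage sketch (assumption: `df` is a pandas DataFrame whose `content` and
# `summary` columns hold the article text and its reference summary; pairs
# failing the length filters come back as (None, None)):
#
#     pairs = df[["content", "summary"]].apply(tokenize, axis=1)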
"[email protected]"
] | |
9f77a46db14ad5aed4ec12a3273abab3140a8cc8 | 33118eb668f600e7635e72b6caaf47cc893741ce | /Day3/day3.py | 7aa407edbcc53f2bd1e23f5e0cd3247919c7b8c9 | [] | no_license | darpan45/100DaysofCode | c5010bf55ac1330a837212020172c1044895f5f6 | afcd7edbabb77525fb583843b09fc21e94c5d8bc | refs/heads/master | 2023-04-07T07:34:16.483521 | 2021-04-21T15:57:47 | 2021-04-21T15:57:47 | 355,512,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,530 | py | ##Basic If Else Statement
# print("Welcome to RollerCoaster!")
# height=int(input("What is your height ? "))
# if height >= 120 :
# print("You are eligible for rollercoaster")
# else:
# print("You are not eligible for rollercoaster")
##Odd or Even Number
# num=int(input("Enter a number : "))
# if num % 2==0:
# print("Number is even.")
# else:
# print("Number is odd.")
##Nested If Else
# print("Welcome to RollerCoaster!")
# height=int(input("What is your height ? "))
# if height >= 120 :
# print("You are eligible for rollercoaster")
# age=int(input("What is your age ? "))
# if age >18:
# print("You need to pay 12$")
# elif age >12 and age <18 :
# print("You need to pay 7$")
# else:
# print("You need to pay 5$")
# else:
# print("You are not eligible for rollercoaster")
#BMI Calculator 2.0
# weight=float(input("Enter your weight in kgs : "))
# height=float(input("Enter your height in m : "))
# bmi=weight/height**2
# bmi_round =float("{:.2f}".format(bmi))
# print(bmi_round)
# if bmi_round < 18.5:
# print("You are underweight.")
# elif bmi_round >=18.5 and bmi_round <25:
# print("You have normal weight.")
# elif bmi_round >=25 and bmi_round <30:
# print("You are overweight.")
# elif bmi_round >=30 and bmi_round <35:
# print("You are obese.")
# elif bmi_round >= 35:
# print("You are clinically obese.")
#Leap year or not
# year =int(input("Enter a year : "))
# if year %4 ==0 :
# if year % 400 !=0 and year %100 ==0:
# print(f"{year} is not a leap year ")
# else:
# print(f"{year} is a leap year ")
# else:
# print(f"{year} is not a leap year ")
#PIZZA ORDER
# print("Welcome to Python Pizza Deliveries!!")
# size=input("What size pizza do you want ? S, M or L ")
# add_pepperoni = input("Do you want pepperoni ? Y or N ")
# extra_cheese=input("Do you want extra cheese ? Y or N ")
# bill=0
# if size == "S" :
# bill +=15
# if add_pepperoni == "Y" :
# bill +=2
# if extra_cheese == "Y":
# bill +=1
# elif size == "M" :
# bill +=20
# if add_pepperoni == "Y" :
# bill +=3
# if extra_cheese == "Y":
# bill +=1
# elif size == "L" :
# bill +=25
# if add_pepperoni == "Y" :
# bill +=3
# if extra_cheese == "Y":
# bill +=1
# print(f"Total bill is {bill} .")
##LOVE CALCULATOR
# print("Welcome to Love Calculator!")
# name1=(input("Enter your name : "))
# name2=(input("Enter their name : "))
# lower_name1=name1.lower()
# lower_name2=name2.lower()
# T=lower_name1.count('t')+lower_name2.count('t')
# R=lower_name1.count('r')+lower_name2.count('r')
# U=lower_name1.count('u')+lower_name2.count('u')
# E=lower_name1.count('e')+lower_name2.count('e')
# print(f"T occurs {T} times .")
# print(f"R occurs {R} times .")
# print(f"U occurs {U} times .")
# print(f"E occurs {E} times .")
# TRUE=T+R+U+E
# print(f"Total {TRUE}")
# L=lower_name1.count('l')+lower_name2.count('l')
# O=lower_name1.count('o')+lower_name2.count('o')
# V=lower_name1.count('v')+lower_name2.count('v')
# E=lower_name1.count('e')+lower_name2.count('e')
# print(f"L occurs {L} times .")
# print(f"O occurs {O} times .")
# print(f"V occurs {V} times .")
# print(f"E occurs {E} times .")
# LOVE=L+O+V+E
# print(f"Total {LOVE}")
# score=str(TRUE)+str(LOVE)
# scr=int(score)
# # print(f"Your score is {score}")
# if scr<10 or scr>90:
# print(f"Your score is {scr} ,you go together like coke and mentos.")
# elif scr >40 and scr <50 :
# print(f"Your score is {scr} , you are alright together.")
# else:
# print(f"Your score is {scr}.")
##TREASURE ISLAND GAME
# print('''
# *******************************************************************************
# | | | |
# _________|________________.=""_;=.______________|_____________________|_______
# | | ,-"_,="" `"=.| |
# |___________________|__"=._o`"-._ `"=.______________|___________________
# | `"=._o`"=._ _`"=._ |
# _________|_____________________:=._o "=._."_.-="'"=.__________________|_______
# | | __.--" , ; `"=._o." ,-"""-._ ". |
# |___________________|_._" ,. .` ` `` , `"-._"-._ ". '__|___________________
# | |o`"=._` , "` `; .". , "-._"-._; ; |
# _________|___________| ;`-.o`"=._; ." ` '`."\` . "-._ /_______________|_______
# | | |o; `"-.o`"=._`` '` " ,__.--o; |
# |___________________|_| ; (#) `-.o `"=.`_.--"_o.-; ;___|___________________
# ____/______/______/___|o;._ " `".o|o_.--" ;o;____/______/______/____
# /______/______/______/_"=._o--._ ; | ; ; ;/______/______/______/_
# ____/______/______/______/__"=._o--._ ;o|o; _._;o;____/______/______/____
# /______/______/______/______/____"=._o._; | ;_.--"o.--"_/______/______/______/_
# ____/______/______/______/______/_____"=.o|o_.--""___/______/______/______/____
# /______/______/______/______/______/______/______/______/______/______/_____ /
# *******************************************************************************
# ''')
# print("Welcome to Treasure Island !!")
# print("Your mission is to find the treasure.")
# direction=input("You're at a cross road.Where do you want to go. Type 'left' or 'right' ")
# if direction == 'left':
# lake=input("You come to a lake.There is an island in the middle of lake. Type 'wait' to wait for the boat. Type 'swim' to swim across . ")
# if lake =="wait":
# door =input("You arrive at an island unharmed . There is a house with 3 doors. One red,one yellow and one blue.Which colour do you choose? ")
# if door == "red":
# print("You are burned in fire.")
# print("GAME OVER!")
# elif door =="blue":
# print("You have been eaten by Beasts.")
# print("GAME OVER!")
# elif door=="yellow":
# print("Amazing!! You have won the game .")
# else:
# print("GAME OVER!!")
# else:
# print("You have been attacked by trout.")
# print("GAME OVER!")
# else:
# print("You have fallen into hole.")
# print("GAME OVER!")
# print("CONGRATULATIONS DAY3 COMPLETE!!!!!!!!!!!") | [
"[email protected]"
] | |
4c9202401909ac467c699ae421769fbf548167c7 | 32bbbaf51f8951352aa4d9dcc9e96268a0caeed8 | /src/utilty/sql_connection.py | 28db10f01247a2fa10c8a9a016a89518fc2c0f1d | [] | no_license | saurav01235/incubyte-task | 0e1f8b0e97e34fe1ec960fd13bddb114a2b5bc5f | 9b1f569ed7f67e149356a4d5c02d29fca3b59d77 | refs/heads/main | 2023-06-02T05:00:20.975028 | 2021-06-20T20:20:47 | 2021-06-20T20:20:47 | 378,730,792 | 0 | 0 | null | 2021-06-20T20:20:47 | 2021-06-20T20:06:12 | null | UTF-8 | Python | false | false | 441 | py | import mysql.connector
class MySqlConnection:
def __init__(self, host, user_name, password, db_name):
self.mydb = mysql.connector.connect(
host=host,
user=user_name,
password=password,
database=db_name
)
def get_connection(self):
return self.mydb
def get_cursor(self):
return self.mydb.cursor()
def commit(self):
self.mydb.commit()
| [
"[email protected]"
] | |
6a02b1b9b4039b5905a62c954a5fa994a2226388 | d0397940ac1267c40db974e54ddf3181380370d7 | /website_personal/urls.py | f42508d6a7c4accf845696fde4ac9dc67cae0b76 | [] | no_license | ngvan98/WebPersonal | c632e4b7bd4de5d5915c05747ed953c46bbcab9d | 06a8b039547c44c3faab210aa0c78ccb6a3e8f6e | refs/heads/master | 2023-05-02T14:17:04.278066 | 2021-05-25T14:30:46 | 2021-05-25T14:30:46 | 370,722,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """website_personal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('base.urls'))
]
| [
"[email protected]"
] | |
c343d8ab5d38217cccb70f3d7d86c16fa61d74e1 | cdf541125b94bb01a4068d1a7b4d171139e5ef00 | /eml2txt.pye | ced4f4b58385dcd1bec7e04847b7d1dc675a521e | [
"MIT"
] | permissive | u1and0/eml2txt | 3f8ac2b19065001963d197ef6a4e9f3153db3afa | 52eb53fc708fb3de478f2113f8abdae9f1ce58d3 | refs/heads/master | 2023-05-30T22:37:54.096578 | 2021-06-08T01:47:23 | 2021-06-08T01:47:23 | 278,624,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,807 | pye | #!/usr/bin/env python3
# coding:utf-8
"""
Extracts data from .eml files in an easy-to-handle form.
Usage:
$ python -m eml2txt foo.eml bar.eml ... # => Dump to foo.txt, bar.txt
$ python -m eml2txt *.eml - # => Concat whole eml and dump to STDOUT
"""
import sys
import re
from dateutil.parser import parse
import email
from email.header import decode_header
VERSION = "eml2ext v2.1.0"
class MailParser:
"""
    A class that takes the path of a mail file and parses it
"""
def __init__(self, mail_file_path):
self.mail_file_path = mail_file_path
        # Build an email.message.Message instance from the eml file
with open(mail_file_path, 'rb') as email_file:
self.email_message = email.message_from_bytes(email_file.read())
self.subject = None
self.to_address = None
self.cc_address = None
self.from_address = None
self.date = None
self.body = ""
        # Attachment-related information
# {name: file_name, data: data}
self.attach_file_list = []
        # Parse the eml
self._parse()
def get_attr_data(self):
"""
        Get the mail data
"""
result = """\
DATE: {}
FROM: {}
TO: {}
CC: {}
-----------------------
SUBJECT: {}
BODY:
{}
-----------------------
ATTACH_FILE_NAME:
{}
""".format(self.date, self.from_address, self.to_address, self.cc_address,
self.subject, self.body,
",".join([x["name"] for x in self.attach_file_list]))
return result
def _parse(self):
"""
        Parse the mail file
        Called from within __init__
"""
self.subject = self._get_decoded_header("Subject")
self.to_address = self._get_decoded_header("To")
self.cc_address = self._get_decoded_header("Cc")
self.from_address = self._get_decoded_header("From")
self.date = parse(
self._get_decoded_header("Date"),
dayfirst=True,
fuzzy=True,
).isoformat()
        # Process the message body parts
for part in self.email_message.walk():
            # If the Content-Type is multipart, the actual content lives in
            # the nested parts, so skip this container part
if part.get_content_maintype() == 'multipart':
continue
            # Get the attachment file name
attach_fname = part.get_filename()
            # If there is no file name, this part should be the message body
if not attach_fname:
charset = part.get_content_charset()
if charset:
self.body += part.get_payload(decode=True).decode(
str(charset), errors="replace")
else:
                # If there is a file name, this part is an attachment,
                # so grab its payload data
self.attach_file_list.append({
"name":
attach_fname,
"data":
part.get_payload(decode=True)
})
def _get_decoded_header(self, key_name):
"""
        Get the decoded result from the header object
"""
ret = ""
        # Return an empty string for keys that are not present
raw_obj = self.email_message.get(key_name)
if raw_obj is None:
return ""
        # Convert each decoded fragment to unicode
for fragment, encoding in decode_header(raw_obj):
if not hasattr(fragment, "decode"):
ret += fragment
continue
            # If no encoding is given, fall back to decoding as UTF-8
ret += fragment.decode(encoding if encoding else 'UTF-8',
errors='replace')
return ret
@staticmethod
def help(exitcode):
"""Show help"""
print(__doc__)
sys.exit(exitcode)
@staticmethod
def version():
"""Show version"""
print(VERSION)
sys.exit(0)
@classmethod
def dump2stdout(cls, argv):
"""Dump messages to STDOUT"""
argv.remove('-')
for filename in argv[1:]:
result = cls(filename).get_attr_data()
print(result)
@classmethod
def dump2txt(cls, argv):
"""Dump messages to TEXT"""
try:
for filename in argv[1:]:
parser = cls(filename)
invalid_str = r"[\\/:*?\"<>|]" # Not allowed to use filename
# Remove invalid text
subject = re.sub(invalid_str, "", parser.subject)
# Remove local time "+09:00", "-"
title_date = parser.date[:-len("+09:00")].replace("-", "")
# Remove invalid strings
date = re.sub(invalid_str, "", title_date)
result = parser.get_attr_data()
# Overwrite same date+subject eml
with open(f'{date}_{subject}.txt', 'w',
encoding='utf-8') as _f:
_f.write(result)
except BaseException as e:
with open('eml2ext_error.txt', 'w', encoding='utf-8') as _f:
print(f'error {e}')
# _f.write(e)
def main():
"""Entry point"""
if len(sys.argv) < 2: # No args
MailParser.help(1)
elif sys.argv[1] == '-v' or sys.argv[1] == '--version':
MailParser.version()
elif sys.argv[1] == '-h' or sys.argv[1] == '--help':
MailParser.help(0)
elif '-' in sys.argv:
MailParser.dump2stdout(sys.argv)
else:
MailParser.dump2txt(sys.argv)
if __name__ == "__main__":
main()
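# A sketch of programmatic use, in addition to the CLI above; "sample.eml" is a
# placeholder path and must point at a real .eml file:
#
#     parser = MailParser("sample.eml")
#     print(parser.subject, parser.from_address)
#     print(parser.get_attr_data())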
| [
"[email protected]"
] | |
49d99d025201045a3100ae9ab2515e297887e22a | 9b10d8482a7af9c90766747f5f2ddc343871d5fa | /Gemtek/AutoTest/Sprinkler-Auto-Test/appium/modules/android/main_screen.py | 53dfab1b87c290967aa74c0b91465fb24e0c9366 | [] | no_license | DarcyChang/MyProjects | 86d33f5cf8bdfd4b21e64922e4eb25c1afc3c135 | 47efb2dfe13ace264f8943b59b701f39f23c4c17 | refs/heads/master | 2021-05-12T12:43:39.255082 | 2020-09-23T06:42:03 | 2020-09-23T06:42:03 | 117,419,269 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,525 | py | import unittest
from time import sleep
from appium import webdriver
import android.verify.exist
import android.verify.next_page
from appium.webdriver.common.touch_action import TouchAction
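# Note: these module-level functions take `self` and rely on `self.driver` and
# `self.assertIsNotNone`, so they appear intended to be called from an
# Appium-driven unittest.TestCase instance rather than as free functions.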
def add_device(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/ivAddSprinkler")
self.assertIsNotNone(el)
el.click()
sleep(1)
def choose_device(self):
# TODO timeout 30 seconds
el = self.driver.find_element_by_id("com.blackloud.wetti:id/ivThum")
# "com.blackloud.wetti:id/tvName" is too.
self.assertIsNotNone(el)
action = TouchAction(self.driver)
i = 1
while(1):
try:
action.tap(el).perform()
# el.click()
sleep(1)
try:
android.verify.next_page.verify_binging_success(self)
except:
android.verify.next_page.verify_binging_network_success(self)
break
except:
sleep(1)
i += 1
if(i == 30):
print("[Gemtek] choose device TIME OUT !")
break
sleep(1)
# TODO : 1. There are four touch points that can select the sprinkler function:
#           two are resource-ids, the other two are classes.
#           Maybe we can pick one of them at random.
#        2. Binding two or more devices.
def my_account(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/imvAbLeft")
self.assertIsNotNone(el)
el.click()
sleep(2)
def buy_blackloud_sprinkler(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvBuyNetti")
self.assertIsNotNone(el)
el.click()
sleep(5)
def user_manual(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvUserManual")
self.assertIsNotNone(el)
el.click()
sleep(5)
def feature_introduction(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvTourGuide")
self.assertIsNotNone(el)
el.click()
sleep(1)
def contact_us(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvContactUs")
self.assertIsNotNone(el)
el.click()
sleep(5)
def about_blackloud(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvAbout")
self.assertIsNotNone(el)
el.click()
sleep(5)
def legal_and_privacy_policy(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvUnderText")
self.assertIsNotNone(el)
el.click()
sleep(5)
if __name__ == '__main__':
print("[Gemtek] main_screen.py")
| [
"[email protected]"
] | |
ebbc23d30dbea2dafb4b6a71b92a5ccb4c9bb341 | d1c352676563b2decacfad19120001959b043f05 | /superset/migrations/versions/a33a03f16c4a_add_extra_column_to_savedquery.py | 07e0b05a1c0c6e035dd0a4931949130430b03579 | [
"Apache-2.0",
"CC-BY-4.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | Affirm/incubator-superset | c9a09a10289b4ebf8a09284a483bca93725a4b51 | 421183d3f46c48215e88e9d7d285f2dc6c7ccfe6 | refs/heads/master | 2023-07-06T11:34:38.538178 | 2019-05-22T23:39:01 | 2019-05-22T23:39:01 | 128,005,001 | 1 | 3 | Apache-2.0 | 2023-03-20T19:49:14 | 2018-04-04T04:02:42 | JavaScript | UTF-8 | Python | false | false | 2,090 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add extra column to SavedQuery
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Revision ID: a33a03f16c4a
Revises: fb13d49b72f9
Create Date: 2019-01-14 16:00:26.344439
"""
# revision identifiers, used by Alembic.
revision = 'a33a03f16c4a'
down_revision = 'fb13d49b72f9'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('saved_query') as batch_op:
batch_op.add_column(sa.Column('extra_json', sa.Text(), nullable=True))
def downgrade():
with op.batch_alter_table('saved_query') as batch_op:
batch_op.drop_column('extra_json')
| [
"[email protected]"
] | |
16599cd9a30d5c2433b18282333b8ef74be14640 | e39a35e314e4dd4ecad543f8fbb5a704d9b4edec | /pipeline/data/hierarchical.py | 90511c7c08089bfcdad9214dd2ac4cae4fcc2abd | [] | no_license | 11mhg/OpenImageCompetition | f422e6e2034360ff6215546b47965327f2d16b5c | 604c6fc206009b476208c02f4d1cad7b9a87057b | refs/heads/master | 2020-03-24T18:45:08.519916 | 2018-11-10T00:55:22 | 2018-11-10T00:55:22 | 142,898,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py |
class Tree(object):
def __init__(self,hierarchy_dict=None):
self.hierarchy_dict = hierarchy_dict
self.root_node = Node(name='root',level=0)
self.parse_level(sub=self.hierarchy_dict,parent=self.root_node)
def parse_level(self,sub=None,parent=None):
if 'LabelName' in sub:
ind = parent.add_child(sub['LabelName'])
if 'Subcategory' in sub:
l = sub['Subcategory']
for elem in l:
self.parse_level(elem,parent.children[ind])
def max_level(self,n):
if n.leaf:
return n.level
values = []
for c in n.children:
values.append(self.max_level(c))
return max(values)
def get_num_level(self,n=None,level=0):
if not n:
n=self.root_node.children[0]
s = 0
if n.level <= level:
s+=1
for child in n.children:
s+= self.get_num_level(child,level)
return s
def get_class_list(self,n=None,level=0):
if not n:
n = self.root_node.children[0]
if n.level <= level:
cl = [n.name]
else:
cl = []
for child in n.children:
cl += self.get_class_list(child,level)
return cl
class Node(object):
def __init__(self,name=None,level=0):
self.name=name
self.level=level
self.children=[]
self.leaf = True
def add_child(self,child_name):
self.leaf = False
node = Node(name=child_name,level=self.level+1)
self.children.append(node)
return len(self.children)-1
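# Minimal usage sketch, assuming the hierarchy dict follows the Open Images
# class-hierarchy JSON layout: every node carries a 'LabelName' and parents
# list their children under 'Subcategory'. The labels below are placeholders.
if __name__ == "__main__":
    sample_hierarchy = {
        'LabelName': 'Entity',
        'Subcategory': [
            {'LabelName': 'Animal',
             'Subcategory': [{'LabelName': 'Cat'}, {'LabelName': 'Dog'}]},
            {'LabelName': 'Vehicle'},
        ],
    }
    tree = Tree(hierarchy_dict=sample_hierarchy)
    root = tree.root_node.children[0]       # the 'Entity' node
    print(tree.max_level(root))             # deepest level in the tree -> 3
    print(tree.get_num_level(level=2))      # nodes at level <= 2 -> 3
    print(tree.get_class_list(level=1))     # class names up to level 1 -> ['Entity']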
| [
"[email protected]"
] | |
478aba4708b468d17b216fbe2e5cd9462bb06649 | 7ce02f61e90efaa3811420bab40227c6d82ec7c4 | /PDD/heartrate_monitor.py | 54139090b034130e9d385a11e76d7a96914e8c65 | [] | no_license | Leonlidawn/raspberrypi-flask-project | e00600310709dab2fe1354b62e4032bb186e5d76 | 4e17969a384cf3eb7899b2d39c94c40ec8c01185 | refs/heads/main | 2023-02-03T12:50:15.842881 | 2020-12-08T03:26:27 | 2020-12-08T03:26:27 | 309,027,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py |
from max30102 import MAX30102
import hrcalc
import threading
import time
import numpy as np
import os
import json
import datetime
class HeartRateMonitor(object):
"""
A class that encapsulates the max30102 device into a thread
"""
LOOP_TIME = 0.01
def __init__(self, print_raw=False, print_result=False):
self.bpm = 0
if print_raw is True:
print('IR, Red')
self.print_raw = print_raw
self.print_result = print_result
def run_sensor(self):
sensor = MAX30102()
ir_data = []
red_data = []
bpms = []
path_to_script = os.path.dirname(os.path.abspath(__file__))
#print(path_to_script)
try:
os.mkdir("logs")
except:
pass
new_file_path = os.path.join(path_to_script,"logs")
#print(new_file_path)
# run until told to stop
while not self._thread.stopped:
# check if any data is available
num_bytes = sensor.get_data_present()
if num_bytes > 0:
# grab all the data and stash it into arrays
while num_bytes > 0:
red, ir = sensor.read_fifo()
num_bytes -= 1
ir_data.append(ir)
red_data.append(red)
if self.print_raw:
print("{0}, {1}".format(ir, red))
while len(ir_data) > 100:
ir_data.pop(0)
red_data.pop(0)
if len(ir_data) == 100:
bpm, valid_bpm, spo2, valid_spo2 = hrcalc.calc_hr_and_spo2(ir_data, red_data)
if valid_bpm:
bpms.append(bpm)
while len(bpms) > 4:
bpms.pop(0)
self.bpm = np.mean(bpms)
if (np.mean(ir_data) < 50000 and np.mean(red_data) < 50000):
self.bpm = 0
if self.print_result:
print("Finger not detected")
if self.print_result:
print("BPM: {0}, SpO2: {1}".format(self.bpm, spo2))
if not ((spo2 == -999) or (np.mean(ir_data) < 50000 and np.mean(red_data) < 50000)):
data = {}
data['Time'] = str(datetime.datetime.now())
data['BPM'] = bpm
data['SPO2'] = spo2
json_data = json.dumps(data)
with open(os.path.join(new_file_path, 'spo2log.json'), 'a') as f:
json.dump(data,f)
f.write("\n")
# json.dump("\n",f)
#f.write("SPO2: "+ str(spo2) +"\n")
time.sleep(self.LOOP_TIME)
sensor.shutdown()
def start_sensor(self):
self._thread = threading.Thread(target=self.run_sensor)
self._thread.stopped = False
self._thread.start()
def stop_sensor(self, timeout=2.0):
self._thread.stopped = True
self.bpm = 0
self._thread.join(timeout)
| [
"[email protected]"
] |
Subsets and Splits