blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k .. 681M, nullable) | star_events_count (int64, 0 .. 209k) | fork_events_count (int64, 0 .. 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 .. 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 .. 10.2M) | authors (sequencelengths 1..1) | author_id (stringlengths 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7fff55e5019e9289ac54ee81bc8c3b1546d4e16 | 91a9f5a7afb398f4238527708cbc155dc972cbfa | /data_analysis/third_party_code/rrt.py | 081eec2a0241365b13ab63425ad28e696f407a8b | [] | no_license | bddmodelcar/kzpy3.2 | cd6f9bf6b7b8b920c79b4ee36c2592b992ae4332 | b044b26649b19b240bd580feca20424a237374b1 | refs/heads/master | 2021-01-19T21:01:58.687712 | 2017-08-23T22:39:56 | 2017-08-23T22:39:56 | 101,243,308 | 0 | 1 | null | 2017-08-24T02:04:50 | 2017-08-24T02:04:50 | null | UTF-8 | Python | false | false | 1,651 | py | #!/usr/bin/env python
# rrt.py
# This program generates a simple rapidly
# exploring random tree (RRT) in a rectangular region.
#
# Written by Steve LaValle
# May 2011
import sys, random, math, pygame
from pygame.locals import *
from math import sqrt, cos, sin, atan2
# constants
XDIM = 640
YDIM = 480
WINSIZE = [XDIM, YDIM]
EPSILON = 7.0
NUMNODES = 5000
def dist(p1, p2):
    return sqrt((p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1]))

def step_from_to(p1, p2):
    if dist(p1, p2) < EPSILON:
        return p2
    else:
        theta = atan2(p2[1] - p1[1], p2[0] - p1[0])
        return p1[0] + EPSILON * cos(theta), p1[1] + EPSILON * sin(theta)

def main():
    # initialize and prepare screen
    pygame.init()
    screen = pygame.display.set_mode(WINSIZE)
    pygame.display.set_caption('RRT S. LaValle May 2011')
    white = 255, 240, 200
    black = 20, 20, 40
    screen.fill(black)
    nodes = []
    nodes.append((XDIM / 2.0, YDIM / 2.0))  # Start in the center
    # nodes.append((0.0,0.0)) # Start in the corner
    for i in range(NUMNODES):
        rand = random.random() * 640.0, random.random() * 480.0
        nn = nodes[0]
        for p in nodes:
            if dist(p, rand) < dist(nn, rand):
                nn = p
        newnode = step_from_to(nn, rand)
        nodes.append(newnode)
        pygame.draw.line(screen, white, nn, newnode)
        pygame.display.update()
        # print i, " ", nodes
        for e in pygame.event.get():
            if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
                sys.exit("Leaving because you requested it.")

# if python says run, then we should run
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
ec6f2aa9f46031fe2b4e7abef4c1302d1c1116dd | b4ea052a5c9d9602ac7a3d7c3d341ef13c0f7b64 | /tuple.py | 9beabaddaeeddf86af319b30c7863c3ff432841a | [] | no_license | iehoshia/math | e74409d68ebd60d8deb1c4a41b0dc0dd96772b94 | d9139fd7de15a10230fc3d76c0f57c5f66be66ef | refs/heads/master | 2021-06-27T08:37:48.210371 | 2020-07-05T18:01:48 | 2020-07-05T18:01:48 | 221,271,881 | 0 | 0 | null | 2021-03-20T03:08:07 | 2019-11-12T17:16:35 | Python | UTF-8 | Python | false | false | 137 | py | msg = "('UserError', ('El campo de es obligatorio.', ''))"
print(type(msg))
ts = eval(msg)
print(type(ts))
print(ts[0])
print(ts[1][0]) | [
"[email protected]"
] | |
16eec80ce78aa696fd2396c731446151f4389a8b | 1811d37ed6474ab7eaeafff3c82d3bb7c0466e3d | /parts/zodiac/zope/interface/tests/test_declarations.py | 8bb3d87f13dddd842e81770795fb0b6a5d5c142f | [] | no_license | bernatcortina/zodiac | ed384fe96f6739d841a3a777d10bad4b33fd0e78 | aa0ecb2c386fc5b54ff60ba94e0a1bc5a7493f17 | refs/heads/master | 2021-01-18T14:02:44.978553 | 2014-02-07T17:33:27 | 2014-02-07T17:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | /Users/Bernat/GitHub/zodiac/eggs/zope.interface-4.0.5-py2.7-macosx-10.9-intel.egg/zope/interface/tests/test_declarations.py | [
"[email protected]"
] | |
536902aa44d7fda7d42026285dc4430c6bd5e55e | e57d7785276053332c633b57f6925c90ad660580 | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/operations/_private_endpoint_connections_operations.py | d2a6e50bd6a44b4f4111b3ebe257923746ea36d5 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 20,701 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_04_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vault_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.PrivateEndpointConnection"]
"""Gets the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def put(
self,
resource_group_name, # type: str
vault_name, # type: str
private_endpoint_connection_name, # type: str
properties, # type: "_models.PrivateEndpointConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpointConnection"
"""Updates the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:param properties: The intended state of private endpoint connection.
:type properties: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.put.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(properties, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vault_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.PrivateEndpointConnection"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vault_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
"""Deletes the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vault_name=vault_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def list_by_resource(
self,
resource_group_name, # type: str
vault_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PrivateEndpointConnectionListResult"]
"""The List operation gets information about the private endpoint connections associated with the
vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections'} # type: ignore
| [
"[email protected]"
] | |
e950a96f99338a6581c70f7a239071ec9857cc9a | 2876a5a8e7d50d97039b4e63c25f5eaf1cc20808 | /src/odontology/person/migrations/0006_remove_patient_reciently_added.py | fa65de12e835d148fed6cd7e7a0620ba29ebeb5c | [
"Apache-2.0"
] | permissive | nanomolina/JP | 6fcd01b75d71aa560781d4c0350ff76025f85f92 | 248a47bced4dac850f85d28968ddf279cd123400 | refs/heads/master | 2022-11-29T09:31:43.449654 | 2019-07-16T18:25:20 | 2019-07-16T18:25:20 | 51,620,989 | 2 | 0 | Apache-2.0 | 2022-11-22T01:11:25 | 2016-02-12T22:33:24 | HTML | UTF-8 | Python | false | false | 401 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 08:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('person', '0005_auto_20160214_0517'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='patient',
            name='reciently_added',
        ),
    ]
| [
"[email protected]"
] | |
30f8c99090912bd768c74d849e02e4c932b3ae47 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day03/jiangyi/day03/code/str2.py | 414e203cb3fc038b2e548595d370a0630461fc59 | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # str.py
print('ABC\n123')
print('ABC\t123')
print("ABCDE\rab")
print("ABCDE\b\babcd")
print("==ABCD==")
print("==\x41\x42\x43\x44==") # ABCD
print('\x68\x65\x6c\x6c\x6f')
print("hello")
| [
"[email protected]"
] | |
701edda3fc08ca5e204e91994d360e36880706aa | 532a28255249530c98eea8985cdcfb093dbf28b1 | /testing/test_boxed.py | bee936735f1ff0a109346bf20f8150639e74bb0f | [
"MIT"
] | permissive | lukas-bednar/pytest-xdist-convert | 8dc09ebae810344f8ebf031b3d04de4e9a62602c | 255c0617159b611eaa94b80c7b61568c5c8ce082 | refs/heads/master | 2021-01-23T12:18:04.783539 | 2015-09-01T15:28:21 | 2015-09-01T15:28:21 | 41,746,483 | 1 | 0 | null | 2015-09-01T15:21:30 | 2015-09-01T15:21:30 | null | UTF-8 | Python | false | false | 1,610 | py | import pytest
import os
needsfork = pytest.mark.skipif(not hasattr(os, "fork"),
reason="os.fork required")
@needsfork
def test_functional_boxed(testdir):
p1 = testdir.makepyfile("""
import os
def test_function():
os.kill(os.getpid(), 15)
""")
result = testdir.runpytest(p1, "--boxed")
result.stdout.fnmatch_lines([
"*CRASHED*",
"*1 failed*"
])
@needsfork
@pytest.mark.parametrize("capmode", [
"no",
pytest.mark.xfail("sys", reason="capture cleanup needed"),
pytest.mark.xfail("fd", reason="capture cleanup needed")])
def test_functional_boxed_capturing(testdir, capmode):
p1 = testdir.makepyfile("""
import os
import sys
def test_function():
sys.stdout.write("hello\\n")
sys.stderr.write("world\\n")
os.kill(os.getpid(), 15)
""")
result = testdir.runpytest(p1, "--boxed", "--capture=%s" % capmode)
result.stdout.fnmatch_lines("""
*CRASHED*
*stdout*
hello
*stderr*
world
*1 failed*
""")
class TestOptionEffects:
def test_boxed_option_default(self, testdir):
tmpdir = testdir.tmpdir.ensure("subdir", dir=1)
config = testdir.parseconfig()
assert not config.option.boxed
pytest.importorskip("execnet")
config = testdir.parseconfig('-d', tmpdir)
assert not config.option.boxed
def test_is_not_boxed_by_default(self, testdir):
config = testdir.parseconfig(testdir.tmpdir)
assert not config.option.boxed
| [
"[email protected]"
] | |
3b71545bbfa3cf0b81dc43c505f9b0ee6d8e556f | 5ded398a05f59f08f2add076fa50e42bfcb5cc92 | /home/migrations/0002_load_initial_data.py | 3fb748150d45554121b7e71e3ea15175052fd015 | [] | no_license | crowdbotics-apps/vool-22192 | 1dc764992f07ce542f6691693503375007175a6d | d8c48223d5df02577fe997f6a50fdbd83eb96dce | refs/heads/master | 2023-01-09T11:42:10.989994 | 2020-11-01T19:20:38 | 2020-11-01T19:20:38 | 309,170,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "vool"
    CustomText.objects.create(title=customtext_title)

def create_homepage(apps, schema_editor):
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">vool</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)

def create_site(apps, schema_editor):
    Site = apps.get_model("sites", "Site")
    custom_domain = "vool-22192.botics.co"
    site_params = {
        "name": "vool",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)

class Migration(migrations.Migration):

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"[email protected]"
] | |
21aa8958edca79602fd8a9062a0ca2fb5d5a8527 | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/demo/_tensorflow/notebooks/2_BasicModels/linear_regression.py | db1a4d9133a9a5e089a49002de6f171371fd87ae | [
"MIT"
] | permissive | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 2,211 | py | # -*-coding:utf-8-*-
from __future__ import absolute_import, division, print_function
# %%
import tensorflow as tf
import numpy as np
rng = np.random
# %%
# Parameters.
learning_rate = 0.01  # learning rate
training_steps = 1000  # total number of training steps
display_step = 50
# %%
# Training Data.
X = np.array([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
Y = np.array([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = X.shape[0]
# %%
# Weight and Bias, initialized randomly.
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Linear regression (Wx + b).
def linear_regression(x):
return W * x + b
# Mean square error.
def mean_square(y_pred, y_true):
return tf.reduce_sum(tf.pow(y_pred - y_true, 2)) / (2 * n_samples)
# Stochastic Gradient Descent Optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# %%
# Optimization process.
def run_optimization():
# Wrap computation inside a GradientTape for automatic differentiation.
with tf.GradientTape() as g:
pred = linear_regression(X)
loss = mean_square(pred, Y)
# Compute gradients.
gradients = g.gradient(loss, [W, b])#计算loss函数的梯度,沿着该梯度向量的方向可以使函数函数的减小最多
# Update W and b following gradients.
optimizer.apply_gradients(zip(gradients, [W, b]))#学习率决定移动的方向,梯度决定移动的方向,总体可以将参数[w,b]向使得函数loss减小最快的方向移动相应的距离
# %%
# Run training for the given number of steps.
for step in range(1, training_steps + 1):
# Run the optimization to update W and b values.
run_optimization()
if step % display_step == 0:
pred = linear_regression(X)
loss = mean_square(pred, Y)
print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy()))
# %%
import matplotlib.pyplot as plt
# %%
# Graphic display
plt.plot(X, Y, 'ro', label='Original data')
plt.plot(X, np.array(W * X + b), label='Fitted line')
plt.legend()
plt.show()
| [
"[email protected]"
] | |
d2f91d9e02964c206df16fb3598b24bd1f3a4f8e | ee22ec2076a79e8de3011377fe205bc87163ab9f | /src/basic-c3/dockstring.py | 5ada006cc7d290f3d8a9d64f64306ebce1e919b9 | [] | no_license | n18018/programming-term2 | 039a95c67372a38a34e2aa8c5975045a9fc731be | 86c455269eed312def529604e1ac3b00f476226c | refs/heads/master | 2020-03-22T08:59:29.545280 | 2018-08-29T07:57:37 | 2018-08-29T07:57:37 | 139,806,131 | 0 | 0 | null | 2018-07-05T06:42:11 | 2018-07-05T06:42:11 | null | UTF-8 | Python | false | false | 53 | py | def a (hiki_1, hiki_2, hiki_3):
'''
return i
| [
"[email protected]"
] | |
3ae8830fd94c0cd38223d716948c6ba72b2243b7 | cda43bf6a84f7e55fab26aa70cda934683a51fe5 | /MyWork/moModel.py | f0583bfc4748b1285e8ed8c12677936f09f2dcf3 | [] | no_license | nikolaosdionelis/NeuralNetworksNNs | abb55622882e31c8d130a8986868b3d19ede186f | 8a217490ad5bb3f7fccf4002c6b43a06c1e562fc | refs/heads/master | 2022-11-13T00:50:23.578197 | 2020-07-12T18:52:20 | 2020-07-12T18:52:20 | 279,042,013 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 113,860 | py | from __future__ import division
from __future__ import print_function
import os
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
#import real_nvp.model as nvp
import real_nvp.nn as nvp_op
#import imageio
#imageio.imwrite('filename.jpg', array)
#from ops import *
from utils2 import *
#from ops import *
from ops2 import *
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
def gen_random(mode, size):
if mode == 'normal01': return np.random.normal(0, 1, size=size)
if mode == 'uniform_signed': return np.random.uniform(-1, 1, size=size)
if mode == 'uniform_unsigned': return np.random.uniform(0, 1, size=size)
class DCDCDCGAN(object):
def __init__(self, sess, dcDcgan, input_height=108, input_width=108, crop=True,
batch_size=64, sample_num=64, output_height=64, output_width=64,
y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
max_to_keep=1,
input_fname_pattern='*.jpg', checkpoint_dir='ckpts', sample_dir='samples', out_dir='./out',
data_dir='./data'):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
y_dim: (optional) Dimension of dim for y. [None]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.crop = crop
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
if not self.y_dim:
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
if not self.y_dim:
self.g_bn3 = batch_norm(name='g_bn3')
self.dataset_name = dataset_name
self.input_fname_pattern = input_fname_pattern
self.checkpoint_dir = checkpoint_dir
self.data_dir = data_dir
self.out_dir = out_dir
self.max_to_keep = max_to_keep
if self.dataset_name == 'mnist':
#self.data_X, self.data_y = self.load_mnist()
#self.c_dim = self.data_X[0].shape[-1]
#self.data_X, self.data_y = self.load_mnist()
#self.c_dim = self.data_X[0].shape[-1]
#self.data_X, self.data_y = self.load_mnist()
#self.c_dim = self.data_X[0].shape[-1]
#print('')
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.c_dim)
#print('')
#import dataset_loaders.cifar_loader as cifar_data
#import dataset_loaders.mnist_loader as mnist_data
#data_X, val_data, test_data, train_dist = mnist_data.load_mnist()
#data_X, val_data, test_data = cifar_data.load_cifar()
#data_X, val_data, test_data, train_dist = mnist_data.load_mnist()
#data_X, val_data, test_data, train_dist = dataset_loaders.mnist_loader.load_mnist()
#data_X, val_data, test_data, train_dist = dataset_loaders.mnist_loader.load_mnist()
#self.data_X, _, _, _ = dataset_loaders.mnist_loader.load_mnist()
#return train_data, val_data, test_data, train_labels, val_labels, test_labels
#self.data_X, _, _, self.data_y, _, _ = dataset_loaders.mnist_loader.load_mnist(send_labels=True)
#import dataset_loaders.mnist_loader as mnist_data
#self.data_X, _, _, self.data_y, _, _ = mnist_data.load_mnist(send_labels=True)
#self.c_dim = 1
#data_X, val_data, test_data = cifar_data.load_cifar()
#data_X, val_data, test_data = dataset_loaders.mnist_loader.load_cifar()
"""
import dataset_loaders.mnist_loader as mnist_data
self.data_X, _, _, self.data_y, _, _ = mnist_data.load_mnist(send_labels=True)
self.c_dim = 1
"""
import dataset_loaders.mnist_loader as mnist_data
self.data_X, _, _, self.data_y, _, _ = mnist_data.load_mnist(send_labels=True)
self.c_dim = 1
y = self.data_y
y_vec = np.zeros((len(y), self.y_dim), dtype=np.float)
for i, label in enumerate(y):
y_vec[i, y[i]] = 1.0
self.data_y = y_vec
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.data_y)
#self.data_X, self.data_y = self.load_mnist()
#self.c_dim = self.data_X[0].shape[-1]
#self.data_X, self.data_y = self.load_mnist()
#self.c_dim = self.data_X[0].shape[-1]
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.data_y)
#asdfdsfs
#asdf
#asdfs
#import dataset_loaders.mnist_loader as mnist_data
#self.data_X, _, _, self.data_y, _, _ = mnist_data.load_mnist(send_labels=True)
#self.c_dim = 1
#print('')
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.c_dim)
#print('')
"""
import dataset_loaders.cifar_loader as cifar_data
self.data_X, _, _, self.data_y, _, _ = cifar_data.load_cifar(sendLabels=True)
self.c_dim = 3
"""
"""
import dataset_loaders.cifar_loader as cifar_data
self.data_X, _, _, self.data_y, _, _ = cifar_data.load_cifar(sendLabels=True)
self.c_dim = 3
"""
#import dataset_loaders.cifar_loader as cifar_data
#self.data_X, _, _, self.data_y, _, _ = cifar_data.load_cifar(sendLabels=True)
#self.c_dim = 3
#import dataset_loaders.cifar_loader as cifar_data
#self.data_X, _, _, = cifar_data.load_cifar()
# use: sendLabels=True
#self.data_X, _, _, self.data_y, _, _ = cifar_data.load_cifar(sendLabels=True)
#self.c_dim = 3
# data_X, val_data, test_data = cifar_data.load_cifar()
# data_X, val_data, test_data = dataset_loaders.mnist_loader.load_cifar()
#print('')
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.c_dim)
#print('')
#print(self.data_X.shape)
#print(self.data_y.shape)
#print(self.c_dim)
#asdfasfs
#zsdfsdfsdfzs
#asdfadsfas
#asdfasdf
#asdfxszfs
else:
data_path = os.path.join(self.data_dir, self.dataset_name, self.input_fname_pattern)
self.data = glob(data_path)
if len(self.data) == 0:
raise Exception("[!] No data found in '" + data_path + "'")
np.random.shuffle(self.data)
imreadImg = imread(self.data[0])
if len(imreadImg.shape) >= 3: # check if image is a non-grayscale image by checking channel number
self.c_dim = imread(self.data[0]).shape[-1]
else:
self.c_dim = 1
if len(self.data) < self.batch_size:
raise Exception("[!] Entire dataset size is less than the configured batch_size")
self.grayscale = (self.c_dim == 1)
self.build_model(dcDcgan)
def build_model(self, dcDcgan):
if self.y_dim:
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
self.y = tf.placeholder(tf.float32, [self.sample_num, self.y_dim], name='y')
else:
self.y = None
if self.crop:
image_dims = [self.output_height, self.output_width, self.c_dim]
else:
image_dims = [self.input_height, self.input_width, self.c_dim]
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
#self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
#self.firstTerm = tf.placeholder(tf.float32, [1], name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, [1], name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.firstTerm = tf.placeholder(tf.float32, name='first_term')
#self.secondTerm = tf.placeholder(tf.float32, name='first_term')
#self.thirdTerm = tf.placeholder(tf.float32, name='first_term')
#self.inputs = tf.placeholder(
# tf.float32, [self.batch_size] + image_dims, name='real_images')
#self.inputs = tf.placeholder(
# tf.float32, [self.batch_size] + image_dims, name='real_images')
self.inputs = tf.placeholder(
tf.float32, [self.sample_num] + image_dims, name='real_images')
inputs = self.inputs
self.z = tf.placeholder(
tf.float32, [None, self.z_dim], name='z')
self.z_sum = histogram_summary("z", self.z)
self.G = self.generator(self.z, self.y)
#self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)
self.sampler = self.sampler(self.z, self.y)
#self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)
#self.d_sum = histogram_summary("d", self.D)
#self.d__sum = histogram_summary("d_", self.D_)
self.G_sum = image_summary("G", self.G)
def sigmoid_cross_entropy_with_logits(x, y):
try:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
except:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
#self.d_loss_real = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))
#self.d_loss_fake = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))
#self.g_loss = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))
#self.g_loss = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))
#self.g_loss = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.G, tf.ones_like(self.G)))
#self.g_loss = tf.reduce_mean(
# sigmoid_cross_entropy_with_logits(self.G, tf.ones_like(self.G)))
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.G, tf.ones_like(self.G))))
#self.g_loss = (self.firstTerm) + (tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.G, tf.ones_like(self.G))))
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm) + (tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.G, tf.ones_like(self.G))))
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm) + tf.reduce_mean(self.G)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#adfasdbf
#asdfa
#asdfzs
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.secondTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 100])
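# Second loss term: for each generated sample, the squared Euclidean distance to its nearest real image, averaged over the generated batch (pulls generated samples towards the data).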
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loLoss2 = second_term_loss2 / self.batch_size
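# Third loss term: average ratio of pairwise distances in latent space to pairwise distances in image space, rewarding outputs that stay spread out (a diversity / anti-mode-collapse term).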
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, self.batch_size):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / self.batch_size
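# Likelihood term: per-batch log-likelihood of the generated samples under the real-NVP flow model (nvp_op.log_likelihood), exponentiated before being added to the generator loss.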
train_gen_para, train_jac = dcDcgan.flow_model(genFGen2)
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
"logistic") / self.batch_size
traTrain_nlli = tf.exp(train_nlli)
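# Total generator loss: nearest-neighbour data term + latent/image distance-ratio term + flow-likelihood term.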
self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
#print('')
#print(xData.shape)
#print(genFGen2.shape)
#print(genFGen3.shape)
#asdfasdfzs
#print(traTrain_nlli.shape)
#print(second_term_loLoss2.shape)
#print(third_term_loss12.shape)
#asdfasfs
"""
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 100])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loLoss2 = second_term_loss2 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, self.batch_size):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / self.batch_size
train_gen_para, train_jac = dcDcgan.flow_model(genFGen2)
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
"logistic") / self.batch_size
traTrain_nlli = tf.exp(train_nlli)
self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
"""
'''
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 100])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loLoss2 = second_term_loss2 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, self.batch_size):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / self.batch_size
train_gen_para, train_jac = dcDcgan.flow_model(genFGen2)
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
"logistic") / self.batch_size
traTrain_nlli = tf.exp(train_nlli)
#self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
#self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
#self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
#self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
#self.g_loss = traTrain_nlli
#self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
self.g_loss = second_term_loLoss2 + third_term_loss12 + traTrain_nlli
'''
"""
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 100])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loLoss2 = second_term_loss2 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, self.batch_size):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / self.batch_size
#self.g_loss = second_term_loss2 + third_term_loss12
#self.g_loss = second_term_loss2 + third_term_loss12
#self.g_loss = second_term_loss2 + third_term_loss12
#myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
# self.y: batch_labels})
#myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
# use: genFGen2
train_gen_para, train_jac = dcgan.flow_model(genFGen2)
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / config.batch_size2
print('')
traTrain_nlli = tf.exp(train_nlli)
"""
'''
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
#genFGen3 = tf.reshape(self.z, [-1, 28 * 28])
#genFGen3 = tf.reshape(self.z, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 100])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loLoss2 = second_term_loss2 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, self.batch_size):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / self.batch_size
self.g_loss = second_term_loss2 + third_term_loss12
'''
"""
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 28 * 28])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= self.batch_size
self.g_loss = second_term_loss2
"""
'''
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(inputs, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 28 * 28])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= self.batch_size
self.g_loss = second_term_loss2
'''
"""
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.secondTerm)
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(batch_images, [-1, 28 * 28])
xData = tf.reshape(self.inputs, [-1, 28 * 28])
# inputs or self.inputs
# use inputs or self.inputs
#xData = tf.reshape(self.inputs, [-1, 28 * 28])
#xData = tf.reshape(self.inputs, [-1, 28 * 28])
#xData = tf.reshape(self.inputs, [-1, 28 * 28])
#genFGen2 = myFake_images
#genFGen3 = batch_z
#genFGen2 = self.G
#genFGen3 = self.z
genFGen2 = tf.reshape(self.G, [-1, 28 * 28])
genFGen3 = tf.reshape(self.z, [-1, 28 * 28])
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= self.batch_size
#self.g_loss = (self.secondTerm)
self.g_loss = second_term_loss2
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#self.g_loss = (self.firstTerm) + (self.secondTerm) + (self.thirdTerm)
#adfasdbf
#asdfa
#asdfzs
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm) + (tf.reduce_mean()) + (tf.reduce_mean())
#self.g_loss = (self.firstTerm)
"""
#self.d_loss_real_sum = scalar_summary("d_loss_real", self.d_loss_real)
#self.d_loss_fake_sum = scalar_summary("d_loss_fake", self.d_loss_fake)
#self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss_sum = scalar_summary("g_loss", self.g_loss)
#self.d_loss_sum = scalar_summary("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
#self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver(max_to_keep=self.max_to_keep)
#def train(self, config, dcgan, FLAGS31):
#def train(self, config, dcgan, FLAGS31):
def train(self, config):
#d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
# .minimize(self.d_loss, var_list=self.d_vars)
#sdfgdsgdsz
#asdf
#asdfs
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(train_data[0, :], (FLAGS.batch_size, 1)), FLAGS)
#print(train_nlli)
#asdfdasfz
g_optim = tf.train.AdamOptimizer(config.learning_rate2, beta1=config.beta12) \
.minimize(self.g_loss, var_list=self.g_vars)
try:
tf.global_variables_initializer().run()
except:
tf.initialize_all_variables().run()
if config.G_img_sum2:
#self.g_sum = merge_summary([self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])
self.g_sum = merge_summary([self.z_sum, self.G_sum, self.g_loss_sum])
else:
#self.g_sum = merge_summary([self.z_sum, self.d__sum, self.d_loss_fake_sum, self.g_loss_sum])
self.g_sum = merge_summary([self.z_sum, self.g_loss_sum])
#self.d_sum = merge_summary(
# [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.d_sum = merge_summary(
[self.z_sum])
self.writer = SummaryWriter(os.path.join(self.out_dir, "logs"), self.sess.graph)
#sample_z = gen_random(config.z_dist2, size=(self.sample_num, self.z_dim))
#sample_z = gen_random(config.z_dist2, size=(self.sample_num, self.z_dim))
sample_z = gen_random(config.z_dist2, size=(self.batch_size, self.z_dim))
if config.dataset2 == 'mnist':
#print(self.data_X.shape)
#asdfasdf
sample_inputs = self.data_X[0:self.sample_num]
sample_labels = self.data_y[0:self.sample_num]
else:
sample_files = self.data[0:self.sample_num]
sample = [
get_image(sample_file,
input_height=self.input_height,
input_width=self.input_width,
resize_height=self.output_height,
resize_width=self.output_width,
crop=self.crop,
grayscale=self.grayscale) for sample_file in sample_files]
if (self.grayscale):
sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
else:
sample_inputs = np.array(sample).astype(np.float32)
counter = 1
start_time = time.time()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in xrange(config.epoch2):
if config.dataset2 == 'mnist':
batch_idxs = min(len(self.data_X), config.train_size2) // config.batch_size32
#print(batch_idxs)
#asdfasfs
else:
self.data = glob(os.path.join(
config.data_dir2, config.dataset2, self.input_fname_pattern))
np.random.shuffle(self.data)
batch_idxs = min(len(self.data), config.train_size2) // config.batch_size2
for idx in xrange(0, int(batch_idxs)):
if config.dataset2 == 'mnist':
batch_images = self.data_X[idx * config.batch_size32:(idx + 1) * config.batch_size32]
batch_labels = self.data_y[idx * config.batch_size32:(idx + 1) * config.batch_size32]
else:
batch_files = self.data[idx * config.batch_size2:(idx + 1) * config.batch_size2]
batch = [
get_image(batch_file,
input_height=self.input_height,
input_width=self.input_width,
resize_height=self.output_height,
resize_width=self.output_width,
crop=self.crop,
grayscale=self.grayscale) for batch_file in batch_files]
if self.grayscale:
batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
else:
batch_images = np.array(batch).astype(np.float32)
batch_z = gen_random(config.z_dist2, size=[config.batch_size2, self.z_dim]) \
.astype(np.float32)
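# Fresh latent vectors for this batch, drawn from the prior selected by
# config.z_dist2 (e.g. uniform or normal, depending on what gen_random supports).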
if config.dataset2 == 'mnist':
# Update D network
#_, summary_str = self.sess.run([d_optim, self.d_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
# self.writer.add_summary(summary_str, counter)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
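# This is the single live generator update per batch; the quoted blocks below are
# earlier experiments (flow-model likelihood and distance-based penalty terms) kept
# only for reference and never executed.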
"""
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
'''
#myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
# self.y: batch_labels})
#myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
#train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
# FLAGS31.prior) / config.batch_size2
#print('')
#traTrain_nlli = tf.exp(train_nlli)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
'''
"""
myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / config.batch_size2
print('')
traTrain_nlli = tf.exp(train_nlli)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
'''
myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / config.batch_size2
print('')
traTrain_nlli = tf.exp(train_nlli)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
'''
"""
myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / FLAGS31.batch_size
print('')
traTrain_nlli = tf.exp(train_nlli)
print('')
xData = tf.reshape(batch_images, [-1, 28 * 28])
genFGen2 = myFake_images
genFGen3 = batch_z
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, config.batch_size2):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= config.batch_size2
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, config.batch_size2):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / config.batch_size2
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.secondTerm: self.sess.run(second_term_loss2),
self.thirdTerm: self.sess.run(third_term_loss12),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
'''
myMyFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
myFake_images = np.reshape(np.squeeze(myMyFake_images), (-1, dcgan.image_size))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / FLAGS31.batch_size
print('')
traTrain_nlli = tf.exp(train_nlli)
print('')
xData = tf.reshape(batch_images, [-1, 28 * 28])
genFGen2 = myFake_images
genFGen3 = batch_z
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, config.batch_size2):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= config.batch_size2
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, config.batch_size2):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / config.batch_size2
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.secondTerm: self.sess.run(second_term_loss2),
self.thirdTerm: self.sess.run(third_term_loss12),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
'''
"""
myFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
myFake_images = np.squeeze(myFake_images)
myFake_images = np.reshape(myFake_images, (-1, dcgan.image_size))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / FLAGS31.batch_size
print('')
traTrain_nlli = tf.exp(train_nlli)
print('')
xData = tf.reshape(batch_images, [-1, 28 * 28])
genFGen2 = myFake_images
genFGen3 = batch_z
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, config.batch_size2):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= config.batch_size2
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, config.batch_size2):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / config.batch_size2
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.secondTerm: self.sess.run(second_term_loss2),
self.thirdTerm: self.sess.run(third_term_loss12),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
'''
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(train_data[0, :], (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(batch_images)
#print(batch_images.shape)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#print(batch_images.shape)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(train_nlli)
#asdfdasfz
#print(train_nlli)
#trTrain_nlli = tf.exp(train_nlli)
#train_nlli = dcgan.evaluate_neg_loglikelihood2(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(train_nlli)
#print(trTrain_nlli)
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#trTrain_nlli = tf.exp(train_nlli)
#print('')
#print(train_nlli)
#print(trTrain_nlli)
#asdfasfzs
#train_gen_para, train_jac = self.trainable_flow_model(inputs_tr_flow)
#self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac, self.prior) / self.batch_size
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.trainable_flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.G)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
myFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
#myFake_images = np.reshape(myFake_images, (-1, dcgan.image_size))
#print(np.shape(myFake_images))
myFake_images = np.squeeze(myFake_images)
myFake_images = np.reshape(myFake_images, (-1, dcgan.image_size))
#print(np.shape(batch_images))
#print(np.shape(myFake_images))
#print(np.shape(myFake_images))
#print(myFake_images.size())
#print(myFake_images.shape)
#asdfasdfas
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
# _, summary_str = self.sess.run([d_optim, self.d_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#dcgan.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
# self.prior) / self.batch_size
# use: batch_z
# now use: batch_z
#dcgan.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
# batch_z) / config.batch_size2
#train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
# batch_z) / config.batch_size2
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / FLAGS31.batch_size
#print(train_nlli)
#print(train_nlli.Print())
#print(train_nlli.Print())
#print(train_nlli.eval())
#print(train_nlli)
#print(train_nlli.eval())
print('')
traTrain_nlli = tf.exp(train_nlli)
#print(traTrain_nlli)
#print(traTrain_nlli.eval())
#print(train_nlli)
#print('')
#print(train_nlli)
#print(traTrain_nlli)
#print('')
#print(batch_images)
#print(batch_z)
#print(batch_labels)
print('')
# Once you have launched a sess, you can use your_tensor.eval(session=sess)
# or sess.run(your_tensor) to get you feed tensor into the format
# of numpy.array and then feed it to your placeholder.
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: traTrain_nlli,
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Once you have launched a sess, you can use your_tensor.eval(session=sess)
# or sess.run(your_tensor) to get you feed tensor into the format
# of numpy.array and then feed it to your placeholder.
# now use: sess.run(your_tensor)
# use: your_tensor.eval(session=sess)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: self.sess.run(traTrain_nlli),
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
"""
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: self.sess.run(traTrain_nlli),
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# we use: self.sess.run(your_tensor)
# use: your_tensor.eval(session=self.sess)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: traTrain_nlli,
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
# self.writer.add_summary(summary_str, counter)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Update D network
#_, summary_str = self.sess.run([d_optim, self.d_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Update G network
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
#self.xData = self.inputs
# xData is now batch_images
#print(np.shape(batch_images))
# batch_images is (1024, 28, 28, 1)
#self.genFgenFGen2 = self.flow_inv_model(self.z)
# genFgenFGen2 is now myFake_images
#print(np.shape(myFake_images))
#asdfasfszsdf
#print(np.shape(myFake_images))
# here, myFake_images is (1024, 784)
#self.xData = tf.reshape(self.xData, [-1, 28 * 28])
xData = tf.reshape(batch_images, [-1, 28 * 28])
#self.xData = tf.reshape(self.xData, [-1, 28 * 28])
#self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
#self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
genFGen2 = myFake_images
#self.genFGen3 = self.z
# genFGen3 is now batch_z
#print(np.shape(batch_z))
# here, batch_z is (1024, 100)
#self.genFGen3 = self.z
#self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
#self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
genFGen3 = batch_z
#self.second_term_loss2 = tf.reduce_min(
# tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.xData), 2), 1)) ** 2)
#for i in range(1, self.batch_size):
# self.second_term_loss2 += tf.reduce_min(
# tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.xData), 2), 1)) ** 2)
#self.second_term_loss2 /= self.batch_size
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, config.batch_size2):
#for i in range(1, config.batch_size2+1):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= config.batch_size2
#self.third_term_loss32 = tf.reduce_mean(
# (tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[0, :] - self.genFGen3), 2), 1))) / (
# 1e-17 + tf.sqrt(
# 1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.genFGen2), 2), 1))))
#for i in range(1, self.batch_size):
# self.third_term_loss32 += tf.reduce_mean(
# (tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[i, :] - self.genFGen3), 2), 1))) / (
# 1e-17 + tf.sqrt(
# 1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.genFGen2), 2), 1))))
#self.third_term_loss12 = self.third_term_loss32 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, config.batch_size2):
#for i in range(1, config.batch_size2+1):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / config.batch_size2
# range(1, config.batch_size2)
# or range(1, config.batch_size2+1)?
# use range(1, config.batch_size2+1)?
# now use range(1, config.batch_size2+1)?
#print(traTrain_nlli)
#print(second_term_loss2)
#print(third_term_loss12)
#print('')
#print(traTrain_nlli.eval())
#print(second_term_loss2.eval())
#print(third_term_loss12.eval())
#print('')
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.secondTerm: self.sess.run(second_term_loss2),
self.thirdTerm: self.sess.run(third_term_loss12),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
'''
"""
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(train_data[0, :], (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(batch_images)
#print(batch_images.shape)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#print(batch_images.shape)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(train_nlli)
#asdfdasfz
#print(train_nlli)
#trTrain_nlli = tf.exp(train_nlli)
#train_nlli = dcgan.evaluate_neg_loglikelihood2(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#print(train_nlli)
#print(trTrain_nlli)
#train_nlli = dcgan.evaluate_neg_loglikelihood(np.tile(batch_images, (FLAGS31.batch_size, 1)),
# FLAGS31)
#trTrain_nlli = tf.exp(train_nlli)
#print('')
#print(train_nlli)
#print(trTrain_nlli)
#asdfasfzs
#train_gen_para, train_jac = self.trainable_flow_model(inputs_tr_flow)
#self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac, self.prior) / self.batch_size
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.trainable_flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(inputs_tr_flow)
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.trainable_flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.generator(batch_z, self.y))
#train_gen_para, train_jac = dcgan.flow_model(self.G)
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#batch_images = np.reshape(batch_images, (-1, dcgan.image_size))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
myFake_images = self.sess.run([self.G], feed_dict={self.inputs: batch_images, self.z: batch_z,
self.y: batch_labels})
#myFake_images = np.reshape(myFake_images, (-1, dcgan.image_size))
#print(np.shape(myFake_images))
myFake_images = np.squeeze(myFake_images)
myFake_images = np.reshape(myFake_images, (-1, dcgan.image_size))
#print(np.shape(batch_images))
#print(np.shape(myFake_images))
#print(np.shape(myFake_images))
#print(myFake_images.size())
#print(myFake_images.shape)
#asdfasdfas
#train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(batch_images, np.float32))
train_gen_para, train_jac = dcgan.flow_model(tf.convert_to_tensor(myFake_images, np.float32))
# _, summary_str = self.sess.run([d_optim, self.d_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#dcgan.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
# self.prior) / self.batch_size
# use: batch_z
# now use: batch_z
#dcgan.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
# batch_z) / config.batch_size2
#train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
# batch_z) / config.batch_size2
train_nlli = nvp_op.log_likelihood(train_gen_para, train_jac,
FLAGS31.prior) / FLAGS31.batch_size
#print(train_nlli)
#print(train_nlli.Print())
#print(train_nlli.Print())
#print(train_nlli.eval())
#print(train_nlli)
#print(train_nlli.eval())
print('')
traTrain_nlli = tf.exp(train_nlli)
#print(traTrain_nlli)
#print(traTrain_nlli.eval())
#print(train_nlli)
#print('')
#print(train_nlli)
#print(traTrain_nlli)
#print('')
#print(batch_images)
#print(batch_z)
#print(batch_labels)
print('')
# Once you have launched a sess, you can use your_tensor.eval(session=sess)
# or sess.run(your_tensor) to get you feed tensor into the format
# of numpy.array and then feed it to your placeholder.
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: traTrain_nlli,
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Once you have launched a sess, you can use your_tensor.eval(session=sess)
# or sess.run(your_tensor) to get you feed tensor into the format
# of numpy.array and then feed it to your placeholder.
# now use: sess.run(your_tensor)
# use: your_tensor.eval(session=sess)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: self.sess.run(traTrain_nlli),
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: self.sess.run(traTrain_nlli),
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: self.sess.run(traTrain_nlli),
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# we use: self.sess.run(your_tensor)
# use: your_tensor.eval(session=self.sess)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.firstTerm: traTrain_nlli,
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
# self.writer.add_summary(summary_str, counter)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Update D network
#_, summary_str = self.sess.run([d_optim, self.d_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Update G network
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
#self.xData = self.inputs
# xData is now batch_images
#print(np.shape(batch_images))
# batch_images is (1024, 28, 28, 1)
#self.genFgenFGen2 = self.flow_inv_model(self.z)
# genFgenFGen2 is now myFake_images
#print(np.shape(myFake_images))
#asdfasfszsdf
#print(np.shape(myFake_images))
# here, myFake_images is (1024, 784)
#self.xData = tf.reshape(self.xData, [-1, 28 * 28])
xData = tf.reshape(batch_images, [-1, 28 * 28])
#self.xData = tf.reshape(self.xData, [-1, 28 * 28])
#self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
#self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
genFGen2 = myFake_images
#self.genFGen3 = self.z
# genFGen3 is now batch_z
#print(np.shape(batch_z))
# here, batch_z is (1024, 100)
#self.genFGen3 = self.z
#self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
#self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
genFGen3 = batch_z
#self.second_term_loss2 = tf.reduce_min(
# tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.xData), 2), 1)) ** 2)
#for i in range(1, self.batch_size):
# self.second_term_loss2 += tf.reduce_min(
# tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.xData), 2), 1)) ** 2)
#self.second_term_loss2 /= self.batch_size
second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1)) ** 2)
for i in range(1, config.batch_size2):
#for i in range(1, config.batch_size2+1):
second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - xData), 2), 1)) ** 2)
second_term_loss2 /= config.batch_size2
#self.third_term_loss32 = tf.reduce_mean(
# (tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[0, :] - self.genFGen3), 2), 1))) / (
# 1e-17 + tf.sqrt(
# 1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.genFGen2), 2), 1))))
#for i in range(1, self.batch_size):
# self.third_term_loss32 += tf.reduce_mean(
# (tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[i, :] - self.genFGen3), 2), 1))) / (
# 1e-17 + tf.sqrt(
# 1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.genFGen2), 2), 1))))
#self.third_term_loss12 = self.third_term_loss32 / self.batch_size
third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[0, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[0, :] - genFGen2), 2), 1))))
for i in range(1, config.batch_size2):
#for i in range(1, config.batch_size2+1):
third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((genFGen3[i, :] - genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((genFGen2[i, :] - genFGen2), 2), 1))))
third_term_loss12 = third_term_loss32 / config.batch_size2
# range(1, config.batch_size2)
# or range(1, config.batch_size2+1)?
# use range(1, config.batch_size2+1)?
# now use range(1, config.batch_size2+1)?
#print(traTrain_nlli)
#print(second_term_loss2)
#print(third_term_loss12)
#print('')
#print(traTrain_nlli.eval())
#print(second_term_loss2.eval())
#print(third_term_loss12.eval())
#print('')
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.firstTerm: self.sess.run(traTrain_nlli),
self.secondTerm: self.sess.run(second_term_loss2),
self.thirdTerm: self.sess.run(third_term_loss12),
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
#train_gen_para, train_jac = self.trainable_flow_model(self.genFgenFGen2)
#self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
# self.prior) / self.batch_size
#self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood / 10000000))) + (
# self.second_term_loss2) + (self.third_term_loss12)
'''
self.xData = self.inputs
self.genFgenFGen2 = self.flow_inv_model(self.z)
self.xData = tf.reshape(self.xData, [-1, 28 * 28])
self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
self.genFGen3 = self.z
self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
self.second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.xData), 2), 1)) ** 2)
for i in range(1, self.batch_size):
self.second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.xData), 2), 1)) ** 2)
self.second_term_loss2 /= self.batch_size
self.third_term_loss32 = tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[0, :] - self.genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.genFGen2), 2), 1))))
for i in range(1, self.batch_size):
self.third_term_loss32 += tf.reduce_mean(
(tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[i, :] - self.genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(
1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.genFGen2), 2), 1))))
self.third_term_loss12 = self.third_term_loss32 / self.batch_size
train_gen_para, train_jac = self.trainable_flow_model(self.genFgenFGen2)
self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac,
self.prior) / self.batch_size
self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood / 10000000))) + (
self.second_term_loss2) + (self.third_term_loss12)
'''
"""
#train_gen_para, train_jac = self.trainable_flow_model(inputs_tr_flow)
#self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac, self.prior) / self.batch_size
#z_myZ_myMyZ = np.random.logistic(loc=0., scale=1., size=(self.sample_num , self.z_dim))
#train_gen_para, train_jac = self.trainable_flow_model(self.flow_inv_model(z_myZ_myMyZ))
#print(self.inputs)
#print(self.sample_inputs)
#print(self.batch_size)
#print(self.sample_num)
#adfasdfsfsdfs
self.xData = self.inputs
#xData = xData.view(-1, 28 * 28)
#genFGen2 = genFGen2.view(-1, 28 * 28)
#genFGen3 = genFGen3.squeeze()
#self.genFgenFGen2 = self.flow_inv_model(self.z)
#self.genFgenFGen2 = self.flow_inv_model(self.z)
self.genFgenFGen2 = self.flow_inv_model(self.z)
#self.genFgenFGen2 = self.flow_inv_model(self.z)
#self.genFgenFGen2 = self.sampler_function(self.z)
#self.genFgenFGen2 = self.flow_inv_model(self.z)
#genFGen2 = genFgenFGen2
self.xData = tf.reshape(self.xData, [-1, 28*28])
self.genFGen2 = tf.reshape(self.genFgenFGen2, [-1, 28 * 28])
#print(self.z)
#adfasdfs
self.genFGen3 = self.z
self.genFGen3 = tf.reshape(self.genFGen3, [-1, 28 * 28])
#device = args.device
#second_term_loss2 = tf.zeros(1, device=device, requires_grad=False)
#print(tf.pow((genFGen2[0, :] - xData), 2))
#print(tf.reduce_sum(tf.pow((genFGen2[0, :] - xData), 2), 1))
#asdfadsfdsaf
#self.second_term_loss2 = tf.reduce_min(tf.sqrt(tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.xData), 2), 1)) ** 2)
self.second_term_loss2 = tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.xData), 2), 1)) ** 2)
#for i in range(self.batch_size):
for i in range(1, self.batch_size):
#second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)
#self.second_term_loss2 += tf.reduce_min(tf.sqrt(tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.xData), 2), 1)) ** 2)
self.second_term_loss2 += tf.reduce_min(
tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.xData), 2), 1)) ** 2)
self.second_term_loss2 /= self.batch_size
#second_term_loss2 = second_term_loss2.squeeze()
#third_term_loss32 = torch.empty(self.batch_size, device=device, requires_grad=False)
self.third_term_loss32 = tf.reduce_mean((tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[0, :] - self.genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[0, :] - self.genFGen2), 2), 1))))
#for i in range(self.batch_size):
for i in range(1, self.batch_size):
self.third_term_loss32 += tf.reduce_mean((tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen3[i, :] - self.genFGen3), 2), 1))) / (
1e-17 + tf.sqrt(1e-17 + tf.reduce_sum(tf.pow((self.genFGen2[i, :] - self.genFGen2), 2), 1))))
#third_term_loss32[i] = torch.mean(third_term_loss22)
#third_term_loss12 = torch.mean(third_term_loss32)
self.third_term_loss12 = self.third_term_loss32 / self.batch_size
#print(third_term_loss12)
#print(second_term_loss2)
#print(third_term_loss12)
#asdfasdf
#train_gen_para, train_jac = self.trainable_flow_model(self.flow_inv_model(self.z))
#train_gen_para, train_jac = self.trainable_flow_model(genFgenFGen2)
#train_gen_para, train_jac = self.trainable_flow_model(genFgenFGen2)
#train_gen_para, train_jac = self.trainable_flow_model(genFgenFGen2)
#train_gen_para, train_jac = self.flow_model(genFgenFGen2)
#asdfzsfd
#dfasz
#zdfasf
#train_gen_para, train_jac = self.flow_model(genFgenFGen2)
#train_gen_para, train_jac = self.flow_model(self.genFgenFGen2)
#train_gen_para, train_jac = self.flow_model(self.genFgenFGen2)
#train_gen_para, train_jac = self.flow_model(self.genFgenFGen2)
train_gen_para, train_jac = self.trainable_flow_model(self.genFgenFGen2)
#train_gen_para, train_jac = self.trainable_flow_model(self.flow_inv_model(self.z))
self.train_log_likelihood = nvp_op.log_likelihood(train_gen_para, train_jac, self.prior) / self.batch_size
#print((tf.reduce_mean(tf.exp(-self.train_log_likelihood))))
#asdfasdfasdfs
#self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood))) + (secondTerm) + (thirdTerm)
#self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood))) + (self.second_term_loss2) + (self.third_term_loss12)
#self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood))) + (self.second_term_loss2) + (self.third_term_loss12)
#self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood))) + (self.second_term_loss2) + (self.third_term_loss12)
self.train_log_likelihood = (tf.reduce_mean(tf.exp(-self.train_log_likelihood / 10000000))) + (self.second_term_loss2) + (
self.third_term_loss12)
#self.evaluate_neg_loglikelihood22(out, config)
#self.evaluate_neg_loglikelihood22(out, config)
#self.evaluate_neg_loglikelihood22(out, config)
"""
# -0.34090483
# -0.90332794
# -0.90332794
# 0.38768163
# Update G network
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={self.z: batch_z, self.y: batch_labels})
#self.writer.add_summary(summary_str, counter)
#errD_fake = self.d_loss_fake.eval({
# self.z: batch_z,
# self.y: batch_labels
#})
#errD_real = self.d_loss_real.eval({
# self.inputs: batch_images,
# self.y: batch_labels
#})
#errG = self.g_loss.eval({
# self.z: batch_z,
# self.y: batch_labels
#})
#errG = self.g_loss.eval({
# self.z: batch_z,
# self.y: batch_labels
#})
errG = self.g_loss.eval({
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels
})
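# errG re-evaluates the generator loss on the current batch purely for logging;
# it does not apply any gradients.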
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels,
# })
#self.writer.add_summary(summary_str, counter)
'''
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
'''
"""
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y: batch_labels,
})
self.writer.add_summary(summary_str, counter)
"""
else:
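# Note: this non-mnist branch still references d_optim, self.d_sum and the
# d_loss_* tensors that are disabled above, so it would fail as written.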
# Update D network
_, summary_str = self.sess.run([d_optim, self.d_sum],
feed_dict={self.inputs: batch_images, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={self.z: batch_z})
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({self.z: batch_z})
errD_real = self.d_loss_real.eval({self.inputs: batch_images})
errG = self.g_loss.eval({self.z: batch_z})
#print("[%8d Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f, g_loss: %.8f" \
# % (counter, epoch, config.epoch2, idx, batch_idxs,
# time.time() - start_time, errG))
#print("[%8d Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f, g_loss: %.8f" \
# % (counter, epoch, config.epoch2, idx, batch_idxs,
# time.time() - start_time, errG))
print("[%8d Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f, g_loss: %.8f" \
% (counter, epoch, config.epoch2, idx, batch_idxs,
time.time() - start_time, errG))
# You must feed a value for placeholder tensor 'real_images_1' with dtype float and shape [512,28,28,1]
# [[node real_images_1 (defined at /home/ndioneli/dirDirMyDir/mmNewFlow/moModel.py:152) ]]
# [[node add_3081 (defined at /home/ndioneli/dirDirMyDir/mmNewFlow/moModel.py:268) ]]
#print("[%8d Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
# % (counter, epoch, config.epoch2, idx, batch_idxs,
# time.time() - start_time, errD_fake + errD_real, errG))
if np.mod(counter, config.sample_freq2) == 0:
if config.dataset2 == 'mnist':
#errG = self.g_loss.eval({
# self.inputs: batch_images,
# self.z: batch_z,
# self.y: batch_labels
#})
#samples, g_loss = self.sess.run(
# [self.sampler, self.g_loss],
# feed_dict={
# self.z: sample_z,
# self.inputs: sample_inputs,
# self.y: sample_labels,
# }
#)
samples, g_loss = self.sess.run(
[self.sampler, self.g_loss],
feed_dict={
self.inputs: sample_inputs,
self.z: sample_z,
self.y: sample_labels,
}
)
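# Periodic sampling: runs the sampler graph on the fixed sample_z / sample_inputs
# defined above and reports the current generator loss.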
#print(np.shape(samples))
# (512, 28, 28, 1)
# here, (512, 28, 28, 1)
#print(samples.shape)
#print(samples.shape[0])
# (512, 28, 28, 1)
# here, (512, 28, 28, 1)
# (1024, 28, 28, 1)
# here, (1024, 28, 28, 1)
#samples, d_loss, g_loss = self.sess.run(
# [self.sampler, self.d_loss, self.g_loss],
# feed_dict={
# self.z: sample_z,
# self.inputs: sample_inputs,
# self.y: sample_labels,
# }
#)
#save_images(samples, image_manifold_size(samples.shape[0]),
# './{}/train_{:08d}.png'.format(config.sample_dir2, counter))
#print("[Sample] g_loss: %.8f" % (g_loss))
print("[Sample] g_loss: %.8f" % (g_loss))
#print("[Sample] g_loss: %.8f" % (g_loss))
#print("[Sample] g_loss: %.8f" % (g_loss))
#print("[Sample] g_loss: %.8f" % (g_loss))
#print("[Sample] g_loss: %.8f" % (g_loss))
#print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
else:
try:
samples, d_loss, g_loss = self.sess.run(
[self.sampler, self.d_loss, self.g_loss],
feed_dict={
self.z: sample_z,
self.inputs: sample_inputs,
},
)
save_images(samples, image_manifold_size(samples.shape[0]),
'./{}/train_{:08d}.png'.format(config.sample_dir2, counter))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
except:
print("one pic error!...")
if np.mod(counter, config.ckpt_freq2) == 0:
self.save(config.checkpoint_dir2, counter)
counter += 1
def discriminator(self, image, y=None, reuse=False):
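# Discriminator: a stack of strided conv + leaky-ReLU (+ batch norm) layers ending in
# a single logit; returns (sigmoid(logit), logit). When y_dim is set, the one-hot
# labels are concatenated into the feature maps (conv_cond_concat) and dense layers.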
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
if not self.y_dim:
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
return tf.nn.sigmoid(h4), h4
else:
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x = conv_cond_concat(image, yb)
h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))
h0 = conv_cond_concat(h0, yb)
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')))
h1 = tf.reshape(h1, [self.batch_size, -1])
h1 = concat([h1, y], 1)
h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin')))
h2 = concat([h2, y], 1)
h3 = linear(h2, 1, 'd_h3_lin')
return tf.nn.sigmoid(h3), h3
def generator(self, z, y=None):
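# Generator: projects z with a linear layer, reshapes it to a small spatial grid, then
# upsamples with deconv2d + batch norm + ReLU; the unconditional path ends in tanh,
# while the label-conditioned MNIST path concatenates y and ends in a sigmoid.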
with tf.variable_scope("generator") as scope:
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(
z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(
self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0))
self.h1, self.h1_w, self.h1_b = deconv2d(
h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1', with_w=True)
h1 = tf.nn.relu(self.g_bn1(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(
h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2))
h3, self.h3_w, self.h3_b = deconv2d(
h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3))
h4, self.h4_w, self.h4_b = deconv2d(
h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
return tf.nn.tanh(h4)
else:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
s_w2, s_w4 = int(s_w / 2), int(s_w / 4)
# yb = tf.expand_dims(tf.expand_dims(y, 1),2)
#yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
yb = tf.reshape(y[0:self.batch_size], [self.batch_size, 1, 1, self.y_dim])
#yb = tf.reshape(y, [self.sample_num, 1, 1, self.y_dim])
#z = concat([z, y], 1)
z = concat([z, y[0:self.batch_size]], 1)
h0 = tf.nn.relu(
self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin')))
#h0 = concat([h0, y], 1)
h0 = concat([h0, y[0:self.batch_size]], 1)
h1 = tf.nn.relu(self.g_bn1(
linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin')))
h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
h1 = conv_cond_concat(h1, yb)
h2 = tf.nn.relu(self.g_bn2(deconv2d(h1,
[self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2')))
h2 = conv_cond_concat(h2, yb)
return tf.nn.sigmoid(
deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))
def sampler(self, z, y=None):
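# Sampler: rebuilds the generator graph with reuse_variables() and batch norm in
# inference mode (train=False); used only to produce the periodic sample images.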
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
h0 = tf.reshape(
linear(z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin'),
[-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(h0, train=False))
h1 = deconv2d(h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1')
h1 = tf.nn.relu(self.g_bn1(h1, train=False))
h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2')
h2 = tf.nn.relu(self.g_bn2(h2, train=False))
h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3')
h3 = tf.nn.relu(self.g_bn3(h3, train=False))
h4 = deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4')
return tf.nn.tanh(h4)
else:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
s_w2, s_w4 = int(s_w / 2), int(s_w / 4)
# yb = tf.reshape(y, [-1, 1, 1, self.y_dim])
yb = tf.reshape(y[0:self.batch_size], [self.batch_size, 1, 1, self.y_dim])
z = concat([z, y[0:self.batch_size]], 1)
h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
h0 = concat([h0, y[0:self.batch_size]], 1)
h1 = tf.nn.relu(self.g_bn1(
linear(h0, self.gf_dim * 2 * s_h4 * s_w4, 'g_h1_lin'), train=False))
h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
h1 = conv_cond_concat(h1, yb)
h2 = tf.nn.relu(self.g_bn2(
deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'), train=False))
h2 = conv_cond_concat(h2, yb)
return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))
def load_mnist(self):
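# Loads the raw MNIST IDX files, concatenates the train and test splits, shuffles them
# with a fixed seed, and returns images scaled to [0, 1] plus one-hot label vectors.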
data_dir = os.path.join(self.data_dir, self.dataset_name+'_data')
#data_dir = os.path.join(data_dir, '_data')
fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float)
fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trY = loaded[8:].reshape((60000)).astype(np.float)
fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)
fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teY = loaded[8:].reshape((10000)).astype(np.float)
trY = np.asarray(trY)
teY = np.asarray(teY)
X = np.concatenate((trX, teX), axis=0)
y = np.concatenate((trY, teY), axis=0).astype(np.int)
seed = 547
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(y)
y_vec = np.zeros((len(y), self.y_dim), dtype=np.float)
for i, label in enumerate(y):
y_vec[i, y[i]] = 1.0
return X / 255., y_vec
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.dataset_name, self.batch_size,
self.output_height, self.output_width)
def save(self, checkpoint_dir, step, filename='model', ckpt=True, frozen=False):
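# Saves a TensorFlow checkpoint (and, if frozen=True, a frozen GraphDef containing the
# generator output node) under checkpoint_dir, tagging filenames with the batch size.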
# model_name = "DCGAN.model"
# checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
filename += '.b' + str(self.batch_size)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if ckpt:
self.saver.save(self.sess,
os.path.join(checkpoint_dir, filename),
global_step=step)
if frozen:
tf.train.write_graph(
tf.graph_util.convert_variables_to_constants(self.sess, self.sess.graph_def, ["generator_1/Tanh"]),
checkpoint_dir,
'{}-{:06d}_frz.pb'.format(filename, step),
as_text=False)
def load(self, checkpoint_dir):
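# Restores the latest checkpoint from checkpoint_dir, if one exists, and recovers the
# global step counter from the checkpoint filename suffix.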
# import re
print(" [*] Reading checkpoints...", checkpoint_dir)
# checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
# print(" ->", checkpoint_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
# counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
counter = int(ckpt_name.split('-')[-1])
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
| [
"[email protected]"
] | |
0584bb10c38dba33bffbd8942a3bc1da49985993 | d903801965f5a203360a989c5e5330160bb8f509 | /pragmatics_2/settings/base.py | 2de70505263bd2f12c487534bdb5eabadcb1482f | [] | no_license | matt700395/server_test | fbc63fe4a9aea29610089b3ec87dcf5834047d27 | c21618bdf4d7f38889410d3204c15e4c61c15a54 | refs/heads/master | 2023-07-25T02:12:24.187475 | 2021-08-11T01:51:30 | 2021-08-11T01:51:30 | 387,237,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | """
Django settings for pragmatics_2 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
"gkgk"
from pathlib import Path
import os, environ
from django.urls import reverse_lazy
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'accountapp',
'profileapp',
'articleapp',
'commentapp',
'projectapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pragmatics_2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pragmatics_2.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS=[
BASE_DIR / "static",
]
LOGIN_REDIRECT_URL = reverse_lazy('accountapp:hello_world')
LOGOUT_REDIRECT_URL = reverse_lazy('accountapp:login')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
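# Environment-specific settings are expected to extend this base module. A minimal
# sketch, assuming a sibling module such as settings/dev.py (the module name, env
# variable names and database choice are illustrative, not part of this project):
#
#   from .base import *
#
#   SECRET_KEY = env('DJANGO_SECRET_KEY')
#   DEBUG = env('DEBUG')
#   ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#   DATABASES = {'default': env.db(default='sqlite:///db.sqlite3')}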
| [
"[email protected]"
] | |
9143e1cbbe22494451daa57eb795cafba2ef2bff | cfb705f3727ff2f53288269ae37bd2cb6687951d | /build/CAFCore/SFramework/CMakeFiles/SFrameworkpycGen.py | 5c92ddbf3102a6b81ff16550aee99de8cd85f4ab | [] | no_license | alessio94/di-Higgs-analysis | 395934df01190998057f7c81775209c5d32f906e | 79c793cc819df7c8511c45f3efe6bdd10fd966bf | refs/heads/master | 2023-02-17T05:44:59.997960 | 2023-02-13T18:02:42 | 2023-02-13T18:02:42 | 224,252,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import py_compile; py_compile.compile( '/afs/cern.ch/work/a/apizzini/private/2022/nov/CAFbbll/CAFCore/SFramework/python/SFramework.py', cfile = '/afs/cern.ch/work/a/apizzini/private/2022/nov/CAFbbll/build/x86_64-centos7-gcc8-opt/python/SFramework/SFramework.pyc', doraise = True ) | [
"[email protected]"
] | |
0fa6083c10d10d0fd237b63dd89a046cd5d7ecf6 | 61efd764ae4586b6b2ee5e6e2c255079e2b01cfc | /azure-graphrbac/azure/graphrbac/models/graph_error.py | 0bfa9744e63d8b4251f72750eb08d46a828b3f80 | [
"MIT"
] | permissive | AutorestCI/azure-sdk-for-python | a3642f53b5bf79d1dbb77851ec56f4cc0c5b3b61 | 60b0726619ce9d7baca41f6cd38f741d74c4e54a | refs/heads/master | 2021-01-21T02:23:59.207091 | 2018-01-31T21:31:27 | 2018-01-31T21:31:27 | 55,251,306 | 4 | 3 | null | 2017-11-13T17:57:46 | 2016-04-01T17:48:48 | Python | UTF-8 | Python | false | false | 1,392 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class GraphError(Model):
"""Active Directory error information.
:param code: Error code.
:type code: str
:param message: Error message value.
:type message: str
"""
_attribute_map = {
'code': {'key': 'odata\\.error.code', 'type': 'str'},
'message': {'key': 'odata\\.error.message.value', 'type': 'str'},
}
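# The escaped keys map this model onto the Graph API's OData error payload:
# 'odata.error.code' and 'odata.error.message.value' in the JSON response body.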
def __init__(self, code=None, message=None):
self.code = code
self.message = message
class GraphErrorException(HttpOperationError):
"""Server responsed with exception of type: 'GraphError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(GraphErrorException, self).__init__(deserialize, response, 'GraphError', *args)
| [
"[email protected]"
] | |
797f75d711c35e8f51c4b63311f9c718f07a6b4c | 91b6b36c7eba4ef0f97eea76a32c297760e24034 | /games/migrations/0001_initial.py | a999330944ff2ace8f32685de6c1ac1b44d755f4 | [] | no_license | joescaos/Tienda-video-juegos | ddfbba3affdb4302077d205d1a6b408cc08cf670 | 8b05f15d655398f7efc77af126fe022fec6d3261 | refs/heads/main | 2023-02-11T05:43:33.950151 | 2021-01-07T03:06:01 | 2021-01-07T03:06:01 | 327,482,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # Generated by Django 3.0.8 on 2020-07-04 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
('imagen', models.ImageField(null=True, upload_to='games/')),
('descripcion', models.TextField()),
('precio', models.PositiveIntegerField()),
('categoria', models.CharField(choices=[('Juegos de accion', 'Juegos de accion'), ('Juegos de simulacion', 'Juegos de simulacion'), ('Juegos de deportes', 'Juegos de deportes'), ('Juegos de aventura', 'Juegos de aventura'), ('Juegos de plataformas', 'Juegos de plataformas'), ('Juegos de puzzle', 'Juegos de puzzle')], max_length=80, null=True)),
('existencia', models.PositiveIntegerField(null=True)),
],
),
]
| [
"[email protected]"
] | |
0d04fa2c27807dd543aa96a8eb34f13b5aa285a3 | 48b8ef4cb13195bd48c3bd741df407f4df7a7db1 | /py2vega/functions/date_time.py | 52cb563af5f98569f0a6115a6e91a96ee9e73dd4 | [
"BSD-3-Clause"
] | permissive | QuantStack/py2vega | 7c6d1e114e97f835ae2d3ef47950680d8a9b7e55 | 049f8a89adc4197a69a384160bbbb633c61abaf8 | refs/heads/master | 2021-06-22T22:38:32.885585 | 2021-03-03T09:46:31 | 2021-03-03T09:46:31 | 203,118,828 | 9 | 3 | BSD-3-Clause | 2021-03-03T08:32:02 | 2019-08-19T07:15:29 | Python | UTF-8 | Python | false | false | 4,165 | py | """Module that implements mocking Vega date and time functions."""
date_time_functions = [
'now', 'datetime', 'date', 'day', 'year', 'quarter', 'month', 'hours',
'minutes', 'seconds', 'milliseconds', 'time', 'timezoneoffset', 'utc',
'utcdate', 'utcday', 'utcyear', 'utcquarter', 'utcmonth', 'utchours',
'utcminutes', 'utcseconds', 'utcmilliseconds'
]
error_message = ' is a mocking function that is not supposed to be called directly'
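# These stubs exist only so that Python code handed to py2vega can reference Vega's
# date/time functions by name; the transpiler rewrites the calls into Vega expression
# syntax, and calling them directly in Python raises. A minimal usage sketch (the
# py2vega(func, whitelist) signature and 'datum' name are assumptions for illustration):
#
#   from py2vega import py2vega
#   from py2vega.functions.date_time import year
#
#   def recent(datum):
#       return year(datum.date) > 2000
#
#   expr = py2vega(recent, whitelist=['datum'])   # -> "year(datum.date) > 2000"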
def now():
"""Return the timestamp for the current time."""
raise RuntimeError('now' + error_message)
def datetime(year, month, day, hour, min, sec, millisec):
"""Return a new Date instance. The month is 0-based, such that 1 represents February."""
raise RuntimeError('datetime' + error_message)
def date(datetime):
"""Return the day of the month for the given datetime value, in local time."""
raise RuntimeError('date' + error_message)
def day(datetime):
"""Return the day of the week for the given datetime value, in local time."""
raise RuntimeError('day' + error_message)
def year(datetime):
"""Return the year for the given datetime value, in local time."""
raise RuntimeError('year' + error_message)
def quarter(datetime):
"""Return the quarter of the year (0-3): for the given datetime value, in local time."""
raise RuntimeError('quarter' + error_message)
def month(datetime):
"""Return the (zero-based): month for the given datetime value, in local time."""
raise RuntimeError('month' + error_message)
def hours(datetime):
"""Return the hours component for the given datetime value, in local time."""
raise RuntimeError('hours' + error_message)
def minutes(datetime):
"""Return the minutes component for the given datetime value, in local time."""
raise RuntimeError('minutes' + error_message)
def seconds(datetime):
"""Return the seconds component for the given datetime value, in local time."""
raise RuntimeError('seconds' + error_message)
def milliseconds(datetime):
"""Return the milliseconds component for the given datetime value, in local time."""
raise RuntimeError('milliseconds' + error_message)
def time(datetime):
"""Return the epoch-based timestamp for the given datetime value."""
raise RuntimeError('time' + error_message)
def timezoneoffset(datetime):
"""Return the timezone offset from the local timezone to UTC for the given datetime value."""
raise RuntimeError('timezoneoffset' + error_message)
def utc(year, month, day, hour, min, sec, millisec):
"""Return a timestamp for the given UTC date. The month is 0-based, such that 1 represents February."""
raise RuntimeError('utc' + error_message)
def utcdate(datetime):
"""Return the day of the month for the given datetime value, in UTC time."""
raise RuntimeError('utcdate' + error_message)
def utcday(datetime):
"""Return the day of the week for the given datetime value, in UTC time."""
raise RuntimeError('utcday' + error_message)
def utcyear(datetime):
"""Return the year for the given datetime value, in UTC time."""
raise RuntimeError('utcyear' + error_message)
def utcquarter(datetime):
"""Return the quarter of the year (0-3): for the given datetime value, in UTC time."""
raise RuntimeError('utcquarter' + error_message)
def utcmonth(datetime):
"""Return the (zero-based): month for the given datetime value, in UTC time."""
raise RuntimeError('utcmonth' + error_message)
def utchours(datetime):
"""Return the hours component for the given datetime value, in UTC time."""
raise RuntimeError('utchours' + error_message)
def utcminutes(datetime):
"""Return the minutes component for the given datetime value, in UTC time."""
raise RuntimeError('utcminutes' + error_message)
def utcseconds(datetime):
"""Return the seconds component for the given datetime value, in UTC time."""
raise RuntimeError('utcseconds' + error_message)
def utcmilliseconds(datetime):
"""Return the milliseconds component for the given datetime value, in UTC time."""
raise RuntimeError('utcmilliseconds' + error_message)
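# --- Editor's illustrative sketch (not part of the library) ---
# The functions above are name markers that py2vega can recognise and translate
# into Vega expressions; calling one directly from Python just raises:
if __name__ == "__main__":
    try:
        now()
    except RuntimeError as exc:
        print(exc)  # now is a mocking function that is not supposed to be called directly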
| [
"[email protected]"
] | |
ff1b3b0dca727f260be29d791c3eb863c60bb44c | e40111dda0ad509d474adfe4c52ae9b5525f388e | /show_weather/migrations/0001_initial.py | 257aa28f000c8c31d9e0659e17bdad9c05474fe7 | [] | no_license | XeyyamSherif/Weather-App | 2fb997fcfb5a6885ffffbf05e6ebe2127fd2bccf | 6de019cf289ff60d299b9f1e58c1f8c04fa3517f | refs/heads/master | 2023-01-23T06:57:55.655632 | 2020-12-04T20:10:42 | 2020-12-04T20:10:42 | 318,623,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py |
# Generated by Django 3.0.3 on 2020-03-01 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='added_cities',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city_name', models.CharField(max_length=100)),
('added_time', models.DateField()),
],
),
]
| [
"[email protected]"
] | |
a67658f0ba4e3957c2b07e683cf18f9d800d4d49 | 962b7a864f6a85d4418292be2ad3f3c58ae89400 | /docs/conf.py | 1f83660b90ecd881682aa1743e2a7298a72d00e1 | [
"MIT"
] | permissive | ArtusU/one_buy | eb415697d4d314f1a23f255b83486f75fa1f6adb | 5d74a691f78f162eb6b16d9c3a2049043c36b0b0 | refs/heads/master | 2023-08-20T20:31:41.931260 | 2021-09-11T11:51:00 | 2021-09-11T11:51:00 | 405,059,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "One Buy"
copyright = """2021, Artus U"""
author = "Artus U"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| [
"[email protected]"
] | |
746dbef62b516ff59dc04fa89bbb2f22bfa4ff8a | 82c6dedfe9040b453c22c3f93f1a2c9a922c988b | /ClusterFind/cluster_dbscan_precomputed_neighbours.py | 7a4f8cb9ec5f99d729549b5d0ffd0a3962f8b5cc | [] | no_license | njcuk9999/g_clustering | 8d34439fd78ef7017c0414c932d21cd19fc6551c | 20e6a6ab17c72c5652ae33125f7dabf4131aa8d5 | refs/heads/master | 2021-05-11T16:10:41.382938 | 2018-05-08T22:55:03 | 2018-05-08T22:55:03 | 117,753,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,436 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2018-01-16 at 14:13
@author: cook
Version 0.0.0
"""
import numpy as np
from astropy.table import Table
from sklearn.cluster import DBSCAN
from sklearn import metrics
import random
import matplotlib.pyplot as plt
import time
from sklearn.neighbors import NearestNeighbors
# =============================================================================
# Define variables
# =============================================================================
# Define paths
WORKSPACE = '/scratch/Projects/Gaia_clustering'
WRITEPATH = WORKSPACE + '/data/Sim/Simulation_simple.fits'
# -----------------------------------------------------------------------------
COLOURS = ['r', 'g', 'b', 'c', 'm', 'orange']
MARKERS = ['o', 's', '*', 'd', 'v', '<', '>', '^', 'h', 'D', 'p', '8']
SUBSET = True
SUBSETSIZE = 500000
DIMNAMES = ['X [pc]', 'Y [pc]', 'Z [pc]',
'U [mas/yr]', 'V [mas/yr]', 'W [mas/yr]']
# =============================================================================
# Define functions
# =============================================================================
def get_random_choices(array_length, num):
mask = random.choices(range(array_length), k=num)
return mask
def optimal_grid(num):
# get maximum shape
shape = int(np.ceil(np.sqrt(num)))
# get number of rows and columns based on maximum shape
if shape ** 2 == num:
nrows = shape
ncols = shape
else:
nrows = int(np.ceil(num / shape))
ncols = int(np.ceil(num / nrows))
# get position of figures
pos = []
for i in range(nrows):
for j in range(ncols):
pos.append([i, j])
# return nrows, ncols and positions
return nrows, ncols, pos
def plot_data(data, limits=None):
# get dimensions fitted
Ndim = data.shape[1]
# get ranges for graph plotting
range1 = range(Ndim-1)
range2 = range(1, Ndim)
# get optimal grid
nrows, ncols, pos = optimal_grid(len(range1))
# set up figure
fig, frames = plt.subplots(nrows=nrows, ncols=ncols)
# loop around dimensions (graph positions)
for it in range(len(range1)):
# get positions of dimensions in data
r1, r2 = range1[it], range2[it]
frame = frames[pos[it][0]][pos[it][1]]
# plot points
frame.plot(data[:, r1], data[:, r2], markersize=2,
marker='x', alpha=0.1,
zorder=1, color='k', linestyle='none')
# limits
if limits is not None:
frame.set(xlim=limits[it][:2], ylim=limits[it][2:])
# labels
frame.set(xlabel='{0}'.format(DIMNAMES[r1]),
ylabel='{0}'.format(DIMNAMES[r2]))
# title
plt.suptitle('Data before clustering')
# deal with blank frames
for it in range(len(range1), nrows * ncols):
frame = frames[pos[it][0]][pos[it][1]]
frame.axis('off')
def plot_dims(data, labels, n_clusters, kind='out', setlimits=None):
# get unique labels
unique_labels = np.unique(labels)
# get colour marker combinations
colours = np.tile(COLOURS, len(MARKERS))
markers = np.repeat(MARKERS, len(COLOURS))
# make sure we are not repeating
while len(unique_labels) > len(markers):
colours = np.repeat(colours, 2)
markers = np.repeat(markers, 2)
# get dimensions fitted
Ndim = data.shape[1]
# get ranges for graph plotting
range1 = range(Ndim-1)
range2 = range(1, Ndim)
# get optimal grid
nrows, ncols, pos = optimal_grid(len(range1))
# set up figure
fig, frames = plt.subplots(nrows=nrows, ncols=ncols)
# loop around dimensions (graph positions)
limits = []
for it in range(len(range1)):
# get positions of dimensions in data
r1, r2 = range1[it], range2[it]
frame = frames[pos[it][0]][pos[it][1]]
stats = [0.0, 0.0, 0.0, 0.0]
# loop around groups
for k_it in unique_labels:
# get members for this group
class_member_mask = (labels == k_it)
# if noise set the colour to black
if k_it == -1:
alpha = 0.1
zorder = 1
else:
alpha = 1.0
zorder = 2
# plot points in the core sample
xy = data[class_member_mask]
if k_it != -1:
frame.plot(xy[:, r1], xy[:, r2], markersize=2,
marker=markers[k_it], alpha=alpha,
zorder=zorder, color=colours[k_it], linestyle='none')
stats = find_min_max(xy[:, r1], xy[:, r2], *stats)
else:
frame.plot(xy[:, r1], xy[:, r2], markersize=2,
marker='x', alpha=alpha,
zorder=zorder, color='k', linestyle='none')
# set labels
frame.set(xlabel='{0}'.format(DIMNAMES[r1]),
ylabel='{0}'.format(DIMNAMES[r2]))
# set limits
if setlimits is None:
frame.set(xlim=stats[:2], ylim=stats[2:])
limits.append(stats)
else:
frame.set(xlim=setlimits[it][:2], ylim=setlimits[it][2:])
limits.append(setlimits[it])
# deal with blank frames
for it in range(len(range1), nrows * ncols):
frame = frames[pos[it][0]][pos[it][1]]
frame.axis('off')
if kind == 'in':
plt.suptitle('Simulated number of clusters: {0}'.format(n_clusters))
else:
plt.suptitle('Estimated number of clusters: {0}'.format(n_clusters))
return limits
def find_min_max(x, y, xmin, xmax, ymin, ymax, zoomout=0.10):
"""
Takes arrays of x and y and tests limits against previously defined limits
if limits are exceeded limits are changed with a zoom out factor
:param x: array, x values
    :param y: array, y values
:param xmin: float, old xmin value to be tested
:param xmax: float, old xmax value to be tested
:param ymin: float, old ymin value to be tested
:param ymax: float, old ymax value to be tested
    :param zoomout: float, fractional zoom-out factor, e.g. 0.05 = 5% zoom out;
                    use a negative value to zoom in, or 0 for no zoom out
:return:
"""
if len(x) != 0:
newxmin, newxmax = np.min(x), np.max(x)
diffx = newxmax - newxmin
if newxmin < xmin:
xmin = newxmin - zoomout * diffx
if newxmax > xmax:
xmax = newxmax + zoomout * diffx
if len(y) != 0:
newymin, newymax = np.min(y), np.max(y)
diffy = newymax - newymin
if newymin < ymin:
ymin = newymin - zoomout * diffy
if newymax > ymax:
ymax = newymax + zoomout * diffy
return xmin, xmax, ymin, ymax
def compare_results(groups, labels_true, labels):
ugroups = np.unique(groups)
newlabelgroup = dict()
for ugroup in ugroups:
# find the key for this ugroup
mask = groups == ugroup
in_num = np.sum(mask)
# make sure we only have one label per group (we should)
glabels = labels_true[mask]
if len(np.unique(glabels)) > 1:
raise ValueError('Group {0} has more than one key!'.format(ugroup))
else:
ulabel = glabels[0]
# get label mask
mask = labels_true == ulabel
# count the number of labels in group
comp = counter(labels[mask])
printlog('\t Group: {0} (Total = {1})'.format(ugroup, in_num))
for key in comp:
if key == -1:
ll = 'NOISE (G=-1)'
elif key in newlabelgroup:
ll = '{0} (G={1})'.format(newlabelgroup[key], key)
else:
ll = 'NEW (G={0})'.format(key)
printlog('\t\tlabel={0} number found={1}'.format(ll, comp[key]))
if key == -1:
newlabelgroup[key] = 'NOISE'
elif key not in newlabelgroup:
newlabelgroup[key] = ugroup
def counter(array):
ddict = dict()
for a in array:
if a not in ddict:
ddict[a] = 1
else:
ddict[a] += 1
# reverse sort by values
sort = np.argsort(list(ddict.values()))[::-1]
keys = np.array(list(ddict.keys()))[sort]
values = np.array(list(ddict.values()))[sort]
ddict2 = dict(zip(keys, values))
return ddict2
def printlog(message):
message = message.split('\n')
for mess in message:
unix_time = time.time()
human_time = time.strftime('%H:%M:%S', time.localtime(unix_time))
dsec = int((unix_time - int(unix_time)) * 100)
print('{0}.{1:02d} | {2}'.format(human_time, dsec, mess))
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# get the data
printlog("Loading data...")
rawdata = Table.read(WRITEPATH)
# apply subset to data
if SUBSET:
mask = get_random_choices(len(rawdata), SUBSETSIZE)
else:
mask = np.ones(len(rawdata['X']), dtype=bool)
rawdata = rawdata[mask]
# construct data matrix
data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z'],
rawdata['U'], rawdata['V'], rawdata['W']]).T
# data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z']]).T
# get the true labels and group names
labels_true = np.array(rawdata['row'])
groups = np.array(rawdata['group'])
# convert data to 32 bit
data = np.array(data, dtype=np.float32)
# get nearest neighbours
printlog('Work out nearest neighbours...')
start = time.time()
neigh = NearestNeighbors(radius=20, metric='euclidean')
neigh.fit(data)
neighbours = neigh.radius_neighbors_graph(data, mode='distance')
end = time.time()
printlog('\t Time taken = {0} s'.format(end - start))
# ----------------------------------------------------------------------
# DBscan example from :
# scikit-learn.org/stable/modules/clustering.html#dbscan
# http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan
# .html#sphx-glr-auto-examples-cluster-plot-dbscan-py
printlog("Calculating clustering using 'DBSCAN'...")
start = time.time()
sargs = dict(eps=10, min_samples=50, metric='precomputed')
db = DBSCAN(**sargs).fit(neighbours)
end = time.time()
# get mask and labels
labels = db.labels_
# report timing
printlog('\t Time taken = {0} s'.format(end - start))
# ----------------------------------------------------------------------
# stats
# Number of clusters in labels, ignoring noise if present.
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_clusters_true = len(set(labels_true)) - (1 if -1 in labels_true else 0)
printlog('\t Estimated number of clusters: {0}'.format(n_clusters))
#print stats
args = [labels_true, labels]
pargs = [metrics.homogeneity_score(*args),
metrics.completeness_score(*args),
metrics.v_measure_score(*args),
metrics.adjusted_rand_score(*args),
metrics.adjusted_mutual_info_score(*args)]
printlog("\t Homogeneity: {0:.3f}\n\t Completeness: {1:.3f}"
"\n\t V-measure: {2:.3f}\n\t Adjusted Rand Index: {3:.3f}"
"\n\t Adjusted Mutual Information: {4:.3f}".format(*pargs))
# ----------------------------------------------------------------------
# comparing results
printlog('Comparing results...')
compare_results(groups, labels_true, labels)
# ----------------------------------------------------------------------
# Plot result
printlog('Plotting graph...')
# dont plot all results
mask = get_random_choices(len(data), 100000)
limits = plot_dims(data[mask], labels[mask], n_clusters, kind='out')
limits = plot_dims(data[mask], labels_true[mask], n_clusters_true,
kind='in', setlimits=limits)
plot_data(data[mask], limits=limits)
plt.show()
plt.close()
# =============================================================================
# End of code
# =============================================================================
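# --- Editor's minimal sketch of the precomputed-neighbours pattern used above,
# --- run on synthetic blobs instead of the Gaia simulation (illustrative only).
# --- The neighbour-graph radius must be >= the DBSCAN eps, as in the main block
# --- (radius=20 vs eps=10), or genuine neighbours are absent from the graph.
def _demo_precomputed_dbscan():
    rng = np.random.RandomState(0)
    blob_a = rng.normal(loc=0.0, scale=1.0, size=(200, 6))
    blob_b = rng.normal(loc=30.0, scale=1.0, size=(200, 6))
    demo = np.vstack([blob_a, blob_b]).astype(np.float32)
    neigh = NearestNeighbors(radius=20, metric='euclidean').fit(demo)
    graph = neigh.radius_neighbors_graph(demo, mode='distance')
    db = DBSCAN(eps=10, min_samples=50, metric='precomputed').fit(graph)
    return np.unique(db.labels_)  # expected: two clusters, array([0, 1])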
| [
"[email protected]"
] | |
aa6d9a1822835aebdd5132c6ba6a72d4ff601275 | c49590eb7f01df37c8ec5fef00d0ffc7250fa321 | /openapi_client/models/market_details_quote.py | 9be5f6cec718e266059c4b82e375d42004f2e4ce | [] | no_license | harshad5498/ks-orderapi-python | 373a4b85a56ff97e2367eebd076f67f972e92f51 | 237da6fc3297c02e85f0fff1a34857aaa4c1d295 | refs/heads/master | 2022-12-09T19:55:21.938764 | 2020-09-03T05:22:51 | 2020-09-03T05:22:51 | 293,533,651 | 0 | 0 | null | 2020-09-07T13:19:25 | 2020-09-07T13:19:24 | null | UTF-8 | Python | false | false | 3,151 | py | # coding: utf-8
"""
KS Trade API's
The version of the OpenAPI document: 1.0
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class MarketDetailsQuote(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'depth': 'list[Depth]'
}
attribute_map = {
'depth': 'depth'
}
def __init__(self, depth=None, local_vars_configuration=None): # noqa: E501
"""MarketDetailsQuote - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._depth = None
self.discriminator = None
if depth is not None:
self.depth = depth
@property
def depth(self):
"""Gets the depth of this MarketDetailsQuote. # noqa: E501
:return: The depth of this MarketDetailsQuote. # noqa: E501
:rtype: list[Depth]
"""
return self._depth
@depth.setter
def depth(self, depth):
"""Sets the depth of this MarketDetailsQuote.
:param depth: The depth of this MarketDetailsQuote. # noqa: E501
:type depth: list[Depth]
"""
self._depth = depth
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarketDetailsQuote):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarketDetailsQuote):
return True
return self.to_dict() != other.to_dict()
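# --- Editor's illustrative sketch (not generated code) ---
# to_dict() recurses into nested models via their own to_dict(); with an empty
# depth list it returns the flat mapping, and __eq__/__ne__ compare those dicts.
if __name__ == "__main__":
    quote = MarketDetailsQuote(depth=[])
    print(quote.to_dict())                        # {'depth': []}
    print(quote == MarketDetailsQuote(depth=[]))  # True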
| [
"[email protected]"
] | |
a00eaff7c43f2734cc5e34167a1e3b2535928b9c | 3b9082ed8c0717d40165f5cc520937c23e9c49c0 | /lib/streamifiers/public_suffix.py | 672978920b94dfef4ec6ab269c416a679a78dd1c | [
"BSD-3-Clause"
] | permissive | CYBAI/compression-test | 2915c0d929be4689ba0df3beb1c19d808aef1405 | ea1306131e32f44f97b197550e7b3a5d7734ad0b | refs/heads/master | 2021-03-20T07:55:10.844035 | 2019-01-19T05:11:25 | 2019-01-19T05:11:25 | 247,191,443 | 1 | 0 | NOASSERTION | 2020-03-14T01:31:04 | 2020-03-14T01:31:03 | null | UTF-8 | Python | false | false | 1,103 | py | #!/usr/bin/env python
from collections import defaultdict
from . import BaseStreamifier, Stream
from publicsuffix import PublicSuffixList
class Streamifier(BaseStreamifier):
"""
Use the Public Suffix List <http://publicsuffix.org> to split the messages
into streams, one per direction per suffix.
"""
def __init__(self, procs):
BaseStreamifier.__init__(self, procs)
self.psl = PublicSuffixList()
def streamify(self, messages):
"""
Given a list of messages (each a req, res tuple), return a list of
Stream objects.
"""
reqs = defaultdict(list)
ress = defaultdict(list)
suffixes = []
for req, res in messages:
host = req[':host']
suffix = self.psl.get_public_suffix(host.split(":", 1)[0])
if suffix not in suffixes:
suffixes.append(suffix)
reqs[suffix].append((req, host))
ress[suffix].append((res, host))
streams = []
for suffix in suffixes:
streams.append(Stream(suffix, reqs[suffix], 'req', self.procs))
streams.append(Stream(suffix, ress[suffix], 'res', self.procs))
return streams | [
"[email protected]"
] | |
9cc14a0d8484b7a1274d7519a78757f8a1879fbc | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res_bw/scripts/common/lib/ctypes/test/test_numbers.py | d22b4be81fa25957e12f9a9ea14e9e9cab2b4b56 | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 6,859 | py | # 2015.11.18 12:02:44 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/ctypes/test/test_numbers.py
from ctypes import *
import unittest
import struct
def valid_ranges(*types):
result = []
for t in types:
fmt = t._type_
size = struct.calcsize(fmt)
a = struct.unpack(fmt, ('\x00' * 32)[:size])[0]
b = struct.unpack(fmt, ('\xff' * 32)[:size])[0]
c = struct.unpack(fmt, ('\x7f' + '\x00' * 32)[:size])[0]
d = struct.unpack(fmt, ('\x80' + '\xff' * 32)[:size])[0]
result.append((min(a, b, c, d), max(a, b, c, d)))
return result
ArgType = type(byref(c_int(0)))
unsigned_types = [c_ubyte,
c_ushort,
c_uint,
c_ulong]
signed_types = [c_byte,
c_short,
c_int,
c_long,
c_longlong]
bool_types = []
float_types = [c_double, c_float]
try:
c_ulonglong
c_longlong
except NameError:
pass
else:
unsigned_types.append(c_ulonglong)
signed_types.append(c_longlong)
try:
c_bool
except NameError:
pass
else:
bool_types.append(c_bool)
unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
bool_values = [True,
False,
0,
1,
-1,
5000,
'test',
[],
[1]]
class NumberTestCase(unittest.TestCase):
def test_default_init(self):
for t in signed_types + unsigned_types + float_types:
self.assertEqual(t().value, 0)
def test_unsigned_values(self):
for t, (l, h) in zip(unsigned_types, unsigned_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_signed_values(self):
for t, (l, h) in zip(signed_types, signed_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_bool_values(self):
from operator import truth
for t, v in zip(bool_types, bool_values):
self.assertEqual(t(v).value, truth(v))
def test_typeerror(self):
for t in signed_types + unsigned_types + float_types:
self.assertRaises(TypeError, t, '')
self.assertRaises(TypeError, t, None)
return
def test_from_param(self):
for t in signed_types + unsigned_types + float_types:
self.assertEqual(ArgType, type(t.from_param(0)))
def test_byref(self):
for t in signed_types + unsigned_types + float_types + bool_types:
parm = byref(t())
self.assertEqual(ArgType, type(parm))
def test_floats(self):
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
for t in float_types:
self.assertEqual(t(2.0).value, 2.0)
self.assertEqual(t(2).value, 2.0)
self.assertEqual(t(2L).value, 2.0)
self.assertEqual(t(f).value, 2.0)
def test_integers(self):
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
class IntLike(object):
def __int__(self):
return 2
i = IntLike()
for t in signed_types + unsigned_types:
self.assertRaises(TypeError, t, 3.14)
self.assertRaises(TypeError, t, f)
self.assertEqual(t(i).value, 2)
def test_sizes(self):
for t in signed_types + unsigned_types + float_types + bool_types:
try:
size = struct.calcsize(t._type_)
except struct.error:
continue
self.assertEqual(sizeof(t), size)
self.assertEqual(sizeof(t()), size)
def test_alignments(self):
for t in signed_types + unsigned_types + float_types:
code = t._type_
align = struct.calcsize('c%c' % code) - struct.calcsize(code)
self.assertEqual((code, alignment(t)), (code, align))
self.assertEqual((code, alignment(t())), (code, align))
def test_int_from_address(self):
from array import array
for t in signed_types + unsigned_types:
try:
array(t._type_)
except ValueError:
continue
a = array(t._type_, [100])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertEqual(type(v), t)
a[0] = 42
self.assertEqual(v.value, a[0])
def test_float_from_address(self):
from array import array
for t in float_types:
a = array(t._type_, [3.14])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
a[0] = 2.3456e+17
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
def test_char_from_address(self):
from ctypes import c_char
from array import array
a = array('c', 'x')
v = c_char.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertIs(type(v), c_char)
a[0] = '?'
self.assertEqual(v.value, a[0])
def test_init(self):
self.assertRaises(TypeError, c_int, c_long(42))
def test_float_overflow(self):
import sys
big_int = int(sys.float_info.max) * 2
for t in float_types + [c_longdouble]:
self.assertRaises(OverflowError, t, big_int)
if hasattr(t, '__ctype_be__'):
self.assertRaises(OverflowError, t.__ctype_be__, big_int)
if hasattr(t, '__ctype_le__'):
self.assertRaises(OverflowError, t.__ctype_le__, big_int)
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
_type_ = 'i'
__slots__ = []
def run_test(rep, msg, func, arg = None):
items = range(rep)
from time import clock
if arg is not None:
start = clock()
for i in items:
func(arg)
func(arg)
func(arg)
func(arg)
func(arg)
stop = clock()
else:
start = clock()
for i in items:
func()
func()
func()
func()
func()
stop = clock()
print '%15s: %.2f us' % (msg, (stop - start) * 1000000.0 / 5 / rep)
return
def check_perf():
from ctypes import c_int
REP = 200000
run_test(REP, 'int()', int)
run_test(REP, 'int(999)', int)
run_test(REP, 'c_int()', c_int)
run_test(REP, 'c_int(999)', c_int)
run_test(REP, 'c_int_S()', c_int_S)
run_test(REP, 'c_int_S(999)', c_int_S)
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\ctypes\test\test_numbers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:02:44 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
356c3d0d2080ca4ad61b6ecd2046b5002212c549 | 0dd881b86146eff46a99e3100a12addcb5b1bde9 | /weipinghui2019_zifuchuanxiangjia.py | 9450013b2f20ba2c8819ba996bb4dcb91ebcc8c8 | [] | no_license | BaijingML/leetcode | 8b04599ba6f1f9cf12fbb2726f6a1463a42f0a70 | 0ba37ea32ad71d9467f73da6f9e71971911f1d4c | refs/heads/master | 2020-03-22T05:07:17.884441 | 2020-01-10T12:13:54 | 2020-01-10T12:13:54 | 138,399,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.6
@Author : Zhangfusheng
@Time : 2019/8/18 12:04
@File : weipinghui2019_zifuchuanxiangjia
@Software: PyCharm
"""
if __name__ == "__main__":
s1, s2 = input(), input()
result = ""
add = 0
if len(s1) < len(s2):
s1, s2 = s2, s1
s1, s2 = s1[::-1], s2[::-1]
for index, i in enumerate(s1):
if index > len(s2) - 1:
b = 0
else:
b = int(s2[index])
result += str((int(s1[index]) + b + add) % 2)
if int(s1[index]) + b + add > 1:
add = 1
else:
add = 0
if add == 1:
result += str(1)
print(result[::-1])
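# --- Editor's sketch: the same carry logic wrapped in a (hypothetical) helper
# --- so it can be sanity-checked without reading stdin.
def add_binary(a, b):
    if len(a) < len(b):
        a, b = b, a
    a, b = a[::-1], b[::-1]
    out, carry = "", 0
    for i in range(len(a)):
        digit = int(a[i]) + (int(b[i]) if i < len(b) else 0) + carry
        out += str(digit % 2)
        carry = 1 if digit > 1 else 0
    if carry:
        out += "1"
    return out[::-1]
assert add_binary("101", "11") == "1000"  # 5 + 3 = 8
assert add_binary("1", "1") == "10"       # 1 + 1 = 2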
| [
"[email protected]"
] | |
899115ede16865c53b02a851dff925b61a1bf92a | 6eef7d400474384c9e36cafbbae95e3c34dbb6ad | /ben_kremer_clinvitae/urls.py | 61103ac498270fed074b36b27f1ff25b7c810666 | [] | no_license | codeAligned/clinvitae | 61d3c160e9dbc65d548818292681a27501d330ce | 4a75c14113dc562991c7d2d1a5812d2db91e2da0 | refs/heads/master | 2020-05-17T12:02:33.514187 | 2019-02-21T06:47:35 | 2019-02-21T06:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('genomic_variants.urls')),
]
| [
"[email protected]"
] | |
25d1da88d1366f1ccb7cbe8670e06b566940541c | c62c9f5cb72e23d9ac35260d9c556b35ae1861e4 | /collective/z3cform/html5widgets/widget_contenteditable.py | e3cdb08143c1fe06cda58e75623139d08489612d | [] | no_license | collective/collective.z3cform.html5widgets | 667cb567d1873cf0ca439df564df8c0cdf4ea6e6 | 3357495e8b445b5d75ccfc14608c55019b01bf6e | refs/heads/master | 2023-03-22T16:39:43.686088 | 2013-12-05T17:01:54 | 2013-12-05T17:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #-*- coding: utf-8 -*-
from zope import interface
import z3c.form.interfaces
import z3c.form.browser.widget
import z3c.form.widget
class IContentEditableWidget(z3c.form.interfaces.IWidget):
""" ContentEditable widget marker for z3c.form"""
class ContentEditableWidget(
z3c.form.browser.widget.HTMLTextInputWidget,
z3c.form.widget.Widget):
"""HTML widget contenteditable"""
interface.implementsOnly(IContentEditableWidget)
klass = u'html5-contenteditable-widget'
def update(self):
super(ContentEditableWidget, self).update()
z3c.form.browser.widget.addFieldClass(self)
def ContentEditableFieldWidget(field, request):
"""IFieldWidget factory for ContentEditableWidget."""
return z3c.form.widget.FieldWidget(field, ContentEditableWidget(request))
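# --- Editor's sketch of typical wiring (assumed usage, not part of this module):
# --- a form swaps in this widget for one field via the widgetFactory override.
from zope import schema
from z3c.form import field, form
class IDocument(interface.Interface):
    body = schema.Text(title=u"Body")
class DocumentEditForm(form.EditForm):
    fields = field.Fields(IDocument)
    fields['body'].widgetFactory = ContentEditableFieldWidget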
| [
"[email protected]"
] | |
2c61d2a6b698b46867da354fa68394e9d35fb506 | be9716e8a831305fa3b2fbedf96ee9aa8f3cfaff | /Heuristics/Better_HalideAutotuner.py | d0ae6c0cdd7ad0825fcc7d41ebc763aaa50d542a | [] | no_license | Ikraam/HalideAutotuner | 0b94e1c3b8de25cb11e67f69bc4697cf8f0b0f66 | 19153b72d2496bcdda980d8546bc9868f653d624 | refs/heads/master | 2020-03-11T11:03:55.880834 | 2018-06-13T05:04:30 | 2018-06-13T05:04:30 | 129,960,042 | 2 | 0 | null | 2018-05-03T12:29:33 | 2018-04-17T20:17:01 | Python | UTF-8 | Python | false | false | 14,100 | py | import hashlib
import Restrictions_.ReorderRestriction_ as RR
import Restrictions_.ParallelRestriction_ as PR
import Restrictions_.SplitRestriction_ as SR
import Restrictions_.TileRestriction_ as TR
import Restrictions_.ComputeAtRestriction_ as CR
import Restrictions_.StoreAtRestriction_ as StR
import Restrictions_.VectorizeRestriction_ as VR
import Restrictions_.UnrollRestriction_ as UR
import Restrictions_.FuseRestriction_ as FR
import Schedule
from Schedule import *
import Heuristics.Heuristic_best_reorder
from Heuristics.Heuristic_best_reorder import reorder_heuristique
import GenerationOfOptimizations.settings
from GenerationOfOptimizations.settings import *
def generate_schedules_heuristic(program, args):
order_optimizations = list()
# define the order of optimizations for your generated schedules
order_optimizations.append("Tile")
order_optimizations.append("Split")
order_optimizations.append("Reorder")
order_optimizations.append("Fuse")
order_optimizations.append("Parallel")
order_optimizations.append("Vectorize")
order_optimizations.append("Unroll")
order_optimizations.append("Compute_At")
order_optimizations.append("Store_At")
# Launch exploration with restrictions
schedule = Schedule.Schedule(list(), args)
settings.set_best_schedule(schedule)
settings.set_best_time_schedule(schedule.test_schedule(program.args, program.id))
restrictions=define_restrictions_phase_01(program, 4)
settings.set_limit(None)
settings.set_nb_schedule_explorer(0)
settings.store_generated_schedules(True, 10)
settings.append_and_explore_optim(schedule,program, program.id, restrictions,0,order_optimizations)
for schedule_time in settings.get_stored_schedules() :
schedule = schedule_time[0]
hill_climbing_tile_factors(schedule, program, len(schedule.optimizations)-1)
def go_left_right(schedule, program, index, type_factor) :
optim = schedule.optimizations[index]
print 'schedule medium :\n', schedule
time_middle = schedule.test_schedule(program.args,program.id)
print 'time_middle', time_middle
if type_factor == "tile_one" :
factor_in = optim.tile_factor_in
factor_out = optim.tile_factor_out
if (factor_out > 1) | (factor_in // 2 > 1) :
optim.tile_factor_in = factor_in // 2
print 'schedule left : \n',schedule
time_left = schedule.test_schedule(program.args,program.id)
else :
time_left = float('inf')
print 'time_left', time_left
if factor_in * 2 <= optim.variable_in.extent_var // 2 :
optim.tile_factor_in = factor_in * 2
print 'schedule right : \n', schedule
time_right = schedule.test_schedule(program.args, program.id)
print 'time_right', time_right
else :
time_right = float('inf')
print 'time_middle :{}, time_right :{}, time_left : {}'.format(time_middle, time_right, time_left)
if (time_middle <= time_left) & (time_middle <= time_right) :
optim.tile_factor_in = factor_in
return None
else :
if time_right <= time_left :
optim.tile_factor_in = factor_in * 2
return "right"
else :
optim.tile_factor_in = factor_in // 2
return "left"
if type_factor == "tile_two" :
factor_out = optim.tile_factor_out
factor_in = optim.tile_factor_in
if (factor_out // 2 > 1) | (factor_in > 1) :
optim.tile_factor_out = factor_out // 2
print 'schedule left : \n', schedule
time_left = schedule.test_schedule(program.args, program.id)
else :
time_left = float('inf')
if factor_out * 2 <= optim.variable_out.extent_var // 2 :
optim.tile_factor_out = factor_out * 2
print 'schedule right : \n', schedule
time_right = schedule.test_schedule(program.args, program.id)
else :
time_right = float('inf')
print 'time_middle :{}, time_right :{}, time_left : {}'.format(time_middle, time_right, time_left)
if (time_middle <= time_left) & (time_middle <= time_right) :
optim.tile_factor_out = factor_out
return None
else :
if time_right <= time_left :
optim.tile_factor_out = factor_out * 2
return "right"
else :
optim.tile_factor_out = factor_out // 2
return "left"
if type_factor == "split" :
factor = optim.split_factor
if factor // 2 > 1 :
optim.split_factor = factor // 2
time_left = schedule.test_schedule(program.args, program.id)
else :
time_left = float('inf')
if factor * 2 <= optim.variable.extent_var // 2 :
optim.split_factor = factor * 2
time_right = schedule.test_schedule(program.args, program.id)
else :
time_right = float('inf')
print 'time_middle :{}, time_right :{}, time_left : {}'.format(time_middle, time_right, time_left)
if (time_middle <= time_left) & (time_middle <= time_right) :
optim.split_factor = factor
return None
else :
if time_right <= time_left :
optim.split_factor = factor * 2
return "right"
else :
optim.split_factor = factor // 2
return "left"
def hill_climbing_tile_factors(schedule, program, index):
if index == -1 :
print schedule
time = schedule.test_schedule(program.args, program.id)
print time
return 'valide schedule'
if isinstance(schedule.optimizations[index], TileOptimization):
while (True):
direction = go_left_right(schedule, program, index, "tile_one")
if direction == None :
break
while (True):
direction = go_left_right(schedule, program, index, "tile_two")
if direction == None :
break
hill_climbing_tile_factors(schedule, program, index-1)
else :
if isinstance(schedule.optimizations[index], SplitOptimization):
if schedule.optimizations[index].split_factor > 1 :
while (True) :
direction = go_left_right(schedule,program, index,"split")
if direction == None :
break
hill_climbing_tile_factors(schedule, program, index-1)
else :
hill_climbing_tile_factors(schedule, program, index-1)
def define_restrictions_phase_01(program, cache_line_size):
restrictions = list()
best_reorder_function = dict()
# define restrictions over each consumer function
for function in program.functions :
if function.is_consumer() :
# disable fuse optimization
fuse_res = FuseLevelRestriction(function, False, False, False)
restrictions.append(fuse_res)
# disable the unrolling optimization
unroll_res = UnrollLevelsRestriction(function, False, False)
restrictions.append(unroll_res)
# set reorder restriction
# search for the best reorder
best_reorder_function[function.name_function] = reorder_heuristique(dict(), dict(), \
function.instruction, cache_line_size, \
program.functions, program.args, function, program.constantes, program.id)
splitted_variables = list()
tiled_variables = list()
# dictionary of : {var_name : var_object}
dict_vars_name_vars = function.vars_of_func_dict()
# tile when there's a data reuse
enable_reorder = True
if len(function.reuses) >= 2 :
variable_in_tile = function.reuses[0]
variable_out_tile = function.reuses[1]
# Tile with a fix tile factor = 16
tile_res = TR.TileFactorsRestriction(function, 16, 16, \
dict_vars_name_vars[variable_in_tile], \
dict_vars_name_vars[variable_out_tile],\
None, None, True, True, None, \
function.tile_level\
, True)
restrictions.append(tile_res)
# add tiled variables to tiled_variables list
tiled_variables.append(dict_vars_name_vars[variable_in_tile])
tiled_variables.append(dict_vars_name_vars[variable_out_tile])
# if nesting is bigger than 1 we disable the reordering for that specific function
'''if tile_res.nesting > 1 :
enable_reorder = False'''
# split vectorizable loop nest level
if (function.legal_vectorize != None) & (function.legal_vectorize not in function.reuses) :
# search for the variable to vectorize
variable_to_vectorize = dict_vars_name_vars[function.legal_vectorize]
# vectorize only the variable with an extent bigger than 4
if variable_to_vectorize.extent_var > 4 :
# fix vectorize to True
vectorize_res = VR.VectorizeFixRestriction(function, variable_to_vectorize.name_var ,\
True, True, True)
restrictions.append(vectorize_res)
# define a split restriction over the vectorized variable : split with a default factor
split_res = SR.SplitFactorRestriction(function, 16, variable_to_vectorize, 1, None,\
True, True, True)
restrictions.append(split_res)
# add the splitted variable
splitted_variables.append(variable_to_vectorize)
# split unrollable level
reorder_variables = best_reorder_function[function.name_function]
# check if the first level is vectorized. If it is so unroll it, otherwise unroll the second level
if reorder_variables[0] == function.legal_vectorize :
variable_to_unroll = None
if len(reorder_variables) >= 2 :
variable_to_unroll = dict_vars_name_vars[reorder_variables[1]]
else :
variable_to_unroll = dict_vars_name_vars[reorder_variables[0]]
if (variable_to_unroll != None) & (variable_to_unroll not in tiled_variables) :
if variable_to_unroll.extent_var > 4 :
split_res = SR.SplitFactorRestriction(function,16, variable_to_unroll, 1, None, True, \
True, True)
restrictions.append(split_res)
splitted_variables.append(variable_to_unroll)
# update the best reorder configuration with tiled variables
reorder_variable_names = reorder_variables
if len(tiled_variables) >= 2:
index_var_in_tile = reorder_variable_names.index(function.reuses[0])
index_var_out_tile = reorder_variable_names.index(function.reuses[1])
reorder_variable_names_new = reorder_variable_names[:index_var_in_tile]
reorder_variable_names_new.append(function.reuses[0]+'i')
reorder_variable_names_new.append(function.reuses[1]+'i')
reorder_variable_names_new = reorder_variable_names_new + \
reorder_variable_names[index_var_in_tile+1:index_var_out_tile]
reorder_variable_names_new.append(function.reuses[0]+'o')
reorder_variable_names_new.append(function.reuses[1]+'o')
reorder_variable_names_new = reorder_variable_names_new + reorder_variable_names\
[index_var_out_tile+1:]
reorder_variable_names = reorder_variable_names_new
# update the best reorder configuration with splitted variables
for var in function.list_variables :
if var not in splitted_variables :
split_restriction = SR.SplitFactorRestriction(function,None, var, 1, None, True, \
True, False)
restrictions.append(split_restriction)
for var in splitted_variables :
index_var_splitted = reorder_variable_names.index(var.name_var)
reorder_variable_names_new = reorder_variable_names[:index_var_splitted]
reorder_variable_names_new.append(var.name_var+'i')
reorder_variable_names_new.append(var.name_var+'o')
reorder_variable_names_new = reorder_variable_names_new + reorder_variable_names[\
index_var_splitted+1:]
reorder_variable_names = reorder_variable_names_new
# set the reorder_restriction
reorder_restriction = RR.ReorderFixRestriction(function, [reorder_variable_names],\
enable_reorder)
restrictions.append(reorder_restriction)
# set a Hill climbing restriction to compute_at and disable store_at optimization
for producer in program.functions :
if producer.name_function in function.list_producers :
compute_res = CR.ComputeAtHillClimbing(producer, function, True, True)
restrictions.append(compute_res)
store_res = StR.StoreAtEnableRestriction(producer, function, False)
restrictions.append(store_res)
return restrictions
| [
"="
] | = |
2bdb1b2385181fd239dace5a48e5ffa805a4bd4e | 3c582a006b945cd95974d910ab5b0ff551ab42fa | /tsuru_dashboard/auth/tests/test_change_password_form.py | 7842f5c31e26fd2cb7f2eda76671938ed23b4712 | [] | no_license | tsuru/tsuru-dashboard | f8be15a72366a5cefeadd4a3aac117ed760e85bc | c94b0b1a6ec30d7f59b939adcff41646bad00e87 | refs/heads/master | 2023-06-22T12:01:20.024933 | 2022-10-20T19:50:47 | 2022-10-20T19:50:47 | 5,112,553 | 119 | 60 | null | 2023-06-13T17:53:35 | 2012-07-19T16:31:42 | Python | UTF-8 | Python | false | false | 1,514 | py | from django.test import TestCase
from django.forms import PasswordInput
from tsuru_dashboard.auth.forms import ChangePasswordForm
class ChangePasswordFormTest(TestCase):
def test_form_is_valid(self):
data = {
"old": "old",
"new": "new",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertTrue(form.is_valid())
def test_old_is_required(self):
data = {
"new": "new",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_new_is_required(self):
data = {
"old": "old",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_confirm_is_required(self):
data = {
"old": "old",
"new": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_old_use_password_input(self):
old_field = ChangePasswordForm.base_fields['old']
self.assertIsInstance(old_field.widget, PasswordInput)
def test_new_use_password_input(self):
new_field = ChangePasswordForm.base_fields['new']
self.assertIsInstance(new_field.widget, PasswordInput)
def test_confirm_use_password_input(self):
confirm_field = ChangePasswordForm.base_fields['confirm']
self.assertIsInstance(confirm_field.widget, PasswordInput)
| [
"[email protected]"
] | |
18ec3b9176c0cadcb71400f69ac095ea871c5eee | b4339826d3def43a2553f0ac8d357ed393a8f471 | /apps/operation/models.py | 1447f0a0a33afa24c352bdb28c638b809f5acae7 | [] | no_license | buzzzzx/MultiUser_blog | e8c19537d29ab4d8bc97a2ca62703110adc9d683 | 1ff6c2c345051406b5862d902ca51939be755528 | refs/heads/master | 2021-08-08T11:47:06.666011 | 2017-11-10T08:51:28 | 2017-11-10T08:51:59 | 110,224,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | from django.db import models
from account.models import UserProfile
from blog.models import Post
# Create your models here.
class PostComment(models.Model):
    post = models.ForeignKey(Post, related_name='comments')  # reachable as post.comments.all()
    user = models.ForeignKey(UserProfile, related_name='blog_comments')  # all of a user's comments: user.blog_comments.all()
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return 'Comment by {} on {}'.format(self.user.username, self.post)
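# --- Editor's sketch (illustrative, not project code): the related_name values
# --- above create the reverse accessors mentioned in the field comments.
def example_reverse_lookups(post, user):
    """Given a Post and a UserProfile, show the reverse relations."""
    active_comments = post.comments.filter(active=True)  # comments on this post
    users_comments = user.blog_comments.all()            # every comment by this user
    return active_comments, users_comments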
| [
"[email protected]"
] | |
77916008cef97dbe592ed28bdeb1fc24ef507f5b | 770801815a644df6de1d252799be520f69e467be | /dataResearch.py | 6b536432e9bb4642b8725ba2d3387a16d122c71f | [] | no_license | chutianwen/CapitalOneHackerthon | ad2b693694945ff56fa5e2ebf1c3a00dfec75439 | 5337b954b529c03c87816e8927cf1620a26e8a49 | refs/heads/master | 2021-05-07T13:37:39.336866 | 2017-11-05T17:03:44 | 2017-11-05T17:03:44 | 109,598,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | from collections import Counter
with open("./Dataset/merchant_list.txt") as f:
text = f.read()
merchant_names = text.split(",")
text = text.lower()
text = text.replace('.', ' <PERIOD> ')
text = text.replace(',', ' <COMMA> ')
text = text.replace('"', ' <QUOTATION_MARK> ')
text = text.replace(';', ' <SEMICOLON> ')
text = text.replace('!', ' <EXCLAMATION_MARK> ')
text = text.replace('?', ' <QUESTION_MARK> ')
text = text.replace('(', ' <LEFT_PAREN> ')
text = text.replace(')', ' <RIGHT_PAREN> ')
text = text.replace('--', ' <HYPHENS> ')
text = text.replace('?', ' <QUESTION_MARK> ')
# text = text.replace('\n', ' <NEW_LINE> ')
text = text.replace(':', ' <COLON> ')
text = text.replace('&', ' <AND> ')
text = text.replace('-', ' <DASH> ')
words = text.split()
word_cnt = Counter(words)
print(len(word_cnt))
print(word_cnt)
# trim out unrelated words
unrelated_words = {'<AND>', '<DASH>', 'of', 'the', 'and', 'pa'}
word_cnt_trimmed = {word: word_cnt[word] for word in word_cnt
if word not in unrelated_words and 3 <= word_cnt[word] < 35}
print("Size of trimmed word_cnt:{}".format(len(word_cnt_trimmed)))
print(word_cnt_trimmed)
top_words = sorted(word_cnt_trimmed, key=word_cnt_trimmed.get, reverse=True)
print(top_words)
merchant_names_category = []
for merchant_name in merchant_names:
merchant_name_ori = merchant_name
merchant_name = merchant_name.replace("\"", "")
merchant_name = merchant_name.replace(".", " ")
    # match against the lowercased words, since top_words are lowercase
    merchant_name_words = merchant_name.lower().split()
    category = "other"
    for word in top_words:
        if word in merchant_name_words:
category = word
break
merchant_names_category.append([merchant_name_ori, category])
merchant_names_category.sort(key=lambda x: x[1])
categories = set(map(lambda x:x[1], merchant_names_category))
print("Categories:", categories)
with open("./Dataset/MerchantName_Category.txt", 'w') as f2:
f2.writelines("{}\t{}\n".format("Merchant Name", "Category"))
for item in merchant_names_category:
f2.writelines("{}\t{}\n".format(item[0], item[1]))
condense_category = {'inn': 'travel', } | [
"[email protected]"
] | |
791d0a9fae82a498c3c6ab479b99bb52d29b6763 | 130215e73cd45824fc5b7b2bc85949ce03115f20 | /py/portfol_classical050_1.py | 0607e55cae5a9a34dcf658afe57615f9c5a260ba | [] | no_license | felicitygong/MINLPinstances | 062634bf709a782a860234ec2daa7e6bf374371e | 1cd9c799c5758baa0818394c07adea84659c064c | refs/heads/master | 2022-12-06T11:58:14.141832 | 2022-12-01T17:17:35 | 2022-12-01T17:17:35 | 119,295,560 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 74,910 | py | # MINLP written by GAMS Convert at 11/10/17 15:35:22
#
# Equation counts
# Total E G L N X C B
# 104 52 0 52 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 151 101 50 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2851 2801 50 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
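# Objective: minimize a fixed linear function of x52..x101. All coefficients are negative,
# so this is equivalent to maximizing a weighted sum of those variables. The overall
# structure (linear objective, quadratic bound in c2, x <= b linking in c3..c52,
# cardinality cap in c54) resembles a cardinality-constrained portfolio model; that
# reading is an assumption and is not stated in the source.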
m.obj = Objective(expr= - 0.0399775*m.x52 - 0.0629738*m.x53 - 0.027838*m.x54 - 0.00361039*m.x55 - 0.0761837*m.x56
- 0.135299*m.x57 - 0.0122123*m.x58 - 0.0399709*m.x59 - 0.0256571*m.x60 - 0.0991766*m.x61
- 0.0210495*m.x62 - 0.044223*m.x63 - 0.0128715*m.x64 - 0.00399952*m.x65 - 0.0501755*m.x66
- 0.149247*m.x67 - 0.0613428*m.x68 - 0.041802*m.x69 - 0.0754226*m.x70 - 0.0434943*m.x71
- 0.10135*m.x72 - 0.15397*m.x73 - 0.0576577*m.x74 - 0.0340755*m.x75 - 0.0426673*m.x76
- 0.0298566*m.x77 - 0.0952893*m.x78 - 0.169485*m.x79 - 0.0440279*m.x80 - 0.0470473*m.x81
- 0.00699576*m.x82 - 0.127417*m.x83 - 0.126305*m.x84 - 0.0486665*m.x85 - 0.153319*m.x86
- 0.0202574*m.x87 - 0.0272516*m.x88 - 0.0695536*m.x89 - 0.030744*m.x90 - 0.0325349*m.x91
- 0.0163484*m.x92 - 0.0753619*m.x93 - 0.0271795*m.x94 - 0.0113752*m.x95 - 0.0394797*m.x96
- 0.123927*m.x97 - 0.00514876*m.x98 - 0.0380825*m.x99 - 0.142836*m.x100 - 0.0540865*m.x101
, sense=minimize)
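# c2: bounds the sum of squares of the auxiliary variables x2..x51 by 0.04.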
m.c2 = Constraint(expr=m.x2*m.x2 + m.x3*m.x3 + m.x4*m.x4 + m.x5*m.x5 + m.x6*m.x6 + m.x7*m.x7 + m.x8*m.x8 + m.x9*m.x9 +
m.x10*m.x10 + m.x11*m.x11 + m.x12*m.x12 + m.x13*m.x13 + m.x14*m.x14 + m.x15*m.x15 + m.x16*m.x16
+ m.x17*m.x17 + m.x18*m.x18 + m.x19*m.x19 + m.x20*m.x20 + m.x21*m.x21 + m.x22*m.x22 + m.x23*
m.x23 + m.x24*m.x24 + m.x25*m.x25 + m.x26*m.x26 + m.x27*m.x27 + m.x28*m.x28 + m.x29*m.x29 + m.x30
*m.x30 + m.x31*m.x31 + m.x32*m.x32 + m.x33*m.x33 + m.x34*m.x34 + m.x35*m.x35 + m.x36*m.x36 +
m.x37*m.x37 + m.x38*m.x38 + m.x39*m.x39 + m.x40*m.x40 + m.x41*m.x41 + m.x42*m.x42 + m.x43*m.x43
+ m.x44*m.x44 + m.x45*m.x45 + m.x46*m.x46 + m.x47*m.x47 + m.x48*m.x48 + m.x49*m.x49 + m.x50*
m.x50 + m.x51*m.x51 <= 0.04)
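# c3..c52: linking constraints; each x_i (i = 52..101) can be positive only when its
# paired binary b_(i+50) equals 1.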
m.c3 = Constraint(expr= m.x52 - m.b102 <= 0)
m.c4 = Constraint(expr= m.x53 - m.b103 <= 0)
m.c5 = Constraint(expr= m.x54 - m.b104 <= 0)
m.c6 = Constraint(expr= m.x55 - m.b105 <= 0)
m.c7 = Constraint(expr= m.x56 - m.b106 <= 0)
m.c8 = Constraint(expr= m.x57 - m.b107 <= 0)
m.c9 = Constraint(expr= m.x58 - m.b108 <= 0)
m.c10 = Constraint(expr= m.x59 - m.b109 <= 0)
m.c11 = Constraint(expr= m.x60 - m.b110 <= 0)
m.c12 = Constraint(expr= m.x61 - m.b111 <= 0)
m.c13 = Constraint(expr= m.x62 - m.b112 <= 0)
m.c14 = Constraint(expr= m.x63 - m.b113 <= 0)
m.c15 = Constraint(expr= m.x64 - m.b114 <= 0)
m.c16 = Constraint(expr= m.x65 - m.b115 <= 0)
m.c17 = Constraint(expr= m.x66 - m.b116 <= 0)
m.c18 = Constraint(expr= m.x67 - m.b117 <= 0)
m.c19 = Constraint(expr= m.x68 - m.b118 <= 0)
m.c20 = Constraint(expr= m.x69 - m.b119 <= 0)
m.c21 = Constraint(expr= m.x70 - m.b120 <= 0)
m.c22 = Constraint(expr= m.x71 - m.b121 <= 0)
m.c23 = Constraint(expr= m.x72 - m.b122 <= 0)
m.c24 = Constraint(expr= m.x73 - m.b123 <= 0)
m.c25 = Constraint(expr= m.x74 - m.b124 <= 0)
m.c26 = Constraint(expr= m.x75 - m.b125 <= 0)
m.c27 = Constraint(expr= m.x76 - m.b126 <= 0)
m.c28 = Constraint(expr= m.x77 - m.b127 <= 0)
m.c29 = Constraint(expr= m.x78 - m.b128 <= 0)
m.c30 = Constraint(expr= m.x79 - m.b129 <= 0)
m.c31 = Constraint(expr= m.x80 - m.b130 <= 0)
m.c32 = Constraint(expr= m.x81 - m.b131 <= 0)
m.c33 = Constraint(expr= m.x82 - m.b132 <= 0)
m.c34 = Constraint(expr= m.x83 - m.b133 <= 0)
m.c35 = Constraint(expr= m.x84 - m.b134 <= 0)
m.c36 = Constraint(expr= m.x85 - m.b135 <= 0)
m.c37 = Constraint(expr= m.x86 - m.b136 <= 0)
m.c38 = Constraint(expr= m.x87 - m.b137 <= 0)
m.c39 = Constraint(expr= m.x88 - m.b138 <= 0)
m.c40 = Constraint(expr= m.x89 - m.b139 <= 0)
m.c41 = Constraint(expr= m.x90 - m.b140 <= 0)
m.c42 = Constraint(expr= m.x91 - m.b141 <= 0)
m.c43 = Constraint(expr= m.x92 - m.b142 <= 0)
m.c44 = Constraint(expr= m.x93 - m.b143 <= 0)
m.c45 = Constraint(expr= m.x94 - m.b144 <= 0)
m.c46 = Constraint(expr= m.x95 - m.b145 <= 0)
m.c47 = Constraint(expr= m.x96 - m.b146 <= 0)
m.c48 = Constraint(expr= m.x97 - m.b147 <= 0)
m.c49 = Constraint(expr= m.x98 - m.b148 <= 0)
m.c50 = Constraint(expr= m.x99 - m.b149 <= 0)
m.c51 = Constraint(expr= m.x100 - m.b150 <= 0)
m.c52 = Constraint(expr= m.x101 - m.b151 <= 0)
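# c53: the variables x52..x101 must sum to exactly 1.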
m.c53 = Constraint(expr= m.x52 + m.x53 + m.x54 + m.x55 + m.x56 + m.x57 + m.x58 + m.x59 + m.x60 + m.x61 + m.x62 + m.x63
+ m.x64 + m.x65 + m.x66 + m.x67 + m.x68 + m.x69 + m.x70 + m.x71 + m.x72 + m.x73 + m.x74 + m.x75
+ m.x76 + m.x77 + m.x78 + m.x79 + m.x80 + m.x81 + m.x82 + m.x83 + m.x84 + m.x85 + m.x86 + m.x87
+ m.x88 + m.x89 + m.x90 + m.x91 + m.x92 + m.x93 + m.x94 + m.x95 + m.x96 + m.x97 + m.x98 + m.x99
+ m.x100 + m.x101 == 1)
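# c54: at most 10 of the binary indicators b102..b151 may take the value 1.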
m.c54 = Constraint(expr= m.b102 + m.b103 + m.b104 + m.b105 + m.b106 + m.b107 + m.b108 + m.b109 + m.b110 + m.b111
+ m.b112 + m.b113 + m.b114 + m.b115 + m.b116 + m.b117 + m.b118 + m.b119 + m.b120 + m.b121
+ m.b122 + m.b123 + m.b124 + m.b125 + m.b126 + m.b127 + m.b128 + m.b129 + m.b130 + m.b131
+ m.b132 + m.b133 + m.b134 + m.b135 + m.b136 + m.b137 + m.b138 + m.b139 + m.b140 + m.b141
+ m.b142 + m.b143 + m.b144 + m.b145 + m.b146 + m.b147 + m.b148 + m.b149 + m.b150 + m.b151
<= 10)
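# c55..c104: each auxiliary variable x2..x51 is set equal to a fixed linear combination
# of x52..x101 (one equality constraint per auxiliary variable).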
m.c55 = Constraint(expr= - m.x2 + 0.437623*m.x52 + 0.00776152*m.x53 + 0.00831088*m.x54 - 0.00522971*m.x55
+ 0.015015*m.x56 - 0.0107741*m.x57 - 0.00662896*m.x58 - 0.00824877*m.x59 + 0.00953726*m.x60
- 0.0162102*m.x61 + 0.06876*m.x62 + 0.0307553*m.x63 + 0.00493869*m.x64 + 0.00905031*m.x65
+ 0.00428006*m.x66 + 0.0159505*m.x67 + 0.0372772*m.x68 + 0.00356282*m.x69 + 0.0102555*m.x70
- 0.0161653*m.x71 - 0.00678775*m.x72 - 0.000991393*m.x73 + 0.0104307*m.x74 - 0.00554627*m.x75
+ 0.000275614*m.x76 + 0.00146767*m.x77 - 0.0219202*m.x78 - 0.0152471*m.x79 - 0.0133041*m.x80
+ 0.00532027*m.x81 + 0.0190296*m.x82 + 9.52152E-5*m.x83 - 0.0180784*m.x84 + 0.00127079*m.x85
- 0.00331643*m.x86 - 0.0107273*m.x87 - 6.72321E-5*m.x88 + 0.0019753*m.x89 - 0.00561942*m.x90
- 0.0137411*m.x91 + 0.0266953*m.x92 + 0.0039322*m.x93 + 0.0312023*m.x94 + 0.00475029*m.x95
+ 0.00458043*m.x96 - 0.0111713*m.x97 + 0.00233202*m.x98 + 0.00279105*m.x99 + 0.00588268*m.x100
+ 0.0171354*m.x101 == 0)
m.c56 = Constraint(expr= - m.x3 + 0.00776152*m.x52 + 0.305432*m.x53 + 0.0022503*m.x54 + 0.0131826*m.x55 + 0.013322*m.x56
+ 0.0622902*m.x57 + 0.00612167*m.x58 + 0.00797614*m.x59 + 0.00886071*m.x60 - 0.0285042*m.x61
+ 0.003025*m.x62 + 0.0159085*m.x63 - 0.00357187*m.x64 + 0.0016128*m.x65 + 0.012642*m.x66
+ 0.119815*m.x67 + 0.00505566*m.x68 + 0.0131274*m.x69 + 0.00269972*m.x70 + 0.00899326*m.x71
+ 0.0193615*m.x72 + 0.114117*m.x73 + 0.0118212*m.x74 + 0.00695719*m.x75 - 0.00146012*m.x76
- 0.00455327*m.x77 - 0.00233478*m.x78 - 0.00354018*m.x79 - 0.0108257*m.x80 + 0.00548427*m.x81
+ 0.00843954*m.x82 + 0.0957415*m.x83 + 0.0724208*m.x84 + 0.00920314*m.x85 - 0.00921773*m.x86
+ 0.0112775*m.x87 + 0.010577*m.x88 - 0.00268772*m.x89 + 0.0104329*m.x90 - 0.00184253*m.x91
+ 0.0230614*m.x92 + 0.0797692*m.x93 - 0.00718849*m.x94 + 0.00668562*m.x95 - 0.00479877*m.x96
+ 0.037467*m.x97 - 0.000833339*m.x98 - 0.00287641*m.x99 - 0.00540049*m.x100 + 0.0133618*m.x101
== 0)
m.c57 = Constraint(expr= - m.x4 + 0.00831088*m.x52 + 0.0022503*m.x53 + 0.179315*m.x54 + 0.0238256*m.x55
- 0.00566425*m.x56 - 0.0137602*m.x57 + 0.00878864*m.x58 + 0.0166554*m.x59 + 0.0152274*m.x60
- 0.0193213*m.x61 + 0.0171146*m.x62 + 0.0117301*m.x63 + 0.0108599*m.x64 + 0.011655*m.x65
- 0.00502711*m.x66 + 0.011192*m.x67 + 0.0247138*m.x68 + 0.00188025*m.x69 + 0.00635281*m.x70
+ 0.0217042*m.x71 + 0.0189843*m.x72 - 0.00893642*m.x73 + 0.020493*m.x74 + 0.0060982*m.x75
+ 0.00709161*m.x76 + 0.0192029*m.x77 + 0.00489188*m.x78 + 0.0141398*m.x79 + 0.0183881*m.x80
+ 0.0132555*m.x81 + 0.0089825*m.x82 - 0.00433095*m.x83 + 0.000368443*m.x84 + 0.00845006*m.x85
+ 0.0106863*m.x86 + 0.0165343*m.x87 + 0.0182906*m.x88 + 0.000474699*m.x89 + 0.0125524*m.x90
+ 0.00998269*m.x91 + 0.00663781*m.x92 - 0.00941355*m.x93 + 0.0166904*m.x94 + 0.00602889*m.x95
+ 0.00224387*m.x96 - 0.00806098*m.x97 + 0.0151626*m.x98 - 0.000965771*m.x99 + 0.0157379*m.x100
+ 0.0187837*m.x101 == 0)
m.c58 = Constraint(expr= - m.x5 - 0.00522971*m.x52 + 0.0131826*m.x53 + 0.0238256*m.x54 + 0.220297*m.x55
+ 0.0243861*m.x56 - 0.00430317*m.x57 + 0.0174604*m.x58 + 0.00681665*m.x59 + 0.0242063*m.x60
+ 0.00144938*m.x61 + 0.015222*m.x62 + 0.014716*m.x63 + 0.00177302*m.x64 + 0.0176392*m.x65
+ 0.021276*m.x66 + 0.00889693*m.x67 + 0.00407666*m.x68 + 0.00949954*m.x69 + 0.00937267*m.x70
+ 0.0242093*m.x71 + 0.00460206*m.x72 - 0.00745268*m.x73 + 0.0160821*m.x74 + 0.00240536*m.x75
+ 0.0042418*m.x76 + 0.00264811*m.x77 + 0.00832847*m.x78 + 0.0040175*m.x79 + 0.0153818*m.x80
+ 0.0182359*m.x81 + 0.00961571*m.x82 + 0.0122098*m.x83 - 0.000558226*m.x84 + 0.0179991*m.x85
+ 0.0126379*m.x86 + 0.0175827*m.x87 + 0.00566779*m.x88 - 0.000955585*m.x89 + 0.0234718*m.x90
- 0.00128625*m.x91 + 0.00397589*m.x92 + 0.00253364*m.x93 + 0.0161477*m.x94 + 0.0163612*m.x95
+ 0.012804*m.x96 + 0.0254602*m.x97 + 0.0164285*m.x98 + 0.0113336*m.x99 + 0.00992279*m.x100
+ 0.00909239*m.x101 == 0)
m.c59 = Constraint(expr= - m.x6 + 0.015015*m.x52 + 0.013322*m.x53 - 0.00566425*m.x54 + 0.0243861*m.x55 + 0.404084*m.x56
+ 0.058688*m.x57 + 0.0144003*m.x58 + 0.0371145*m.x59 + 0.0227472*m.x60 + 0.0120821*m.x61
+ 0.00730434*m.x62 + 0.0238735*m.x63 + 0.00933373*m.x64 + 0.0051169*m.x65 + 0.0488881*m.x66
+ 0.0227134*m.x67 + 0.00590284*m.x68 + 0.0335068*m.x69 + 0.0167733*m.x70 + 0.044455*m.x71
+ 0.069787*m.x72 + 0.040347*m.x73 + 0.039664*m.x74 + 0.0102778*m.x75 + 0.0172657*m.x76
+ 0.00473961*m.x77 + 0.0132399*m.x78 - 0.0118559*m.x79 + 0.0329745*m.x80 + 0.00776731*m.x81
+ 0.00146596*m.x82 + 0.0398038*m.x83 + 0.0268424*m.x84 + 0.0120171*m.x85 + 0.0145295*m.x86
+ 0.0354297*m.x87 - 0.00170776*m.x88 + 0.0255113*m.x89 + 0.0115797*m.x90 + 0.0340249*m.x91
+ 0.00175196*m.x92 + 0.0214384*m.x93 + 0.0113414*m.x94 + 0.039091*m.x95 + 0.00619763*m.x96
+ 0.0133319*m.x97 + 0.0121082*m.x98 + 0.0357203*m.x99 + 0.0381607*m.x100 + 0.0203578*m.x101
== 0)
m.c60 = Constraint(expr= - m.x7 - 0.0107741*m.x52 + 0.0622902*m.x53 - 0.0137602*m.x54 - 0.00430317*m.x55
+ 0.058688*m.x56 + 0.452644*m.x57 + 0.0193845*m.x58 + 0.0341649*m.x59 + 0.00602161*m.x60
+ 0.0583255*m.x61 - 0.00423459*m.x62 + 0.016241*m.x63 + 0.0157118*m.x64 - 0.00370551*m.x65
+ 0.0511023*m.x66 + 0.148921*m.x67 + 0.0156037*m.x68 + 0.0155171*m.x69 + 0.0112086*m.x70
+ 0.030702*m.x71 + 0.0216234*m.x72 + 0.105953*m.x73 + 0.0128583*m.x74 + 0.00399753*m.x75
+ 0.0184167*m.x76 + 0.010492*m.x77 + 0.0244629*m.x78 + 0.047228*m.x79 + 0.00547127*m.x80
+ 0.0133769*m.x81 + 0.0119332*m.x82 + 0.161483*m.x83 + 0.187982*m.x84 + 0.00916881*m.x85
+ 0.0209491*m.x86 + 0.0327261*m.x87 + 0.028455*m.x88 + 0.0105724*m.x89 + 0.0238296*m.x90
- 0.00223337*m.x91 + 0.0230382*m.x92 + 0.112083*m.x93 + 0.00257709*m.x94 - 0.0088657*m.x95
+ 0.0101284*m.x96 + 0.0087194*m.x97 + 0.016345*m.x98 + 0.0145296*m.x99 + 0.00606395*m.x100
+ 0.00747571*m.x101 == 0)
m.c61 = Constraint(expr= - m.x8 - 0.00662896*m.x52 + 0.00612167*m.x53 + 0.00878864*m.x54 + 0.0174604*m.x55
+ 0.0144003*m.x56 + 0.0193845*m.x57 + 0.28381*m.x58 + 0.0129912*m.x59 + 0.00711013*m.x60
+ 0.023726*m.x61 + 0.0135222*m.x62 + 0.00245137*m.x63 + 0.0139941*m.x64 + 0.0146659*m.x65
- 0.000316803*m.x66 + 0.0195659*m.x67 + 0.0130298*m.x68 + 0.0143949*m.x69 - 0.0152357*m.x70
+ 0.0229109*m.x71 + 0.0178969*m.x72 + 0.00747729*m.x73 + 0.0262*m.x74 + 0.0176229*m.x75
+ 0.0184672*m.x76 + 0.00333289*m.x77 + 0.0125282*m.x78 + 0.0160426*m.x79 - 0.00910903*m.x80
+ 0.0168617*m.x81 + 0.00649361*m.x82 + 0.000720061*m.x83 + 0.0015496*m.x84 + 0.0120757*m.x85
+ 0.0231367*m.x86 + 0.0160891*m.x87 + 0.000127307*m.x88 + 0.00590674*m.x89 + 0.0251974*m.x90
+ 0.0109883*m.x91 + 0.0197048*m.x92 + 0.00281047*m.x93 + 0.0113665*m.x94 + 0.0128475*m.x95
+ 0.00622782*m.x96 + 0.0245605*m.x97 + 0.00706149*m.x98 + 0.00272192*m.x99 + 0.00300911*m.x100
+ 0.0133916*m.x101 == 0)
m.c62 = Constraint(expr= - m.x9 - 0.00824877*m.x52 + 0.00797614*m.x53 + 0.0166554*m.x54 + 0.00681665*m.x55
+ 0.0371145*m.x56 + 0.0341649*m.x57 + 0.0129912*m.x58 + 0.189607*m.x59 + 0.0210316*m.x60
+ 0.00633527*m.x61 + 0.00869335*m.x62 + 0.031581*m.x63 - 0.00230763*m.x64 + 0.00682721*m.x65
+ 0.0158862*m.x66 + 0.016982*m.x67 + 0.0111502*m.x68 + 0.0375819*m.x69 + 0.0223572*m.x70
+ 0.0434772*m.x71 + 0.0304477*m.x72 + 0.00554913*m.x73 + 0.0268377*m.x74 + 0.00229807*m.x75
+ 0.01809*m.x76 + 0.0114054*m.x77 + 0.0148192*m.x78 + 0.0286969*m.x79 + 0.0156643*m.x80
+ 0.0214673*m.x81 + 0.00423722*m.x82 + 0.0101393*m.x83 + 0.00438509*m.x84 + 0.0186319*m.x85
+ 0.046181*m.x86 + 0.0332107*m.x87 + 0.0160758*m.x88 + 0.00541803*m.x89 + 0.0243196*m.x90
+ 0.0145438*m.x91 + 0.00473001*m.x92 + 0.00681241*m.x93 + 0.00988793*m.x94 + 0.0149668*m.x95
+ 0.023562*m.x96 + 0.0173729*m.x97 + 0.016267*m.x98 + 0.0121424*m.x99 - 0.00299957*m.x100
+ 0.00907044*m.x101 == 0)
m.c63 = Constraint(expr= - m.x10 + 0.00953726*m.x52 + 0.00886071*m.x53 + 0.0152274*m.x54 + 0.0242063*m.x55
+ 0.0227472*m.x56 + 0.00602161*m.x57 + 0.00711013*m.x58 + 0.0210316*m.x59 + 0.186866*m.x60
+ 0.00832283*m.x61 + 0.0180258*m.x62 + 0.0154265*m.x63 + 0.0114402*m.x64 + 0.0209618*m.x65
+ 0.0173064*m.x66 - 0.000705565*m.x67 + 0.0143527*m.x68 + 0.0248206*m.x69 + 0.0181781*m.x70
+ 0.0279005*m.x71 + 0.0285813*m.x72 + 0.00289351*m.x73 + 0.0153119*m.x74 + 0.00890117*m.x75
+ 0.0222796*m.x76 + 0.0442301*m.x77 + 0.0119004*m.x78 + 0.00720201*m.x79 + 0.0201433*m.x80
+ 0.0169933*m.x81 + 0.019457*m.x82 + 0.0111733*m.x83 + 0.00689119*m.x84 + 0.00669496*m.x85
+ 0.0331297*m.x86 + 0.0197397*m.x87 + 0.0120744*m.x88 + 0.0127905*m.x89 + 0.0406861*m.x90
+ 0.0323148*m.x91 + 0.0200869*m.x92 + 0.00172542*m.x93 + 0.0311244*m.x94 + 0.00519737*m.x95
+ 0.0142684*m.x96 + 0.0178041*m.x97 + 0.00992985*m.x98 + 0.0146222*m.x99 + 0.00920343*m.x100
+ 0.0199828*m.x101 == 0)
m.c64 = Constraint(expr= - m.x11 - 0.0162102*m.x52 - 0.0285042*m.x53 - 0.0193213*m.x54 + 0.00144938*m.x55
+ 0.0120821*m.x56 + 0.0583255*m.x57 + 0.023726*m.x58 + 0.00633527*m.x59 + 0.00832283*m.x60
+ 0.63428*m.x61 - 0.00280448*m.x62 - 0.00545788*m.x63 - 0.00396523*m.x64 - 0.0183861*m.x65
+ 0.0180971*m.x66 + 0.00513145*m.x67 + 0.00613144*m.x68 - 0.0110514*m.x69 + 0.0194917*m.x70
+ 0.00495793*m.x71 + 0.0244718*m.x72 + 0.00915034*m.x73 - 0.000197643*m.x74 - 0.00657968*m.x75
- 0.00738206*m.x76 + 0.0105229*m.x77 - 0.0124412*m.x78 - 0.00440667*m.x79 + 0.0123441*m.x80
+ 0.00670955*m.x81 + 0.000975768*m.x82 + 0.0409171*m.x83 - 0.0110323*m.x84 - 0.00482281*m.x85
- 0.00546107*m.x86 - 0.0142879*m.x87 + 0.018699*m.x88 + 0.0440906*m.x89 - 0.00363253*m.x90
+ 0.00273765*m.x91 + 0.00673168*m.x92 + 0.0033605*m.x93 + 0.0241296*m.x94 - 0.00441557*m.x95
- 0.00703875*m.x96 + 0.016325*m.x97 + 0.00222896*m.x98 - 0.0077883*m.x99 - 0.00313691*m.x100
+ 0.0264584*m.x101 == 0)
m.c65 = Constraint(expr= - m.x12 + 0.06876*m.x52 + 0.003025*m.x53 + 0.0171146*m.x54 + 0.015222*m.x55 + 0.00730434*m.x56
- 0.00423459*m.x57 + 0.0135222*m.x58 + 0.00869335*m.x59 + 0.0180258*m.x60 - 0.00280448*m.x61
+ 0.316413*m.x62 + 0.0323352*m.x63 - 0.00236891*m.x64 + 0.00787061*m.x65 + 0.0149546*m.x66
+ 0.0036316*m.x67 - 0.0116267*m.x68 + 0.032345*m.x69 - 0.000144027*m.x70 - 0.00218381*m.x71
+ 0.00530167*m.x72 + 0.000497945*m.x73 + 0.0156557*m.x74 + 0.0127479*m.x75 + 0.0111445*m.x76
+ 0.0085222*m.x77 - 0.00157042*m.x78 + 0.00905753*m.x79 - 0.00402737*m.x80 + 0.00937755*m.x81
+ 0.00827346*m.x82 + 0.00543371*m.x83 + 0.0230998*m.x84 + 0.0238731*m.x85 + 0.0199311*m.x86
+ 0.0174054*m.x87 + 0.00185204*m.x88 + 0.0156839*m.x89 + 0.00443354*m.x90 + 0.0202129*m.x91
+ 0.0114171*m.x92 + 0.00122747*m.x93 + 0.0118384*m.x94 + 0.0228483*m.x95 + 0.0131884*m.x96
- 0.0151598*m.x97 + 0.00844519*m.x98 + 0.0198609*m.x99 + 0.0242712*m.x100 + 0.0138048*m.x101
== 0)
m.c66 = Constraint(expr= - m.x13 + 0.0307553*m.x52 + 0.0159085*m.x53 + 0.0117301*m.x54 + 0.014716*m.x55
+ 0.0238735*m.x56 + 0.016241*m.x57 + 0.00245137*m.x58 + 0.031581*m.x59 + 0.0154265*m.x60
- 0.00545788*m.x61 + 0.0323352*m.x62 + 0.187022*m.x63 + 0.00222855*m.x64 + 0.00747903*m.x65
+ 0.0223879*m.x66 + 0.0408618*m.x67 + 0.00998685*m.x68 + 0.0255*m.x69 + 0.0234902*m.x70
+ 0.0410056*m.x71 + 0.0457515*m.x72 + 0.0404933*m.x73 + 0.0173727*m.x74 + 0.0186957*m.x75
+ 0.0206278*m.x76 + 0.0197312*m.x77 + 0.0258626*m.x78 + 0.0281149*m.x79 + 0.020796*m.x80
+ 0.0154147*m.x81 + 0.00821687*m.x82 + 0.0277493*m.x83 + 0.0231334*m.x84 + 0.0242186*m.x85
+ 0.0562299*m.x86 + 0.0315629*m.x87 + 0.0122553*m.x88 + 0.0146058*m.x89 + 0.0225422*m.x90
+ 0.0126094*m.x91 + 0.0195556*m.x92 + 0.0148528*m.x93 + 0.016949*m.x94 + 0.0309886*m.x95
+ 0.0111695*m.x96 + 0.023004*m.x97 + 0.00865625*m.x98 + 0.0218181*m.x99 + 0.0268327*m.x100
+ 0.0203605*m.x101 == 0)
m.c67 = Constraint(expr= - m.x14 + 0.00493869*m.x52 - 0.00357187*m.x53 + 0.0108599*m.x54 + 0.00177302*m.x55
+ 0.00933373*m.x56 + 0.0157118*m.x57 + 0.0139941*m.x58 - 0.00230763*m.x59 + 0.0114402*m.x60
- 0.00396523*m.x61 - 0.00236891*m.x62 + 0.00222855*m.x63 + 0.221194*m.x64 + 0.0104987*m.x65
+ 0.0399316*m.x66 - 0.000811365*m.x67 + 0.00762929*m.x68 - 0.0044099*m.x69 + 0.0198057*m.x70
+ 0.00234582*m.x71 - 0.0069834*m.x72 + 0.00152018*m.x73 - 0.00484524*m.x74 + 0.0034154*m.x75
- 0.0060451*m.x76 + 0.0102102*m.x77 + 0.019147*m.x78 + 0.00861968*m.x79 - 0.0013634*m.x80
+ 0.00686903*m.x81 + 0.0133687*m.x82 + 0.00136495*m.x83 + 0.00888952*m.x84 + 0.00809492*m.x85
+ 0.00573295*m.x86 + 0.00828577*m.x87 + 0.0152408*m.x88 + 0.0110413*m.x89 + 0.0069969*m.x90
+ 0.0053944*m.x91 + 0.0104813*m.x92 - 0.00694263*m.x93 + 0.0141714*m.x94 - 0.00184581*m.x95
+ 0.0147295*m.x96 - 0.00369236*m.x97 + 0.00526228*m.x98 + 0.00828497*m.x99 - 0.0189632*m.x100
+ 0.0101028*m.x101 == 0)
m.c68 = Constraint(expr= - m.x15 + 0.00905031*m.x52 + 0.0016128*m.x53 + 0.011655*m.x54 + 0.0176392*m.x55
+ 0.0051169*m.x56 - 0.00370551*m.x57 + 0.0146659*m.x58 + 0.00682721*m.x59 + 0.0209618*m.x60
- 0.0183861*m.x61 + 0.00787061*m.x62 + 0.00747903*m.x63 + 0.0104987*m.x64 + 0.172607*m.x65
+ 0.010781*m.x66 + 0.0114342*m.x67 + 0.00907137*m.x68 + 0.0104462*m.x69 + 0.0151955*m.x70
+ 0.00458498*m.x71 + 0.0183508*m.x72 - 0.0158535*m.x73 + 0.0070277*m.x74 + 0.00809957*m.x75
+ 0.0120566*m.x76 + 0.0156797*m.x77 + 0.019146*m.x78 + 0.0230557*m.x79 + 0.00625971*m.x80
+ 0.0154784*m.x81 + 0.0113709*m.x82 - 0.00207874*m.x83 - 0.00747722*m.x84 + 0.00726553*m.x85
+ 0.037832*m.x86 + 0.0123555*m.x87 - 0.000156492*m.x88 + 0.0119264*m.x89 + 0.0124128*m.x90
+ 0.0206051*m.x91 + 0.0182519*m.x92 - 0.0063393*m.x93 + 0.0162264*m.x94 + 0.0114734*m.x95
+ 0.0298746*m.x96 + 0.00393739*m.x97 + 0.0153743*m.x98 + 0.00989917*m.x99 + 0.0228823*m.x100
+ 0.017772*m.x101 == 0)
m.c69 = Constraint(expr= - m.x16 + 0.00428006*m.x52 + 0.012642*m.x53 - 0.00502711*m.x54 + 0.021276*m.x55
+ 0.0488881*m.x56 + 0.0511023*m.x57 - 0.000316803*m.x58 + 0.0158862*m.x59 + 0.0173064*m.x60
+ 0.0180971*m.x61 + 0.0149546*m.x62 + 0.0223879*m.x63 + 0.0399316*m.x64 + 0.010781*m.x65
+ 0.30953*m.x66 + 0.0123346*m.x67 - 0.00454343*m.x68 + 0.00554417*m.x69 + 0.0322368*m.x70
+ 0.0122026*m.x71 + 0.0154661*m.x72 + 0.0109601*m.x73 + 0.0128077*m.x74 + 0.00710322*m.x75
+ 0.0100525*m.x76 + 0.0141544*m.x77 - 0.00302889*m.x78 + 0.0202446*m.x79 + 0.0273331*m.x80
+ 0.0142628*m.x81 + 0.0130754*m.x82 + 0.00886564*m.x83 + 0.0125267*m.x84 + 0.00167144*m.x85
+ 0.0368131*m.x86 + 0.0135909*m.x87 - 0.000550234*m.x88 + 0.0369853*m.x89 + 0.00970355*m.x90
+ 0.0253109*m.x91 + 0.01371*m.x92 + 0.0151066*m.x93 + 0.0201164*m.x94 + 0.0193544*m.x95
+ 0.0166079*m.x96 + 0.0113423*m.x97 + 0.0488179*m.x98 + 0.016393*m.x99 - 0.00100315*m.x100
+ 0.0101386*m.x101 == 0)
m.c70 = Constraint(expr= - m.x17 + 0.0159505*m.x52 + 0.119815*m.x53 + 0.011192*m.x54 + 0.00889693*m.x55
+ 0.0227134*m.x56 + 0.148921*m.x57 + 0.0195659*m.x58 + 0.016982*m.x59 - 0.000705565*m.x60
+ 0.00513145*m.x61 + 0.0036316*m.x62 + 0.0408618*m.x63 - 0.000811365*m.x64 + 0.0114342*m.x65
+ 0.0123346*m.x66 + 0.506241*m.x67 + 0.025301*m.x68 + 0.0356088*m.x69 + 0.0108864*m.x70
+ 0.0190276*m.x71 + 0.0288312*m.x72 + 0.12559*m.x73 + 0.0213959*m.x74 + 0.0275661*m.x75
+ 0.0260354*m.x76 + 0.00490195*m.x77 - 8.95127E-5*m.x78 + 0.0278101*m.x79 + 0.0154943*m.x80
+ 0.0110009*m.x81 + 0.0209885*m.x82 + 0.129895*m.x83 + 0.104593*m.x84 + 0.0164835*m.x85
+ 0.0238469*m.x86 + 0.0319592*m.x87 + 0.016159*m.x88 - 0.00048612*m.x89 + 0.0206697*m.x90
- 0.0044719*m.x91 + 0.0412523*m.x92 + 0.150222*m.x93 + 0.0060731*m.x94 + 0.00469106*m.x95
+ 0.032667*m.x96 + 0.00513266*m.x97 + 0.00884207*m.x98 + 0.0125003*m.x99 - 0.00578404*m.x100
+ 0.0225237*m.x101 == 0)
m.c71 = Constraint(expr= - m.x18 + 0.0372772*m.x52 + 0.00505566*m.x53 + 0.0247138*m.x54 + 0.00407666*m.x55
+ 0.00590284*m.x56 + 0.0156037*m.x57 + 0.0130298*m.x58 + 0.0111502*m.x59 + 0.0143527*m.x60
+ 0.00613144*m.x61 - 0.0116267*m.x62 + 0.00998685*m.x63 + 0.00762929*m.x64 + 0.00907137*m.x65
- 0.00454343*m.x66 + 0.025301*m.x67 + 0.272867*m.x68 + 0.013367*m.x69 + 0.0153675*m.x70
+ 0.0202051*m.x71 + 0.0334085*m.x72 + 0.0195246*m.x73 + 0.0119803*m.x74 + 0.0131243*m.x75
+ 0.009587*m.x76 + 0.00326145*m.x77 + 0.0055836*m.x78 + 0.0160137*m.x79 - 0.00700837*m.x80
+ 0.00816694*m.x81 + 0.0133907*m.x82 + 0.00598212*m.x83 - 0.00201041*m.x84 + 0.0153712*m.x85
+ 0.00839091*m.x86 + 0.00597115*m.x87 - 0.000508298*m.x88 - 0.00265155*m.x89 + 0.0148232*m.x90
- 0.000660928*m.x91 + 0.0219128*m.x92 + 0.0200429*m.x93 + 0.00803816*m.x94 + 0.0174527*m.x95
+ 0.00328568*m.x96 + 0.00580133*m.x97 - 0.000537323*m.x98 + 0.0127107*m.x99 + 0.0134156*m.x100
+ 0.00882735*m.x101 == 0)
m.c72 = Constraint(expr= - m.x19 + 0.00356282*m.x52 + 0.0131274*m.x53 + 0.00188025*m.x54 + 0.00949954*m.x55
+ 0.0335068*m.x56 + 0.0155171*m.x57 + 0.0143949*m.x58 + 0.0375819*m.x59 + 0.0248206*m.x60
- 0.0110514*m.x61 + 0.032345*m.x62 + 0.0255*m.x63 - 0.0044099*m.x64 + 0.0104462*m.x65
+ 0.00554417*m.x66 + 0.0356088*m.x67 + 0.013367*m.x68 + 0.243112*m.x69 + 0.00434594*m.x70
+ 0.057792*m.x71 + 0.0294945*m.x72 + 0.030868*m.x73 + 0.0219596*m.x74 + 0.00928365*m.x75
+ 0.0279232*m.x76 + 0.0138525*m.x77 + 0.0582128*m.x78 + 0.0225874*m.x79 + 0.0216165*m.x80
+ 0.0188341*m.x81 + 0.0113276*m.x82 + 0.0272881*m.x83 + 0.0118425*m.x84 + 0.0244022*m.x85
+ 0.0305204*m.x86 + 0.0378227*m.x87 + 0.00150342*m.x88 + 0.000336096*m.x89 + 0.0330899*m.x90
+ 0.0189859*m.x91 + 0.0161305*m.x92 + 0.00657093*m.x93 + 0.0118269*m.x94 + 0.0262376*m.x95
+ 0.0229703*m.x96 + 0.0245122*m.x97 + 0.00497315*m.x98 + 0.0222552*m.x99 + 0.00180371*m.x100
+ 0.00323067*m.x101 == 0)
m.c73 = Constraint(expr= - m.x20 + 0.0102555*m.x52 + 0.00269972*m.x53 + 0.00635281*m.x54 + 0.00937267*m.x55
+ 0.0167733*m.x56 + 0.0112086*m.x57 - 0.0152357*m.x58 + 0.0223572*m.x59 + 0.0181781*m.x60
+ 0.0194917*m.x61 - 0.000144027*m.x62 + 0.0234902*m.x63 + 0.0198057*m.x64 + 0.0151955*m.x65
+ 0.0322368*m.x66 + 0.0108864*m.x67 + 0.0153675*m.x68 + 0.00434594*m.x69 + 0.486402*m.x70
+ 0.0205735*m.x71 + 0.0176842*m.x72 + 0.016224*m.x73 + 0.029091*m.x74 + 0.0174387*m.x75
+ 0.0237535*m.x76 + 0.0139083*m.x77 + 0.0112918*m.x78 + 0.0315031*m.x79 + 0.0104372*m.x80
+ 0.0253639*m.x81 + 0.00237959*m.x82 - 0.00567431*m.x83 + 0.0125939*m.x84 + 0.0195843*m.x85
+ 0.0768331*m.x86 + 0.0267106*m.x87 + 0.00312045*m.x88 + 0.00720686*m.x89 + 0.0261195*m.x90
+ 0.0295481*m.x91 - 0.00121588*m.x92 + 0.00174197*m.x93 + 0.000971523*m.x94 + 0.016521*m.x95
+ 0.0242338*m.x96 + 0.0387835*m.x97 + 0.0249114*m.x98 + 0.0106646*m.x99 - 0.0157855*m.x100
+ 0.0165385*m.x101 == 0)
m.c74 = Constraint(expr= - m.x21 - 0.0161653*m.x52 + 0.00899326*m.x53 + 0.0217042*m.x54 + 0.0242093*m.x55
+ 0.044455*m.x56 + 0.030702*m.x57 + 0.0229109*m.x58 + 0.0434772*m.x59 + 0.0279005*m.x60
+ 0.00495793*m.x61 - 0.00218381*m.x62 + 0.0410056*m.x63 + 0.00234582*m.x64 + 0.00458498*m.x65
+ 0.0122026*m.x66 + 0.0190276*m.x67 + 0.0202051*m.x68 + 0.057792*m.x69 + 0.0205735*m.x70
+ 0.30309*m.x71 + 0.0477266*m.x72 + 0.0307124*m.x73 + 0.0320937*m.x74 + 0.00895684*m.x75
+ 0.0261585*m.x76 + 0.0224334*m.x77 + 0.0281506*m.x78 + 0.0324489*m.x79 + 0.0266137*m.x80
+ 0.0183526*m.x81 - 0.0016676*m.x82 + 0.0194921*m.x83 + 0.0366494*m.x84 + 0.0166731*m.x85
+ 0.0415684*m.x86 + 0.0425512*m.x87 + 0.0185632*m.x88 + 0.0150068*m.x89 + 0.0206301*m.x90
+ 0.00808519*m.x91 - 0.00805047*m.x92 + 0.0108192*m.x93 + 0.01367*m.x94 + 0.0348135*m.x95
+ 0.0320515*m.x96 + 0.0132639*m.x97 - 0.00327629*m.x98 + 0.0267494*m.x99 + 0.0178498*m.x100
+ 0.0295494*m.x101 == 0)
m.c75 = Constraint(expr= - m.x22 - 0.00678775*m.x52 + 0.0193615*m.x53 + 0.0189843*m.x54 + 0.00460206*m.x55
+ 0.069787*m.x56 + 0.0216234*m.x57 + 0.0178969*m.x58 + 0.0304477*m.x59 + 0.0285813*m.x60
+ 0.0244718*m.x61 + 0.00530167*m.x62 + 0.0457515*m.x63 - 0.0069834*m.x64 + 0.0183508*m.x65
+ 0.0154661*m.x66 + 0.0288312*m.x67 + 0.0334085*m.x68 + 0.0294945*m.x69 + 0.0176842*m.x70
+ 0.0477266*m.x71 + 0.574196*m.x72 + 0.0396485*m.x73 + 0.0302363*m.x74 + 0.0130538*m.x75
+ 0.02932*m.x76 + 0.0266188*m.x77 + 0.0279647*m.x78 + 0.0180419*m.x79 + 0.0293269*m.x80
+ 0.02223*m.x81 + 0.00413185*m.x82 + 0.0241439*m.x83 + 0.0263683*m.x84 - 0.0132754*m.x85
+ 0.0388595*m.x86 + 0.0578838*m.x87 + 0.00722557*m.x88 + 0.0210916*m.x89 + 0.0335768*m.x90
- 0.00914657*m.x91 + 0.0153621*m.x92 + 0.0170669*m.x93 + 0.00771841*m.x94 + 0.0161467*m.x95
+ 0.0470226*m.x96 + 0.0696792*m.x97 + 0.00688465*m.x98 + 0.0406248*m.x99 - 0.00265226*m.x100
+ 0.0216914*m.x101 == 0)
m.c76 = Constraint(expr= - m.x23 - 0.000991393*m.x52 + 0.114117*m.x53 - 0.00893642*m.x54 - 0.00745268*m.x55
+ 0.040347*m.x56 + 0.105953*m.x57 + 0.00747729*m.x58 + 0.00554913*m.x59 + 0.00289351*m.x60
+ 0.00915034*m.x61 + 0.000497945*m.x62 + 0.0404933*m.x63 + 0.00152018*m.x64 - 0.0158535*m.x65
+ 0.0109601*m.x66 + 0.12559*m.x67 + 0.0195246*m.x68 + 0.030868*m.x69 + 0.016224*m.x70
+ 0.0307124*m.x71 + 0.0396485*m.x72 + 0.567664*m.x73 + 0.0167088*m.x74 + 0.00851376*m.x75
+ 0.0194063*m.x76 - 0.00258911*m.x77 + 0.000352563*m.x78 + 0.0170447*m.x79 + 0.00326757*m.x80
+ 0.0111415*m.x81 + 0.0158008*m.x82 + 0.10889*m.x83 + 0.116075*m.x84 + 0.0169971*m.x85
+ 0.0341233*m.x86 + 0.0267429*m.x87 - 0.0114268*m.x88 - 0.00234199*m.x89 + 0.0350183*m.x90
- 0.00327782*m.x91 + 0.0234788*m.x92 + 0.0976326*m.x93 + 0.000202835*m.x94 + 0.00567421*m.x95
+ 0.0334415*m.x96 + 0.0182382*m.x97 - 0.00355687*m.x98 + 0.0188454*m.x99 + 0.0261119*m.x100
+ 0.0236217*m.x101 == 0)
m.c77 = Constraint(expr= - m.x24 + 0.0104307*m.x52 + 0.0118212*m.x53 + 0.020493*m.x54 + 0.0160821*m.x55 + 0.039664*m.x56
+ 0.0128583*m.x57 + 0.0262*m.x58 + 0.0268377*m.x59 + 0.0153119*m.x60 - 0.000197643*m.x61
+ 0.0156557*m.x62 + 0.0173727*m.x63 - 0.00484524*m.x64 + 0.0070277*m.x65 + 0.0128077*m.x66
+ 0.0213959*m.x67 + 0.0119803*m.x68 + 0.0219596*m.x69 + 0.029091*m.x70 + 0.0320937*m.x71
+ 0.0302363*m.x72 + 0.0167088*m.x73 + 0.227104*m.x74 + 0.0110539*m.x75 + 0.0685123*m.x76
+ 0.0166982*m.x77 + 0.00939654*m.x78 + 0.00636519*m.x79 + 0.0242445*m.x80 + 0.0724648*m.x81
+ 0.0194513*m.x82 + 0.00366476*m.x83 + 0.0134866*m.x84 + 0.00878361*m.x85 + 0.0269894*m.x86
+ 0.0281086*m.x87 + 0.00493919*m.x88 + 0.0265072*m.x89 + 0.0495917*m.x90 + 0.00899853*m.x91
+ 0.0191737*m.x92 + 0.0112022*m.x93 + 0.0106917*m.x94 + 0.0282436*m.x95 + 0.0119814*m.x96
+ 0.00852934*m.x97 + 0.0132486*m.x98 - 0.00483593*m.x99 + 0.00268557*m.x100 + 0.0264927*m.x101
== 0)
m.c78 = Constraint(expr= - m.x25 - 0.00554627*m.x52 + 0.00695719*m.x53 + 0.0060982*m.x54 + 0.00240536*m.x55
+ 0.0102778*m.x56 + 0.00399753*m.x57 + 0.0176229*m.x58 + 0.00229807*m.x59 + 0.00890117*m.x60
- 0.00657968*m.x61 + 0.0127479*m.x62 + 0.0186957*m.x63 + 0.0034154*m.x64 + 0.00809957*m.x65
+ 0.00710322*m.x66 + 0.0275661*m.x67 + 0.0131243*m.x68 + 0.00928365*m.x69 + 0.0174387*m.x70
+ 0.00895684*m.x71 + 0.0130538*m.x72 + 0.00851376*m.x73 + 0.0110539*m.x74 + 0.183511*m.x75
+ 0.00968069*m.x76 + 0.00777885*m.x77 + 0.00484151*m.x78 + 0.0120339*m.x79 + 0.0182045*m.x80
+ 0.0142639*m.x81 + 0.014134*m.x82 + 0.0123093*m.x83 + 0.00543117*m.x84 + 0.0065975*m.x85
+ 0.016776*m.x86 + 0.00170557*m.x87 + 0.0026933*m.x88 + 0.00792354*m.x89 + 0.00735961*m.x90
- 0.000614984*m.x91 + 0.0118767*m.x92 + 0.00947244*m.x93 + 0.00574257*m.x94 + 0.0110814*m.x95
+ 0.00174348*m.x96 + 0.00448876*m.x97 + 0.0220952*m.x98 + 0.0063483*m.x99 + 0.000150809*m.x100
+ 6.68242E-5*m.x101 == 0)
m.c79 = Constraint(expr= - m.x26 + 0.000275614*m.x52 - 0.00146012*m.x53 + 0.00709161*m.x54 + 0.0042418*m.x55
+ 0.0172657*m.x56 + 0.0184167*m.x57 + 0.0184672*m.x58 + 0.01809*m.x59 + 0.0222796*m.x60
- 0.00738206*m.x61 + 0.0111445*m.x62 + 0.0206278*m.x63 - 0.0060451*m.x64 + 0.0120566*m.x65
+ 0.0100525*m.x66 + 0.0260354*m.x67 + 0.009587*m.x68 + 0.0279232*m.x69 + 0.0237535*m.x70
+ 0.0261585*m.x71 + 0.02932*m.x72 + 0.0194063*m.x73 + 0.0685123*m.x74 + 0.00968069*m.x75
+ 0.190498*m.x76 + 0.0273631*m.x77 + 0.0144043*m.x78 + 0.00276303*m.x79 + 0.00422846*m.x80
+ 0.0638216*m.x81 + 0.017823*m.x82 + 0.0135183*m.x83 + 0.00365697*m.x84 - 0.000986928*m.x85
+ 0.0169049*m.x86 + 0.0266562*m.x87 + 0.00523559*m.x88 + 0.014168*m.x89 + 0.0413952*m.x90
+ 0.00776725*m.x91 + 0.0326211*m.x92 + 0.0119027*m.x93 + 0.011424*m.x94 + 0.015665*m.x95
+ 0.0129933*m.x96 + 0.0057329*m.x97 + 0.00863731*m.x98 + 0.00782909*m.x99 + 0.0385547*m.x100
+ 0.0147477*m.x101 == 0)
m.c80 = Constraint(expr= - m.x27 + 0.00146767*m.x52 - 0.00455327*m.x53 + 0.0192029*m.x54 + 0.00264811*m.x55
+ 0.00473961*m.x56 + 0.010492*m.x57 + 0.00333289*m.x58 + 0.0114054*m.x59 + 0.0442301*m.x60
+ 0.0105229*m.x61 + 0.0085222*m.x62 + 0.0197312*m.x63 + 0.0102102*m.x64 + 0.0156797*m.x65
+ 0.0141544*m.x66 + 0.00490195*m.x67 + 0.00326145*m.x68 + 0.0138525*m.x69 + 0.0139083*m.x70
+ 0.0224334*m.x71 + 0.0266188*m.x72 - 0.00258911*m.x73 + 0.0166982*m.x74 + 0.00777885*m.x75
+ 0.0273631*m.x76 + 0.14242*m.x77 + 0.0237243*m.x78 + 0.00294961*m.x79 + 0.0200953*m.x80
+ 0.0206276*m.x81 + 0.0230949*m.x82 + 0.00859757*m.x83 + 0.0169*m.x84 + 0.0129568*m.x85
+ 0.0262844*m.x86 + 0.0202602*m.x87 + 0.0135266*m.x88 + 0.0134485*m.x89 + 0.0259415*m.x90
+ 0.0189386*m.x91 + 0.0167553*m.x92 + 0.012156*m.x93 + 0.0312321*m.x94 + 0.0133677*m.x95
+ 0.0168904*m.x96 + 0.021903*m.x97 + 0.00904192*m.x98 + 0.00640522*m.x99 + 0.000393756*m.x100
+ 0.0123718*m.x101 == 0)
m.c81 = Constraint(expr= - m.x28 - 0.0219202*m.x52 - 0.00233478*m.x53 + 0.00489188*m.x54 + 0.00832847*m.x55
+ 0.0132399*m.x56 + 0.0244629*m.x57 + 0.0125282*m.x58 + 0.0148192*m.x59 + 0.0119004*m.x60
- 0.0124412*m.x61 - 0.00157042*m.x62 + 0.0258626*m.x63 + 0.019147*m.x64 + 0.019146*m.x65
- 0.00302889*m.x66 - 8.95127E-5*m.x67 + 0.0055836*m.x68 + 0.0582128*m.x69 + 0.0112918*m.x70
+ 0.0281506*m.x71 + 0.0279647*m.x72 + 0.000352563*m.x73 + 0.00939654*m.x74 + 0.00484151*m.x75
+ 0.0144043*m.x76 + 0.0237243*m.x77 + 0.507964*m.x78 + 0.0151067*m.x79 + 0.0166188*m.x80
+ 0.010503*m.x81 + 0.006312*m.x82 + 0.00351795*m.x83 + 0.0068205*m.x84 + 0.00479431*m.x85
+ 0.0145654*m.x86 + 0.033506*m.x87 + 0.00559812*m.x88 + 0.0126415*m.x89 + 0.0123446*m.x90
+ 0.028821*m.x91 + 0.00981253*m.x92 + 0.0284364*m.x93 + 0.0179957*m.x94 + 0.0240785*m.x95
+ 0.0203486*m.x96 + 0.0246958*m.x97 + 0.0301721*m.x98 + 0.00697773*m.x99 + 0.00248209*m.x100
- 0.00975878*m.x101 == 0)
m.c82 = Constraint(expr= - m.x29 - 0.0152471*m.x52 - 0.00354018*m.x53 + 0.0141398*m.x54 + 0.0040175*m.x55
- 0.0118559*m.x56 + 0.047228*m.x57 + 0.0160426*m.x58 + 0.0286969*m.x59 + 0.00720201*m.x60
- 0.00440667*m.x61 + 0.00905753*m.x62 + 0.0281149*m.x63 + 0.00861968*m.x64 + 0.0230557*m.x65
+ 0.0202446*m.x66 + 0.0278101*m.x67 + 0.0160137*m.x68 + 0.0225874*m.x69 + 0.0315031*m.x70
+ 0.0324489*m.x71 + 0.0180419*m.x72 + 0.0170447*m.x73 + 0.00636519*m.x74 + 0.0120339*m.x75
+ 0.00276303*m.x76 + 0.00294961*m.x77 + 0.0151067*m.x78 + 0.670433*m.x79 + 0.0205952*m.x80
+ 0.00444933*m.x81 + 0.0225512*m.x82 + 0.0465233*m.x83 + 0.0608492*m.x84 + 0.0358653*m.x85
+ 0.0417635*m.x86 - 0.00291679*m.x87 - 0.000317393*m.x88 + 0.0125595*m.x89 - 0.00116156*m.x90
- 0.00192373*m.x91 + 0.0114605*m.x92 + 0.0425365*m.x93 - 0.000808147*m.x94 + 0.00295518*m.x95
+ 0.0242798*m.x96 + 0.0107554*m.x97 + 0.0120875*m.x98 + 0.0292966*m.x99 - 0.00126318*m.x100
- 0.0099048*m.x101 == 0)
m.c83 = Constraint(expr= - m.x30 - 0.0133041*m.x52 - 0.0108257*m.x53 + 0.0183881*m.x54 + 0.0153818*m.x55
+ 0.0329745*m.x56 + 0.00547127*m.x57 - 0.00910903*m.x58 + 0.0156643*m.x59 + 0.0201433*m.x60
+ 0.0123441*m.x61 - 0.00402737*m.x62 + 0.020796*m.x63 - 0.0013634*m.x64 + 0.00625971*m.x65
+ 0.0273331*m.x66 + 0.0154943*m.x67 - 0.00700837*m.x68 + 0.0216165*m.x69 + 0.0104372*m.x70
+ 0.0266137*m.x71 + 0.0293269*m.x72 + 0.00326757*m.x73 + 0.0242445*m.x74 + 0.0182045*m.x75
+ 0.00422846*m.x76 + 0.0200953*m.x77 + 0.0166188*m.x78 + 0.0205952*m.x79 + 0.229224*m.x80
+ 0.0223216*m.x81 + 0.0206237*m.x82 + 0.0101265*m.x83 + 0.0015088*m.x84 + 0.0223314*m.x85
+ 0.0273206*m.x86 + 0.00161461*m.x87 + 0.00487681*m.x88 + 0.0183379*m.x89 + 0.0275921*m.x90
+ 0.0159442*m.x91 + 0.0134875*m.x92 + 0.0270417*m.x93 + 0.00200928*m.x94 + 0.0218467*m.x95
+ 0.00352069*m.x96 + 0.00446644*m.x97 + 0.0176237*m.x98 + 0.0279531*m.x99 + 0.0110346*m.x100
+ 0.00696769*m.x101 == 0)
m.c84 = Constraint(expr= - m.x31 + 0.00532027*m.x52 + 0.00548427*m.x53 + 0.0132555*m.x54 + 0.0182359*m.x55
+ 0.00776731*m.x56 + 0.0133769*m.x57 + 0.0168617*m.x58 + 0.0214673*m.x59 + 0.0169933*m.x60
+ 0.00670955*m.x61 + 0.00937755*m.x62 + 0.0154147*m.x63 + 0.00686903*m.x64 + 0.0154784*m.x65
+ 0.0142628*m.x66 + 0.0110009*m.x67 + 0.00816694*m.x68 + 0.0188341*m.x69 + 0.0253639*m.x70
+ 0.0183526*m.x71 + 0.02223*m.x72 + 0.0111415*m.x73 + 0.0724648*m.x74 + 0.0142639*m.x75
+ 0.0638216*m.x76 + 0.0206276*m.x77 + 0.010503*m.x78 + 0.00444933*m.x79 + 0.0223216*m.x80
+ 0.185075*m.x81 + 0.0205911*m.x82 + 0.0145088*m.x83 + 0.00876387*m.x84 + 0.0107778*m.x85
+ 0.014933*m.x86 + 0.0186524*m.x87 + 0.0106153*m.x88 + 0.044217*m.x89 + 0.0463482*m.x90
+ 0.019405*m.x91 + 0.0233399*m.x92 + 0.0136317*m.x93 + 0.0110294*m.x94 + 0.0119847*m.x95
+ 0.0293732*m.x96 - 0.00785039*m.x97 + 0.0195485*m.x98 + 0.00530393*m.x99 - 0.00585743*m.x100
+ 0.0197286*m.x101 == 0)
m.c85 = Constraint(expr= - m.x32 + 0.0190296*m.x52 + 0.00843954*m.x53 + 0.0089825*m.x54 + 0.00961571*m.x55
+ 0.00146596*m.x56 + 0.0119332*m.x57 + 0.00649361*m.x58 + 0.00423722*m.x59 + 0.019457*m.x60
+ 0.000975768*m.x61 + 0.00827346*m.x62 + 0.00821687*m.x63 + 0.0133687*m.x64 + 0.0113709*m.x65
+ 0.0130754*m.x66 + 0.0209885*m.x67 + 0.0133907*m.x68 + 0.0113276*m.x69 + 0.00237959*m.x70
- 0.0016676*m.x71 + 0.00413185*m.x72 + 0.0158008*m.x73 + 0.0194513*m.x74 + 0.014134*m.x75
+ 0.017823*m.x76 + 0.0230949*m.x77 + 0.006312*m.x78 + 0.0225512*m.x79 + 0.0206237*m.x80
+ 0.0205911*m.x81 + 0.147147*m.x82 + 0.0105685*m.x83 + 0.00474516*m.x84 + 0.0149866*m.x85
- 0.00374475*m.x86 + 0.0147657*m.x87 + 0.00370161*m.x88 - 0.00382518*m.x89 + 0.0112733*m.x90
+ 0.00898559*m.x91 + 0.047951*m.x92 + 0.00269973*m.x93 + 0.00305288*m.x94 + 0.00998711*m.x95
- 0.00599198*m.x96 + 0.00378519*m.x97 + 0.00228262*m.x98 + 0.000223223*m.x99 + 0.0131328*m.x100
+ 0.0100911*m.x101 == 0)
m.c86 = Constraint(expr= - m.x33 + 9.52152E-5*m.x52 + 0.0957415*m.x53 - 0.00433095*m.x54 + 0.0122098*m.x55
+ 0.0398038*m.x56 + 0.161483*m.x57 + 0.000720061*m.x58 + 0.0101393*m.x59 + 0.0111733*m.x60
+ 0.0409171*m.x61 + 0.00543371*m.x62 + 0.0277493*m.x63 + 0.00136495*m.x64 - 0.00207874*m.x65
+ 0.00886564*m.x66 + 0.129895*m.x67 + 0.00598212*m.x68 + 0.0272881*m.x69 - 0.00567431*m.x70
+ 0.0194921*m.x71 + 0.0241439*m.x72 + 0.10889*m.x73 + 0.00366476*m.x74 + 0.0123093*m.x75
+ 0.0135183*m.x76 + 0.00859757*m.x77 + 0.00351795*m.x78 + 0.0465233*m.x79 + 0.0101265*m.x80
+ 0.0145088*m.x81 + 0.0105685*m.x82 + 0.389649*m.x83 + 0.138762*m.x84 + 0.00825629*m.x85
+ 0.0181004*m.x86 + 0.0167077*m.x87 + 0.00722734*m.x88 - 0.00583878*m.x89 + 0.0232216*m.x90
+ 0.0168437*m.x91 + 0.0278419*m.x92 + 0.117531*m.x93 + 0.00545108*m.x94 + 0.007432*m.x95
+ 0.0161894*m.x96 + 0.0203409*m.x97 - 0.00640225*m.x98 + 0.00363753*m.x99 + 0.00102053*m.x100
+ 0.0252622*m.x101 == 0)
m.c87 = Constraint(expr= - m.x34 - 0.0180784*m.x52 + 0.0724208*m.x53 + 0.000368443*m.x54 - 0.000558226*m.x55
+ 0.0268424*m.x56 + 0.187982*m.x57 + 0.0015496*m.x58 + 0.00438509*m.x59 + 0.00689119*m.x60
- 0.0110323*m.x61 + 0.0230998*m.x62 + 0.0231334*m.x63 + 0.00888952*m.x64 - 0.00747722*m.x65
+ 0.0125267*m.x66 + 0.104593*m.x67 - 0.00201041*m.x68 + 0.0118425*m.x69 + 0.0125939*m.x70
+ 0.0366494*m.x71 + 0.0263683*m.x72 + 0.116075*m.x73 + 0.0134866*m.x74 + 0.00543117*m.x75
+ 0.00365697*m.x76 + 0.0169*m.x77 + 0.0068205*m.x78 + 0.0608492*m.x79 + 0.0015088*m.x80
+ 0.00876387*m.x81 + 0.00474516*m.x82 + 0.138762*m.x83 + 0.397419*m.x84 + 0.0108491*m.x85
- 0.00298466*m.x86 + 0.0247715*m.x87 + 0.0157939*m.x88 + 0.00640654*m.x89 + 0.0102405*m.x90
+ 0.0051056*m.x91 + 0.0145699*m.x92 + 0.0756527*m.x93 + 0.00684049*m.x94 - 0.000862575*m.x95
+ 0.00996209*m.x96 + 0.0282548*m.x97 + 0.0055526*m.x98 + 0.00924268*m.x99 + 0.00369864*m.x100
- 0.00445725*m.x101 == 0)
m.c88 = Constraint(expr= - m.x35 + 0.00127079*m.x52 + 0.00920314*m.x53 + 0.00845006*m.x54 + 0.0179991*m.x55
+ 0.0120171*m.x56 + 0.00916881*m.x57 + 0.0120757*m.x58 + 0.0186319*m.x59 + 0.00669496*m.x60
- 0.00482281*m.x61 + 0.0238731*m.x62 + 0.0242186*m.x63 + 0.00809492*m.x64 + 0.00726553*m.x65
+ 0.00167144*m.x66 + 0.0164835*m.x67 + 0.0153712*m.x68 + 0.0244022*m.x69 + 0.0195843*m.x70
+ 0.0166731*m.x71 - 0.0132754*m.x72 + 0.0169971*m.x73 + 0.00878361*m.x74 + 0.0065975*m.x75
- 0.000986928*m.x76 + 0.0129568*m.x77 + 0.00479431*m.x78 + 0.0358653*m.x79 + 0.0223314*m.x80
+ 0.0107778*m.x81 + 0.0149866*m.x82 + 0.00825629*m.x83 + 0.0108491*m.x84 + 0.312298*m.x85
+ 0.0120296*m.x86 + 0.0106859*m.x87 + 0.0204397*m.x88 + 0.0119026*m.x89 + 0.0319466*m.x90
+ 0.00664877*m.x91 + 0.00548571*m.x92 + 0.0048078*m.x93 + 0.0331056*m.x94 + 0.0274019*m.x95
+ 0.00104681*m.x96 + 0.011411*m.x97 - 0.00331677*m.x98 - 0.00425863*m.x99 + 0.0100274*m.x100
+ 0.00728145*m.x101 == 0)
m.c89 = Constraint(expr= - m.x36 - 0.00331643*m.x52 - 0.00921773*m.x53 + 0.0106863*m.x54 + 0.0126379*m.x55
+ 0.0145295*m.x56 + 0.0209491*m.x57 + 0.0231367*m.x58 + 0.046181*m.x59 + 0.0331297*m.x60
- 0.00546107*m.x61 + 0.0199311*m.x62 + 0.0562299*m.x63 + 0.00573295*m.x64 + 0.037832*m.x65
+ 0.0368131*m.x66 + 0.0238469*m.x67 + 0.00839091*m.x68 + 0.0305204*m.x69 + 0.0768331*m.x70
+ 0.0415684*m.x71 + 0.0388595*m.x72 + 0.0341233*m.x73 + 0.0269894*m.x74 + 0.016776*m.x75
+ 0.0169049*m.x76 + 0.0262844*m.x77 + 0.0145654*m.x78 + 0.0417635*m.x79 + 0.0273206*m.x80
+ 0.014933*m.x81 - 0.00374475*m.x82 + 0.0181004*m.x83 - 0.00298466*m.x84 + 0.0120296*m.x85
+ 0.618581*m.x86 + 0.0289636*m.x87 - 0.00446781*m.x88 + 0.0224213*m.x89 + 0.0380495*m.x90
+ 0.0386705*m.x91 + 0.0297938*m.x92 + 0.0058598*m.x93 + 0.0252835*m.x94 + 0.0145417*m.x95
+ 0.0665246*m.x96 + 0.00798604*m.x97 + 0.00560573*m.x98 + 0.0328297*m.x99 + 0.0235991*m.x100
+ 0.0470289*m.x101 == 0)
m.c90 = Constraint(expr= - m.x37 - 0.0107273*m.x52 + 0.0112775*m.x53 + 0.0165343*m.x54 + 0.0175827*m.x55
+ 0.0354297*m.x56 + 0.0327261*m.x57 + 0.0160891*m.x58 + 0.0332107*m.x59 + 0.0197397*m.x60
- 0.0142879*m.x61 + 0.0174054*m.x62 + 0.0315629*m.x63 + 0.00828577*m.x64 + 0.0123555*m.x65
+ 0.0135909*m.x66 + 0.0319592*m.x67 + 0.00597115*m.x68 + 0.0378227*m.x69 + 0.0267106*m.x70
+ 0.0425512*m.x71 + 0.0578838*m.x72 + 0.0267429*m.x73 + 0.0281086*m.x74 + 0.00170557*m.x75
+ 0.0266562*m.x76 + 0.0202602*m.x77 + 0.033506*m.x78 - 0.00291679*m.x79 + 0.00161461*m.x80
+ 0.0186524*m.x81 + 0.0147657*m.x82 + 0.0167077*m.x83 + 0.0247715*m.x84 + 0.0106859*m.x85
+ 0.0289636*m.x86 + 0.270232*m.x87 + 0.0400357*m.x88 + 0.00621348*m.x89 + 0.0404134*m.x90
+ 0.00592392*m.x91 + 0.00614247*m.x92 + 0.00530712*m.x93 + 0.00684822*m.x94 + 0.0187153*m.x95
+ 0.0225813*m.x96 + 0.0289411*m.x97 + 0.00901397*m.x98 + 0.0166774*m.x99 + 0.0332544*m.x100
+ 0.0151416*m.x101 == 0)
m.c91 = Constraint(expr= - m.x38 - 6.72321E-5*m.x52 + 0.010577*m.x53 + 0.0182906*m.x54 + 0.00566779*m.x55
- 0.00170776*m.x56 + 0.028455*m.x57 + 0.000127307*m.x58 + 0.0160758*m.x59 + 0.0120744*m.x60
+ 0.018699*m.x61 + 0.00185204*m.x62 + 0.0122553*m.x63 + 0.0152408*m.x64 - 0.000156492*m.x65
- 0.000550234*m.x66 + 0.016159*m.x67 - 0.000508298*m.x68 + 0.00150342*m.x69 + 0.00312045*m.x70
+ 0.0185632*m.x71 + 0.00722557*m.x72 - 0.0114268*m.x73 + 0.00493919*m.x74 + 0.0026933*m.x75
+ 0.00523559*m.x76 + 0.0135266*m.x77 + 0.00559812*m.x78 - 0.000317393*m.x79 + 0.00487681*m.x80
+ 0.0106153*m.x81 + 0.00370161*m.x82 + 0.00722734*m.x83 + 0.0157939*m.x84 + 0.0204397*m.x85
- 0.00446781*m.x86 + 0.0400357*m.x87 + 0.222166*m.x88 + 0.00907574*m.x89 + 0.0281441*m.x90
+ 0.0265542*m.x91 + 0.00608259*m.x92 + 0.0066023*m.x93 + 0.00659999*m.x94 + 0.0224381*m.x95
+ 0.00149053*m.x96 + 0.000405727*m.x97 - 0.0104234*m.x98 + 0.000189871*m.x99
+ 0.00118145*m.x100 + 0.00362186*m.x101 == 0)
m.c92 = Constraint(expr= - m.x39 + 0.0019753*m.x52 - 0.00268772*m.x53 + 0.000474699*m.x54 - 0.000955585*m.x55
+ 0.0255113*m.x56 + 0.0105724*m.x57 + 0.00590674*m.x58 + 0.00541803*m.x59 + 0.0127905*m.x60
+ 0.0440906*m.x61 + 0.0156839*m.x62 + 0.0146058*m.x63 + 0.0110413*m.x64 + 0.0119264*m.x65
+ 0.0369853*m.x66 - 0.00048612*m.x67 - 0.00265155*m.x68 + 0.000336096*m.x69 + 0.00720686*m.x70
+ 0.0150068*m.x71 + 0.0210916*m.x72 - 0.00234199*m.x73 + 0.0265072*m.x74 + 0.00792354*m.x75
+ 0.014168*m.x76 + 0.0134485*m.x77 + 0.0126415*m.x78 + 0.0125595*m.x79 + 0.0183379*m.x80
+ 0.044217*m.x81 - 0.00382518*m.x82 - 0.00583878*m.x83 + 0.00640654*m.x84 + 0.0119026*m.x85
+ 0.0224213*m.x86 + 0.00621348*m.x87 + 0.00907574*m.x88 + 0.394267*m.x89 + 0.0165051*m.x90
+ 0.00980853*m.x91 - 0.00226117*m.x92 - 0.00984533*m.x93 + 0.00565748*m.x94 + 0.00895692*m.x95
+ 0.00919195*m.x96 + 0.00900527*m.x97 + 0.0181986*m.x98 + 0.0249229*m.x99 - 0.000623048*m.x100
+ 0.0135896*m.x101 == 0)
m.c93 = Constraint(expr= - m.x40 - 0.00561942*m.x52 + 0.0104329*m.x53 + 0.0125524*m.x54 + 0.0234718*m.x55
+ 0.0115797*m.x56 + 0.0238296*m.x57 + 0.0251974*m.x58 + 0.0243196*m.x59 + 0.0406861*m.x60
- 0.00363253*m.x61 + 0.00443354*m.x62 + 0.0225422*m.x63 + 0.0069969*m.x64 + 0.0124128*m.x65
+ 0.00970355*m.x66 + 0.0206697*m.x67 + 0.0148232*m.x68 + 0.0330899*m.x69 + 0.0261195*m.x70
+ 0.0206301*m.x71 + 0.0335768*m.x72 + 0.0350183*m.x73 + 0.0495917*m.x74 + 0.00735961*m.x75
+ 0.0413952*m.x76 + 0.0259415*m.x77 + 0.0123446*m.x78 - 0.00116156*m.x79 + 0.0275921*m.x80
+ 0.0463482*m.x81 + 0.0112733*m.x82 + 0.0232216*m.x83 + 0.0102405*m.x84 + 0.0319466*m.x85
+ 0.0380495*m.x86 + 0.0404134*m.x87 + 0.0281441*m.x88 + 0.0165051*m.x89 + 0.226153*m.x90
+ 0.00565646*m.x91 + 0.0239442*m.x92 + 0.00622955*m.x93 + 0.014515*m.x94 + 0.0227247*m.x95
+ 0.026331*m.x96 + 0.0188097*m.x97 + 0.00284125*m.x98 + 0.00673929*m.x99 + 0.00450472*m.x100
+ 0.0152845*m.x101 == 0)
m.c94 = Constraint(expr= - m.x41 - 0.0137411*m.x52 - 0.00184253*m.x53 + 0.00998269*m.x54 - 0.00128625*m.x55
+ 0.0340249*m.x56 - 0.00223337*m.x57 + 0.0109883*m.x58 + 0.0145438*m.x59 + 0.0323148*m.x60
+ 0.00273765*m.x61 + 0.0202129*m.x62 + 0.0126094*m.x63 + 0.0053944*m.x64 + 0.0206051*m.x65
+ 0.0253109*m.x66 - 0.0044719*m.x67 - 0.000660928*m.x68 + 0.0189859*m.x69 + 0.0295481*m.x70
+ 0.00808519*m.x71 - 0.00914657*m.x72 - 0.00327782*m.x73 + 0.00899853*m.x74 - 0.000614984*m.x75
+ 0.00776725*m.x76 + 0.0189386*m.x77 + 0.028821*m.x78 - 0.00192373*m.x79 + 0.0159442*m.x80
+ 0.019405*m.x81 + 0.00898559*m.x82 + 0.0168437*m.x83 + 0.0051056*m.x84 + 0.00664877*m.x85
+ 0.0386705*m.x86 + 0.00592392*m.x87 + 0.0265542*m.x88 + 0.00980853*m.x89 + 0.00565646*m.x90
+ 0.290035*m.x91 + 0.0156774*m.x92 - 0.00869674*m.x93 + 0.00461003*m.x94 - 0.000555319*m.x95
+ 0.016294*m.x96 + 0.0016488*m.x97 + 0.0137582*m.x98 + 0.0245795*m.x99 - 0.00658672*m.x100
+ 0.00527527*m.x101 == 0)
m.c95 = Constraint(expr= - m.x42 + 0.0266953*m.x52 + 0.0230614*m.x53 + 0.00663781*m.x54 + 0.00397589*m.x55
+ 0.00175196*m.x56 + 0.0230382*m.x57 + 0.0197048*m.x58 + 0.00473001*m.x59 + 0.0200869*m.x60
+ 0.00673168*m.x61 + 0.0114171*m.x62 + 0.0195556*m.x63 + 0.0104813*m.x64 + 0.0182519*m.x65
+ 0.01371*m.x66 + 0.0412523*m.x67 + 0.0219128*m.x68 + 0.0161305*m.x69 - 0.00121588*m.x70
- 0.00805047*m.x71 + 0.0153621*m.x72 + 0.0234788*m.x73 + 0.0191737*m.x74 + 0.0118767*m.x75
+ 0.0326211*m.x76 + 0.0167553*m.x77 + 0.00981253*m.x78 + 0.0114605*m.x79 + 0.0134875*m.x80
+ 0.0233399*m.x81 + 0.047951*m.x82 + 0.0278419*m.x83 + 0.0145699*m.x84 + 0.00548571*m.x85
+ 0.0297938*m.x86 + 0.00614247*m.x87 + 0.00608259*m.x88 - 0.00226117*m.x89 + 0.0239442*m.x90
+ 0.0156774*m.x91 + 0.195197*m.x92 + 0.0167141*m.x93 - 0.00108078*m.x94 + 0.0154638*m.x95
+ 0.00879495*m.x96 + 0.0251912*m.x97 + 0.00951858*m.x98 + 0.0145509*m.x99 + 0.0109233*m.x100
+ 0.00930651*m.x101 == 0)
m.c96 = Constraint(expr= - m.x43 + 0.0039322*m.x52 + 0.0797692*m.x53 - 0.00941355*m.x54 + 0.00253364*m.x55
+ 0.0214384*m.x56 + 0.112083*m.x57 + 0.00281047*m.x58 + 0.00681241*m.x59 + 0.00172542*m.x60
+ 0.0033605*m.x61 + 0.00122747*m.x62 + 0.0148528*m.x63 - 0.00694263*m.x64 - 0.0063393*m.x65
+ 0.0151066*m.x66 + 0.150222*m.x67 + 0.0200429*m.x68 + 0.00657093*m.x69 + 0.00174197*m.x70
+ 0.0108192*m.x71 + 0.0170669*m.x72 + 0.0976326*m.x73 + 0.0112022*m.x74 + 0.00947244*m.x75
+ 0.0119027*m.x76 + 0.012156*m.x77 + 0.0284364*m.x78 + 0.0425365*m.x79 + 0.0270417*m.x80
+ 0.0136317*m.x81 + 0.00269973*m.x82 + 0.117531*m.x83 + 0.0756527*m.x84 + 0.0048078*m.x85
+ 0.0058598*m.x86 + 0.00530712*m.x87 + 0.0066023*m.x88 - 0.00984533*m.x89 + 0.00622955*m.x90
- 0.00869674*m.x91 + 0.0167141*m.x92 + 0.306057*m.x93 + 0.018202*m.x94 + 0.0064207*m.x95
+ 0.007465*m.x96 + 0.0209936*m.x97 + 0.00813794*m.x98 + 0.0137895*m.x99 + 0.00376129*m.x100
+ 0.00807619*m.x101 == 0)
m.c97 = Constraint(expr= - m.x44 + 0.0312023*m.x52 - 0.00718849*m.x53 + 0.0166904*m.x54 + 0.0161477*m.x55
+ 0.0113414*m.x56 + 0.00257709*m.x57 + 0.0113665*m.x58 + 0.00988793*m.x59 + 0.0311244*m.x60
+ 0.0241296*m.x61 + 0.0118384*m.x62 + 0.016949*m.x63 + 0.0141714*m.x64 + 0.0162264*m.x65
+ 0.0201164*m.x66 + 0.0060731*m.x67 + 0.00803816*m.x68 + 0.0118269*m.x69 + 0.000971523*m.x70
+ 0.01367*m.x71 + 0.00771841*m.x72 + 0.000202835*m.x73 + 0.0106917*m.x74 + 0.00574257*m.x75
+ 0.011424*m.x76 + 0.0312321*m.x77 + 0.0179957*m.x78 - 0.000808147*m.x79 + 0.00200928*m.x80
+ 0.0110294*m.x81 + 0.00305288*m.x82 + 0.00545108*m.x83 + 0.00684049*m.x84 + 0.0331056*m.x85
+ 0.0252835*m.x86 + 0.00684822*m.x87 + 0.00659999*m.x88 + 0.00565748*m.x89 + 0.014515*m.x90
+ 0.00461003*m.x91 - 0.00108078*m.x92 + 0.018202*m.x93 + 0.2295*m.x94 + 0.0263474*m.x95
+ 0.0158978*m.x96 - 0.00338835*m.x97 + 0.0116215*m.x98 + 0.0102735*m.x99 - 0.0164264*m.x100
+ 0.0105885*m.x101 == 0)
m.c98 = Constraint(expr= - m.x45 + 0.00475029*m.x52 + 0.00668562*m.x53 + 0.00602889*m.x54 + 0.0163612*m.x55
+ 0.039091*m.x56 - 0.0088657*m.x57 + 0.0128475*m.x58 + 0.0149668*m.x59 + 0.00519737*m.x60
- 0.00441557*m.x61 + 0.0228483*m.x62 + 0.0309886*m.x63 - 0.00184581*m.x64 + 0.0114734*m.x65
+ 0.0193544*m.x66 + 0.00469106*m.x67 + 0.0174527*m.x68 + 0.0262376*m.x69 + 0.016521*m.x70
+ 0.0348135*m.x71 + 0.0161467*m.x72 + 0.00567421*m.x73 + 0.0282436*m.x74 + 0.0110814*m.x75
+ 0.015665*m.x76 + 0.0133677*m.x77 + 0.0240785*m.x78 + 0.00295518*m.x79 + 0.0218467*m.x80
+ 0.0119847*m.x81 + 0.00998711*m.x82 + 0.007432*m.x83 - 0.000862575*m.x84 + 0.0274019*m.x85
+ 0.0145417*m.x86 + 0.0187153*m.x87 + 0.0224381*m.x88 + 0.00895692*m.x89 + 0.0227247*m.x90
- 0.000555319*m.x91 + 0.0154638*m.x92 + 0.0064207*m.x93 + 0.0263474*m.x94 + 0.219232*m.x95
+ 0.0233015*m.x96 - 0.00971973*m.x97 + 0.0161499*m.x98 + 0.0121398*m.x99 - 0.000692501*m.x100
+ 0.00371111*m.x101 == 0)
m.c99 = Constraint(expr= - m.x46 + 0.00458043*m.x52 - 0.00479877*m.x53 + 0.00224387*m.x54 + 0.012804*m.x55
+ 0.00619763*m.x56 + 0.0101284*m.x57 + 0.00622782*m.x58 + 0.023562*m.x59 + 0.0142684*m.x60
- 0.00703875*m.x61 + 0.0131884*m.x62 + 0.0111695*m.x63 + 0.0147295*m.x64 + 0.0298746*m.x65
+ 0.0166079*m.x66 + 0.032667*m.x67 + 0.00328568*m.x68 + 0.0229703*m.x69 + 0.0242338*m.x70
+ 0.0320515*m.x71 + 0.0470226*m.x72 + 0.0334415*m.x73 + 0.0119814*m.x74 + 0.00174348*m.x75
+ 0.0129933*m.x76 + 0.0168904*m.x77 + 0.0203486*m.x78 + 0.0242798*m.x79 + 0.00352069*m.x80
+ 0.0293732*m.x81 - 0.00599198*m.x82 + 0.0161894*m.x83 + 0.00996209*m.x84 + 0.00104681*m.x85
+ 0.0665246*m.x86 + 0.0225813*m.x87 + 0.00149053*m.x88 + 0.00919195*m.x89 + 0.026331*m.x90
+ 0.016294*m.x91 + 0.00879495*m.x92 + 0.007465*m.x93 + 0.0158978*m.x94 + 0.0233015*m.x95
+ 0.325248*m.x96 + 0.0152129*m.x97 + 0.0136663*m.x98 + 0.0127301*m.x99 - 0.00399355*m.x100
+ 0.00993756*m.x101 == 0)
m.c100 = Constraint(expr= - m.x47 - 0.0111713*m.x52 + 0.037467*m.x53 - 0.00806098*m.x54 + 0.0254602*m.x55
+ 0.0133319*m.x56 + 0.0087194*m.x57 + 0.0245605*m.x58 + 0.0173729*m.x59 + 0.0178041*m.x60
+ 0.016325*m.x61 - 0.0151598*m.x62 + 0.023004*m.x63 - 0.00369236*m.x64 + 0.00393739*m.x65
+ 0.0113423*m.x66 + 0.00513266*m.x67 + 0.00580133*m.x68 + 0.0245122*m.x69 + 0.0387835*m.x70
+ 0.0132639*m.x71 + 0.0696792*m.x72 + 0.0182382*m.x73 + 0.00852934*m.x74 + 0.00448876*m.x75
+ 0.0057329*m.x76 + 0.021903*m.x77 + 0.0246958*m.x78 + 0.0107554*m.x79 + 0.00446644*m.x80
- 0.00785039*m.x81 + 0.00378519*m.x82 + 0.0203409*m.x83 + 0.0282548*m.x84 + 0.011411*m.x85
+ 0.00798604*m.x86 + 0.0289411*m.x87 + 0.000405727*m.x88 + 0.00900527*m.x89 + 0.0188097*m.x90
+ 0.0016488*m.x91 + 0.0251912*m.x92 + 0.0209936*m.x93 - 0.00338835*m.x94 - 0.00971973*m.x95
+ 0.0152129*m.x96 + 0.903924*m.x97 - 0.0108291*m.x98 + 0.0425572*m.x99 - 0.0154741*m.x100
+ 0.0155463*m.x101 == 0)
m.c101 = Constraint(expr= - m.x48 + 0.00233202*m.x52 - 0.000833339*m.x53 + 0.0151626*m.x54 + 0.0164285*m.x55
+ 0.0121082*m.x56 + 0.016345*m.x57 + 0.00706149*m.x58 + 0.016267*m.x59 + 0.00992985*m.x60
+ 0.00222896*m.x61 + 0.00844519*m.x62 + 0.00865625*m.x63 + 0.00526228*m.x64 + 0.0153743*m.x65
+ 0.0488179*m.x66 + 0.00884207*m.x67 - 0.000537323*m.x68 + 0.00497315*m.x69 + 0.0249114*m.x70
- 0.00327629*m.x71 + 0.00688465*m.x72 - 0.00355687*m.x73 + 0.0132486*m.x74 + 0.0220952*m.x75
+ 0.00863731*m.x76 + 0.00904192*m.x77 + 0.0301721*m.x78 + 0.0120875*m.x79 + 0.0176237*m.x80
+ 0.0195485*m.x81 + 0.00228262*m.x82 - 0.00640225*m.x83 + 0.0055526*m.x84 - 0.00331677*m.x85
+ 0.00560573*m.x86 + 0.00901397*m.x87 - 0.0104234*m.x88 + 0.0181986*m.x89 + 0.00284125*m.x90
+ 0.0137582*m.x91 + 0.00951858*m.x92 + 0.00813794*m.x93 + 0.0116215*m.x94 + 0.0161499*m.x95
+ 0.0136663*m.x96 - 0.0108291*m.x97 + 0.224056*m.x98 + 0.00641426*m.x99 + 0.0200771*m.x100
- 0.0157458*m.x101 == 0)
m.c102 = Constraint(expr= - m.x49 + 0.00279105*m.x52 - 0.00287641*m.x53 - 0.000965771*m.x54 + 0.0113336*m.x55
+ 0.0357203*m.x56 + 0.0145296*m.x57 + 0.00272192*m.x58 + 0.0121424*m.x59 + 0.0146222*m.x60
- 0.0077883*m.x61 + 0.0198609*m.x62 + 0.0218181*m.x63 + 0.00828497*m.x64 + 0.00989917*m.x65
+ 0.016393*m.x66 + 0.0125003*m.x67 + 0.0127107*m.x68 + 0.0222552*m.x69 + 0.0106646*m.x70
+ 0.0267494*m.x71 + 0.0406248*m.x72 + 0.0188454*m.x73 - 0.00483593*m.x74 + 0.0063483*m.x75
+ 0.00782909*m.x76 + 0.00640522*m.x77 + 0.00697773*m.x78 + 0.0292966*m.x79 + 0.0279531*m.x80
+ 0.00530393*m.x81 + 0.000223223*m.x82 + 0.00363753*m.x83 + 0.00924268*m.x84
- 0.00425863*m.x85 + 0.0328297*m.x86 + 0.0166774*m.x87 + 0.000189871*m.x88 + 0.0249229*m.x89
+ 0.00673929*m.x90 + 0.0245795*m.x91 + 0.0145509*m.x92 + 0.0137895*m.x93 + 0.0102735*m.x94
+ 0.0121398*m.x95 + 0.0127301*m.x96 + 0.0425572*m.x97 + 0.00641426*m.x98 + 0.246306*m.x99
+ 0.00353612*m.x100 - 0.00520827*m.x101 == 0)
m.c103 = Constraint(expr= - m.x50 + 0.00588268*m.x52 - 0.00540049*m.x53 + 0.0157379*m.x54 + 0.00992279*m.x55
+ 0.0381607*m.x56 + 0.00606395*m.x57 + 0.00300911*m.x58 - 0.00299957*m.x59 + 0.00920343*m.x60
- 0.00313691*m.x61 + 0.0242712*m.x62 + 0.0268327*m.x63 - 0.0189632*m.x64 + 0.0228823*m.x65
- 0.00100315*m.x66 - 0.00578404*m.x67 + 0.0134156*m.x68 + 0.00180371*m.x69 - 0.0157855*m.x70
+ 0.0178498*m.x71 - 0.00265226*m.x72 + 0.0261119*m.x73 + 0.00268557*m.x74 + 0.000150809*m.x75
+ 0.0385547*m.x76 + 0.000393756*m.x77 + 0.00248209*m.x78 - 0.00126318*m.x79 + 0.0110346*m.x80
- 0.00585743*m.x81 + 0.0131328*m.x82 + 0.00102053*m.x83 + 0.00369864*m.x84 + 0.0100274*m.x85
+ 0.0235991*m.x86 + 0.0332544*m.x87 + 0.00118145*m.x88 - 0.000623048*m.x89 + 0.00450472*m.x90
- 0.00658672*m.x91 + 0.0109233*m.x92 + 0.00376129*m.x93 - 0.0164264*m.x94 - 0.000692501*m.x95
- 0.00399355*m.x96 - 0.0154741*m.x97 + 0.0200771*m.x98 + 0.00353612*m.x99 + 1.25224*m.x100
+ 0.0259038*m.x101 == 0)
m.c104 = Constraint(expr= - m.x51 + 0.0171354*m.x52 + 0.0133618*m.x53 + 0.0187837*m.x54 + 0.00909239*m.x55
+ 0.0203578*m.x56 + 0.00747571*m.x57 + 0.0133916*m.x58 + 0.00907044*m.x59 + 0.0199828*m.x60
+ 0.0264584*m.x61 + 0.0138048*m.x62 + 0.0203605*m.x63 + 0.0101028*m.x64 + 0.017772*m.x65
+ 0.0101386*m.x66 + 0.0225237*m.x67 + 0.00882735*m.x68 + 0.00323067*m.x69 + 0.0165385*m.x70
+ 0.0295494*m.x71 + 0.0216914*m.x72 + 0.0236217*m.x73 + 0.0264927*m.x74 + 6.68242E-5*m.x75
+ 0.0147477*m.x76 + 0.0123718*m.x77 - 0.00975878*m.x78 - 0.0099048*m.x79 + 0.00696769*m.x80
+ 0.0197286*m.x81 + 0.0100911*m.x82 + 0.0252622*m.x83 - 0.00445725*m.x84 + 0.00728145*m.x85
+ 0.0470289*m.x86 + 0.0151416*m.x87 + 0.00362186*m.x88 + 0.0135896*m.x89 + 0.0152845*m.x90
+ 0.00527527*m.x91 + 0.00930651*m.x92 + 0.00807619*m.x93 + 0.0105885*m.x94 + 0.00371111*m.x95
+ 0.00993756*m.x96 + 0.0155463*m.x97 - 0.0157458*m.x98 - 0.00520827*m.x99 + 0.0259038*m.x100
+ 0.389181*m.x101 == 0)
| [
"[email protected]"
] | |
9851d845473ed6fbc6126a25358bcd7ae744f2b9 | 3043ff697647429b5164806e218a1bf69e96cd3d | /dolon/migrations/0004_auto__del_field_imageitem_image.py | d702793406f90a5723f909e6a3cb40bbb598fb99 | [] | no_license | erickpeirson/Dolon | bc584823bd4dbc468a5b30d4a8045d7729dfe3ab | fa33aa5589c52c4770c7a177314b0a71318e313a | refs/heads/master | 2016-09-11T02:27:07.226209 | 2014-10-15T17:02:15 | 2014-10-15T17:02:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,266 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
# Deleting field 'ImageItem.image'
db.delete_column(u'dolon_imageitem', 'image_id')
# Adding M2M table for field images on 'ImageItem'
m2m_table_name = db.shorten_name(u'dolon_imageitem_images')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('imageitem', models.ForeignKey(orm[u'dolon.imageitem'], null=False)),
('image', models.ForeignKey(orm[u'dolon.image'], null=False))
))
        db.create_unique(m2m_table_name, ['imageitem_id', 'image_id'])

    def backwards(self, orm):
# Adding field 'ImageItem.image'
db.add_column(u'dolon_imageitem', 'image',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='imageitem_fullsize', null=True, to=orm['dolon.Image'], blank=True),
keep_default=False)
# Removing M2M table for field images on 'ImageItem'
db.delete_table(db.shorten_name(u'dolon_imageitem_images'))
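    # Frozen ORM snapshot that South passes as the `orm` argument to forwards()/backwards().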
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dolon.audio': {
'Meta': {'object_name': 'Audio'},
'audio_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mime': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
u'dolon.audioitem': {
'Meta': {'object_name': 'AudioItem', '_ormbases': [u'dolon.Item']},
'audio_segments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'segment'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Audio']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'item_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dolon.Item']", 'unique': 'True', 'primary_key': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'audioitem_thumbnail'", 'null': 'True', 'to': u"orm['dolon.Thumbnail']"})
},
u'dolon.context': {
'Meta': {'object_name': 'Context'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publicationDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tagged_contexts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
u'dolon.engine': {
'Meta': {'object_name': 'Engine'},
'daylimit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dayusage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'monthlimit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'monthusage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pagelimit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pagesize': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'parameters': ('dolon.models.ListField', [], {}),
'ratelimit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'dolon.grouptask': {
'Meta': {'object_name': 'GroupTask'},
'dispatched': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtask_ids': ('dolon.models.ListField', [], {}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'dolon.hashtag': {
'Meta': {'object_name': 'HashTag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'string': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'dolon.image': {
'Meta': {'object_name': 'Image'},
'height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mime': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dolon.imageitem': {
'Meta': {'object_name': 'ImageItem', '_ormbases': [u'dolon.Item']},
'height': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dolon.Image']", 'null': 'True', 'blank': 'True'}),
u'item_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dolon.Item']", 'unique': 'True', 'primary_key': 'True'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'imageitem_thumbnail'", 'null': 'True', 'to': u"orm['dolon.Thumbnail']"}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'dolon.item': {
'Meta': {'object_name': 'Item'},
'context': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Context']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.QueryEvent']"}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_with': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_from'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['dolon.Item']"}),
'retrieved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PG'", 'max_length': '2'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tagged_items'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
u'dolon.queryevent': {
'Meta': {'object_name': 'QueryEvent'},
'after': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'before': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_events'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dispatched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'engine_events'", 'to': u"orm['dolon.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'queryresults': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'event_instance'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.QueryResult']"}),
'querystring': ('django.db.models.fields.related.ForeignKey', [], {'default': '-1', 'related_name': "'queryevents'", 'null': 'True', 'blank': 'True', 'to': u"orm['dolon.QueryString']"}),
'rangeEnd': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'rangeStart': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'search_by': ('django.db.models.fields.CharField', [], {'default': "'ST'", 'max_length': '2'}),
'search_task': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'searchtaskevent'", 'null': 'True', 'to': u"orm['dolon.GroupTask']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dolon.HashTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'thumbtaskevent'", 'symmetrical': 'False', 'to': u"orm['dolon.GroupTask']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dolon.SocialUser']", 'null': 'True', 'blank': 'True'})
},
u'dolon.queryresult': {
'Meta': {'object_name': 'QueryResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rangeEnd': ('django.db.models.fields.IntegerField', [], {}),
'rangeStart': ('django.db.models.fields.IntegerField', [], {}),
'result': ('django.db.models.fields.TextField', [], {}),
'resultitems': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'queryresult_instance'", 'symmetrical': 'False', 'to': u"orm['dolon.QueryResultItem']"})
},
u'dolon.queryresultitem': {
'Meta': {'object_name': 'QueryResultItem'},
'contextURL': ('django.db.models.fields.URLField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'query_result_item'", 'null': 'True', 'to': u"orm['dolon.Item']"}),
'params': ('django.db.models.fields.CharField', [], {'max_length': '50000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2000'})
},
u'dolon.querystring': {
'Meta': {'object_name': 'QueryString'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'querystring': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
u'dolon.socialplatform': {
'Meta': {'object_name': 'SocialPlatform'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'dolon.socialuser': {
'Meta': {'object_name': 'SocialUser'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dolon.SocialPlatform']"}),
'profile_url': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'dolon.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'dolon.text': {
'Meta': {'object_name': 'Text'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'text_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
u'dolon.textitem': {
'Meta': {'object_name': 'TextItem', '_ormbases': [u'dolon.Item']},
'contents': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'item_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dolon.Item']", 'unique': 'True', 'primary_key': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'original_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dolon.Text']", 'null': 'True', 'blank': 'True'}),
'snippet': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'dolon.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
'height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mime': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dolon.video': {
'Meta': {'object_name': 'Video'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mime': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'dolon.videoitem': {
'Meta': {'object_name': 'VideoItem', '_ormbases': [u'dolon.Item']},
u'item_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dolon.Item']", 'unique': 'True', 'primary_key': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'thumbnails': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'video_items'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Thumbnail']"}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'videoitem'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['dolon.Video']"})
}
}
complete_apps = ['dolon'] | [
"[email protected]"
] | |
45d2a65651be56165f6cfe5b28a341f127eb57a5 | 302442c32bacca6cde69184d3f2d7529361e4f3c | /cidtrsend-all/stage1-model/pytz/zoneinfo/America/Boa_Vista.py | 297b01070fa499ceb5132271bc1a11bdd52ca2e8 | [] | no_license | fucknoob/WebSemantic | 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | f2b4584a994e00e76caccce167eb04ea61afa3e0 | refs/heads/master | 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | '''tzinfo timezone information for America/Boa_Vista.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Boa_Vista(DstTzInfo):
'''America/Boa_Vista timezone definition. See datetime.tzinfo for details'''
zone = 'America/Boa_Vista'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1914,1,1,4,2,40),
d(1931,10,3,15,0,0),
d(1932,4,1,3,0,0),
d(1932,10,3,4,0,0),
d(1933,4,1,3,0,0),
d(1949,12,1,4,0,0),
d(1950,4,16,4,0,0),
d(1950,12,1,4,0,0),
d(1951,4,1,3,0,0),
d(1951,12,1,4,0,0),
d(1952,4,1,3,0,0),
d(1952,12,1,4,0,0),
d(1953,3,1,3,0,0),
d(1963,12,9,4,0,0),
d(1964,3,1,3,0,0),
d(1965,1,31,4,0,0),
d(1965,3,31,3,0,0),
d(1965,12,1,4,0,0),
d(1966,3,1,3,0,0),
d(1966,11,1,4,0,0),
d(1967,3,1,3,0,0),
d(1967,11,1,4,0,0),
d(1968,3,1,3,0,0),
d(1985,11,2,4,0,0),
d(1986,3,15,3,0,0),
d(1986,10,25,4,0,0),
d(1987,2,14,3,0,0),
d(1987,10,25,4,0,0),
d(1988,2,7,3,0,0),
d(1999,10,3,4,0,0),
d(2000,2,27,3,0,0),
d(2000,10,8,4,0,0),
d(2000,10,15,3,0,0),
]
_transition_info = [
i(-14580,0,'LMT'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
]
Boa_Vista = Boa_Vista()
| [
"[email protected]"
] | |
7ebf3809f66c13bb9a44a6e6f6da11692ab7c74b | 1817aca734cda258cbbfd9e13fbf040d76824621 | /aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DeleteMetricRulesRequest.py | 6d8233fd23f78159ba6bc9c8e1e4547043fcfd53 | [
"Apache-2.0"
] | permissive | sdk-team/aliyun-openapi-python-sdk | 4bd770718e70e31f19e1e322727c27ba74d9fb80 | 996cb07bfcf010fe3ab65daa73d26df2f3b6e97f | refs/heads/master | 2022-08-04T13:11:56.729215 | 2022-07-25T10:01:10 | 2022-07-25T10:01:10 | 183,356,741 | 0 | 0 | null | 2019-04-25T04:33:24 | 2019-04-25T04:33:24 | null | UTF-8 | Python | false | false | 1,185 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteMetricRulesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'DeleteMetricRules','cms')
def get_Ids(self):
return self.get_query_params().get('Ids')
def set_Ids(self,Ids):
for i in range(len(Ids)):
if Ids[i] is not None:
self.add_query_param('Id.' + str(i + 1) , Ids[i]); | [
"[email protected]"
] | |
663175d84618b1612f00e593024da938380a9840 | 98accbb4d8c8f972bfda31e12fabab7c3ca37533 | /linux_lou_plus/step_7/multiprocess/process_sys.py | 4d33b61e088ad55ac86b641f5138761583f1014d | [] | no_license | qimanchen/interview_plan | 49e5a323f35b8b3496d5dc4baba0f12a1b2c2a13 | 6a11a1927a14ce3fc439149e907a3febbee446a7 | refs/heads/master | 2022-12-10T05:25:54.927847 | 2020-06-13T02:54:25 | 2020-06-13T02:54:25 | 194,584,004 | 0 | 0 | null | 2022-12-08T05:25:13 | 2019-07-01T02:08:27 | Python | UTF-8 | Python | false | false | 555 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from multiprocessing import Process, Value, Lock
def func(val, lock):
for i in range(50):
time.sleep(0.01)
with lock:
val.value += 1
if __name__ == "__main__":
    # A plain global variable cannot be shared between processes; the Value
    # object provided by multiprocessing is a proxy that lets the processes share it
v = Value('i', 0)
lock = Lock()
procs = [Process(target=func, args=(v, lock)) for i in range(10)]
for p in procs:
p.start()
for p in procs:
p.join()
print(v.value) | [
"[email protected]"
] | |
0b01324db38b6537c084e6e7d7954b7f84f49fa4 | 45d1478e29cdc173085e9fe2ae092f4c71ea19f2 | /retinaface/modeling/__init__.py | e09b09632129673b7c4c911168f1f58126455bdd | [
"MIT"
] | permissive | OxYunzhe/RetinaFace.detectron2 | 0c5f8659d17fb2e46c194dc0fcbbac7732cd54ab | 3edf1c2d539763115741819bcf16816d7c2e4c91 | refs/heads/master | 2023-03-16T07:50:51.572555 | 2020-06-10T09:41:29 | 2020-06-10T09:41:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | '''
@Copyright (c) tkianai All Rights Reserved.
@Author : tkianai
@Github : https://github.com/tkianai
@Date : 2020-04-21 13:11:05
@FilePath : /RetinaFace.detectron2/retinaface/modeling/__init__.py
@Description :
'''
| [
"[email protected]"
] | |
264ccf1d747e420a00b8a22c7b1db3529c65867c | a8d68074db5c2b2697650ed0281979d3e00cf5a8 | /python-spider/shuaia.py | 4f3d70d1d53400532bacaf55f467f8d4bb664164 | [] | no_license | 15807857476/bogdata-2 | 9595609ea2ae5ae0a48c511f911df2498456467e | 1934cdfa234b77ca91e349b84688db113ff39e8c | refs/heads/master | 2023-05-26T19:10:18.439269 | 2019-05-24T02:50:41 | 2019-05-24T02:50:41 | 188,327,526 | 3 | 1 | null | 2023-05-22T21:37:27 | 2019-05-24T00:53:28 | Python | UTF-8 | Python | false | false | 1,583 | py | # -*- coding:UTF-8 -*-
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import requests
import os
import time
if __name__ == '__main__':
list_url = []
for num in range(1,3):
if num == 1:
url = 'http://www.shuaia.net/index.html'
else:
url = 'http://www.shuaia.net/index_%d.html' % num
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
req = requests.get(url = url,headers = headers)
req.encoding = 'utf-8'
html = req.text
bf = BeautifulSoup(html, 'lxml')
targets_url = bf.find_all(class_='item-img')
for each in targets_url:
list_url.append(each.img.get('alt') + '=' + each.get('href'))
    print('Link collection finished')
for each_img in list_url:
img_info = each_img.split('=')
target_url = img_info[1]
filename = img_info[0] + '.jpg'
        print('Downloading: ' + filename)
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
img_req = requests.get(url = target_url,headers = headers)
img_req.encoding = 'utf-8'
img_html = img_req.text
img_bf_1 = BeautifulSoup(img_html, 'lxml')
img_url = img_bf_1.find_all('div', class_='wr-single-content-list')
img_bf_2 = BeautifulSoup(str(img_url), 'lxml')
img_url = 'http://www.shuaia.net' + img_bf_2.div.img.get('src')
if 'images' not in os.listdir():
os.makedirs('images')
urlretrieve(url = img_url,filename = 'images/' + filename)
time.sleep(1)
    print('Download finished!')
"[email protected]"
] | |
27662d158b11bc9bb0943ec4d1b442d23925248b | b75fa0885bc3ba3f153225fd3396aadef6c1f97e | /slides/pypyjs/lib-py3k/modules/_functools.py | e51a4aa7f3b13c9b4d6cc9168e73ea720b598012 | [
"MIT"
] | permissive | rfk/talk-pypyjs-what-how-why | e084303185167dbc9b704c3568e0c31d0a1f6885 | 1ab62ee32ff9495ae9313ec81e8ee2044212ea71 | refs/heads/master | 2016-09-06T05:27:09.800382 | 2015-04-10T03:12:07 | 2015-04-10T03:12:07 | 22,421,369 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,980 | py | """ Supplies the internal functions for functools.py in the standard library """
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
sentinel = object()
@builtinify
def reduce(func, sequence, initial=sentinel):
"""reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty."""
iterator = iter(sequence)
if initial is sentinel:
try:
initial = next(iterator)
except StopIteration:
raise TypeError("reduce() of empty sequence with no initial value")
result = initial
for item in iterator:
result = func(result, item)
return result
class partial(object):
"""
partial(func, *args, **keywords) - new function with partial application
of the given arguments and keywords.
"""
def __init__(self, *args, **keywords):
if not args:
raise TypeError('__init__() takes at least 2 arguments (1 given)')
func, args = args[0], args[1:]
if not callable(func):
raise TypeError("the first argument must be callable")
self._func = func
self._args = args
self._keywords = keywords or None
def __delattr__(self, key):
if key == '__dict__':
raise TypeError("a partial object's dictionary may not be deleted")
object.__delattr__(self, key)
@property
def func(self):
return self._func
@property
def args(self):
return self._args
@property
def keywords(self):
return self._keywords
def __call__(self, *fargs, **fkeywords):
if self.keywords is not None:
fkeywords = dict(self.keywords, **fkeywords)
return self.func(*(self.args + fargs), **fkeywords)
def __repr__(self):
cls = type(self)
if cls is partial:
name = 'functools.partial'
else:
name = cls.__name__
tmp = [repr(self.func)]
for arg in self.args:
tmp.append(repr(arg))
if self.keywords:
for k, v in self.keywords.items():
tmp.append("{}={!r}".format(k, v))
return "{}({})".format(name, ', '.join(tmp))
def __reduce__(self):
d = dict((k, v) for k, v in self.__dict__.items() if k not in
('_func', '_args', '_keywords'))
if len(d) == 0:
d = None
return (type(self), (self.func,),
(self.func, self.args, self.keywords, d))
def __setstate__(self, state):
self._func, self._args, self._keywords, d = state
if d is not None:
self.__dict__.update(d)
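# A minimal usage sketch (not part of the original module): partial pre-binds
# leading positional arguments and keywords, e.g.
#   add_one = partial(lambda a, b: a + b, 1)
#   add_one(41)  # -> 42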
| [
"[email protected]"
] | |
5bcff5cffd28731828328a79715cde3f608f37b3 | 9fefd87bf65dd0be051988ead6fa532ad968371c | /01_MathBasic/ex15.py | 574694f9302e6bc50f1d3caf72765a3d1df5dfee | [] | no_license | et0511/linear-algebra-basics | b94659832dd16342bdc73bace18ab21d1fc3458d | b9f3a91ea9ba5b1011d619d374f238dd56c09c9a | refs/heads/master | 2023-01-22T05:35:56.850516 | 2020-11-20T08:39:14 | 2020-11-20T08:39:14 | 313,239,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # 행렬의 산술 연산: 곱셈
import numpy as np
m1 = np.array([
[1, 2, 3],
[4, 5, 6]
])
m2 = np.array([
[10, 20, 30],
[40, 50, 60]
])
m3 = m1 * m2
print(m3)
m4 = np.multiply(m1, m2)
print(m4)
| [
"[email protected]"
] | |
80a7bbe98fb1db2e7affb126f6379fbca8deaffa | 0be27c0a583d3a8edd5d136c091e74a3df51b526 | /pro_6.py | cd9f08e7650bc6d5eabf300b5b77d29d6bc94b75 | [] | no_license | ssangitha/guvicode | 3d38942f5d5e27a7978e070e14be07a5269b01fe | ea960fb056cfe577eec81e83841929e41a31f72e | refs/heads/master | 2020-04-15T05:01:00.226391 | 2019-09-06T10:08:23 | 2019-09-06T10:08:23 | 164,405,935 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | n=int(input())
l=list(map(int,input().split()))
c=0
for i in range(len(l)-2):
for j in range(i+1,len(l)-1):
for k in range(j+1,len(l)):
if l[i]<l[j]<l[k]:
c=c+1
print(c)
# counts the number of strictly increasing triplets, i.e. l[i] < l[j] < l[k] with i < j < k
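# Example run (assumed input): n = 5 and "1 2 3 4 5" -> every choice of three
# indices is increasing, so the program prints C(5,3) = 10.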
| [
"[email protected]"
] | |
f46db137e5ffc06d2d219a96623c75d5281f13d1 | 49986759cb09afe8888e87cb5d3d02defedf7fcf | /examples/openpyxl/openpyxl__loadedFiles_example.py | 43f14fe2d32458d4bd9f8b34649c22fd76ea9b62 | [] | no_license | Ze1598/Python_stuff | ca7e8a85ab693efb1909b1aaf3075906419ab43b | df4d9b85eeff4e14c91533135a347b59d52812c7 | refs/heads/master | 2023-04-10T16:50:37.719570 | 2023-03-26T15:06:31 | 2023-03-26T15:06:31 | 117,886,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,790 | py | '''
File that goes over basic operations with the openpyxl module: a
module for working with Excel files (.xlsx) and LibreOffice Calc
(.xls), that is, workbook files.
This file goes over the basic operations performed with these files,
without touching any writing to the files.
'''
import openpyxl
# Used to pretty-print the information written to the output file
import pprint
# Input
# -----------------------------------------------------------------
# Load a workbook file (we'll use an Excel file in this\
# case)
wb = openpyxl.load_workbook("openpyxl_sample.xlsx")
# -----------------------------------------------------------------
# Get sheets
# -----------------------------------------------------------------
# Get a list with the names of each sheet in the file
wb_sheets = wb.get_sheet_names()
# Get the sheet with name "Sheet1"
single_sheet = wb.get_sheet_by_name("Sheet1")
# Print the title of the sheet
print(single_sheet.title)
# Gets the sheet currently open in the file
active_sheet = wb.active
print(active_sheet)
print()
# -----------------------------------------------------------------
# Get cells
# -----------------------------------------------------------------
# Print the cell located at A1 in the "Sheet1" sheet
a1_cell = single_sheet["A1"]
print(a1_cell)
# Actually print the value saved in the cell (properly converted to a\
# Python datatype)
print(a1_cell.value)
# Print the coordinates of the A1 cell
print(f"`a1_cell` is located at row {a1_cell.row} and column {a1_cell.column}, that is, {a1_cell.coordinate}.")
# Print the value of another cell (cell located at B1)
# This time we access a cell by calling the `cell()` method and pass the\
# desired coordinate as row and column values
print(single_sheet.cell(row=1, column=2).value)
print()
# -----------------------------------------------------------------
# Access rows and columns
# -----------------------------------------------------------------
# Print the values saved in all the rows of the second column (B)
print("Values saved in the second column:")
# Loop through all the rows in the second column and print the values found
# Use the `max_row` attribute to find the index of the last row in this sheet
for i in range(1, single_sheet.max_row+1):
print(f"Row {i}, Value {single_sheet.cell(row=i, column=2).value}")
print()
# Print the number of columns found in the current sheet by using the\
# `max_column` attribute
print(f"{single_sheet.title} has {single_sheet.max_column} columns.")
print()
# Extract the first three rows, including columns A through C
extract_rows = tuple(single_sheet["A1":"C3"])
# Each item corresponds to a single row, that is, a single item contains\
# all the cells of that row
print(extract_rows)
print("Loop through rows 1 through 3, including columns A through C.")
for row_of_cell_objs in extract_rows:
for cell_obj in row_of_cell_objs:
print(cell_obj.coordinate, cell_obj.value)
print('--- END OF ROW ---')
print()
# We can loop through all the cells in a given row with dictionary-style syntax
# Loop through the cells in the first row (row 1)
for cell_obj in single_sheet[1]:
print(cell_obj.value)
print()
# -----------------------------------------------------------------
# Convert between integer and alphabetic representation of columns
# -----------------------------------------------------------------
# Because a workbook can have many columns, the when it reachs the 27th\
# it needs to start using two letters to represent the column. Thus, we\
# can use `get_column_letter()` method to input the integer representation\
# of a column and get the alphabetic representation returned
print("get_column_letter(27) =>", openpyxl.utils.get_column_letter(27))
print("get_column_letter(900) =>", openpyxl.utils.get_column_letter(900))
# The exact operation, that is, get the integer representation given the\
# alphabetic counterpart, is done via the `column_index_from_string()` method
print("column_index_from_string(AA) =>", openpyxl.utils.column_index_from_string("AA"))
print("column_index_from_string(AHP) =>", openpyxl.utils.column_index_from_string("AHP"))
print()
# -----------------------------------------------------------------
# Load an Excel file, extract data and save it in a new Python file
# -----------------------------------------------------------------
wb = openpyxl.load_workbook("censuspopdata.xlsx")
sheet = wb.get_sheet_by_name("Population by Census Tract")
# Dictionary to hold the extracted data in the format:
# county_data[state][county]["tracts"]
# county_data[state][county]["pop"]
county_data = {}
# Loop through all the rows in the file
for row in range(2, sheet.max_row+1):
# Get the state, county and population count for the current row
state = sheet["B"+str(row)].value
county = sheet["C"+str(row)].value
pop = sheet["D"+str(row)].value
# To make sure a key for the current state exists in the dictionary,\
# create the key-value pair `county_data[state] = {}`
county_data.setdefault(state, {})
# Create default values as well for the values of the current `state`\
# key, so that the `state` key holds a dictionary of the type:
# county_data[state][county]["tracts"]
# county_data[state][county]["pop"]
county_data[state].setdefault( county, {"tracts":0, "pop":0} )
# Since each row represents a census tract, increment the `tracts` key
county_data[state][county]["tracts"] += 1
# While we are in the same row, that is, the same county, add up the\
# population amounts found
county_data[state][county]["pop"] += int(pop)
# Now write the extracted data to a Python file
with open("openpyxl_sample_output_file.py", "w") as f:
f.write("all_data = " + pprint.pformat(county_data))
# ----------------------------------------------------------------- | [
"jose.fernando.costa.1998@gmailcom"
] | jose.fernando.costa.1998@gmailcom |
d64351067c60d186c1b36d2c2daa144f70f80fb5 | 9d931ad4bb1ee0806a0b1012cf551d77199416ae | /isobar/pattern/harmony.py | b1d8ece983fa44e188235e02acd85ed1e43070e7 | [
"MIT"
] | permissive | EnigmaCurry/isobar | 9d8a92c44f9ba7e0eb8b8527fdb1a61691b85fe4 | 05de0d105984b642eeaca3286abf08e02e309362 | refs/heads/master | 2022-05-24T23:07:35.859526 | 2020-04-28T23:05:08 | 2020-04-28T23:05:08 | 259,672,137 | 1 | 0 | MIT | 2020-04-28T15:11:04 | 2020-04-28T15:11:02 | null | UTF-8 | Python | false | false | 576 | py | from isobar.pattern.core import *
class PFilterByKey(Pattern):
def __init__(self, input, key):
self.input = input
self.key = key
def __next__(self):
note = next(self.input)
key = Pattern.value(self.key)
if note in key:
return note
else:
return None
class PNearest(Pattern):
def __init__(self, input, key):
self.input = input
self.key = key
def __next__(self):
note = next(self.input)
key = Pattern.value(self.key)
return key.nearest_note(note)
| [
"[email protected]"
] | |
6f3ef8ad3d317b58bdc8a750c23661686421a08f | 810b7b2bb5829bf9ce0d921395ad6ca22563915c | /question-type-fine-num-classifier-builder.py | 19e0daeeb9bccff0320b77706b209ea2dbd8f908 | [] | no_license | daksh-ddt/QuestionTypeClassifier | 5f1ee8e8f017fbe4836e24f943e92dd14ecaebe5 | 33cffbefe1869612f8d39c83bb3e72602060893d | refs/heads/master | 2020-12-25T03:01:05.344465 | 2014-03-22T16:01:24 | 2014-03-22T16:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,500 | py | #!/usr/bin/env python
"""
Best score: 0.928
Best parameters set:
clf__alpha: 1e-06
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__norm: 'l1'
tfidf__use_idf: True
vect__max_df: 0.75
vect__max_features: 5000
vect__ngram_range: (1, 2)
vect__stop_words: None
"""
__author__ = 'gavin hackeling'
__email__ = '[email protected]'
import os
from time import time
import pickle
from pprint import pprint
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def grid_search():
os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
stop_words = [l.strip() for l in open('stop-words.txt', 'rb')]
categories = ['code', 'count', 'date', 'dist', 'money', 'ord', 'other', 'percent', 'period', 'speed', 'temp',
'volsize', 'weight']
train = load_files('fine/NUM', categories=categories, shuffle=True, random_state=42)
X, y = train.data, train.target
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
'vect__stop_words': ('english', stop_words, None),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'tfidf__use_idf': (True, False),
'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
'clf__n_iter': (10, 50, 80),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
t0 = time()
print 'Performing grid search...'
print 'pipeline:', [name for name, _ in pipeline.steps]
print 'parameters:'
pprint(parameters)
grid_search.fit(X, y)
print 'done in %0.3fs' % (time() - t0)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
def build_model():
os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
categories = ['code', 'count', 'date', 'dist', 'money', 'ord', 'other', 'percent', 'period', 'speed', 'temp',
'volsize', 'weight']
train = load_files('fine/NUM', categories=categories, shuffle=True, random_state=42)
X, y = train.data, train.target
pipeline = Pipeline([
('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2), stop_words=None)),
('tfidf', TfidfTransformer(norm='l2', use_idf=False)),
('clf', SGDClassifier(n_iter=80, penalty='elasticnet', alpha=0.0001)),
])
X_train, X_test, y_train, y_test = train_test_split(train.data, train.target, test_size=0.25, random_state=42)
pipeline.fit(X_train, y_train)
print 'classifier score:', pipeline.score(X_test, y_test)
pipeline.fit(X, y)
filehandler = open('fine-num-classifier.p', 'wb')
pickle.dump(pipeline, filehandler)
filehandler.close()
if __name__ == '__main__':
grid_search()
#build_model()
| [
"[email protected]"
] | |
2a5efff102c4a2919f73211d409750b6f210d7c8 | 8aa9ecfe421b196589b6c9fdc0e954d02d927feb | /sphinx/source/docs/user_guide/examples/data_linked_brushing_subsets.py | 49c98cd13e1f65a5c2be6677d3b998058f107edb | [
"BSD-3-Clause"
] | permissive | hongyu9000/bokeh | b384484925c6c145e4eaf87460a3f776095e81ed | b19f2c5547024bdc288d02e73fdb65e65991df5f | refs/heads/master | 2020-09-03T15:57:31.157443 | 2019-11-04T05:25:46 | 2019-11-04T05:25:46 | 219,503,733 | 1 | 0 | BSD-3-Clause | 2019-11-04T13:06:20 | 2019-11-04T13:06:19 | null | UTF-8 | Python | false | false | 1,055 | py | from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, CDSView, BooleanFilter
output_file("linked_selection_subsets.html")
x = list(range(-20, 21))
y0 = [abs(xx) for xx in x]
y1 = [xx**2 for xx in x]
# create a column data source for the plots to share
source = ColumnDataSource(data=dict(x=x, y0=y0, y1=y1))
# create a view of the source for one plot to use
view = CDSView(source=source, filters=[BooleanFilter([True if y > 250 or y < 100 else False for y in y1])])
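# With y1 = x**2 this filter keeps points where x**2 > 250 (|x| >= 16) or
# x**2 < 100 (|x| <= 9), hiding the middle band of points on the right-hand plot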
TOOLS = "box_select,lasso_select,hover,help"
# create a new plot and add a renderer
left = figure(tools=TOOLS, plot_width=300, plot_height=300, title=None)
left.circle('x', 'y0', size=10, hover_color="firebrick", source=source)
# create another new plot, add a renderer that uses the view of the data source
right = figure(tools=TOOLS, plot_width=300, plot_height=300, title=None)
right.circle('x', 'y1', size=10, hover_color="firebrick", source=source, view=view)
p = gridplot([[left, right]])
show(p)
| [
"[email protected]"
] | |
b9ae2e9946a3611c2159abe0a20a624591d0eeca | 01abb5fe2d6a51e8ee4330eaead043f4f9aad99d | /Repo_Files/Zips/plugin.video.streamhub/resources/lib/ssources/moviesplanet.py | 60b053928b8fa5b276249a2f1673279da57f3e1c | [] | no_license | MrAnhell/StreamHub | 01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f | e70f384abf23c83001152eae87c6897f2d3aef99 | refs/heads/master | 2021-01-18T23:25:48.119585 | 2017-09-06T12:39:41 | 2017-09-06T12:39:41 | 87,110,979 | 0 | 0 | null | 2017-04-03T19:09:49 | 2017-04-03T19:09:49 | null | UTF-8 | Python | false | false | 7,208 | py | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64,time
from resources.lib.smodules import control
from resources.lib.smodules import pyaes
from resources.lib.smodules import cleantitle
from resources.lib.smodules import client
from resources.lib.smodules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['moviesplanet.is']
self.base_link = 'http://www.moviesplanet.is'
self.search_link = '/ajax/search.php'
self.user = control.setting('moviesplanet.user')
self.password = control.setting('moviesplanet.pass')
def movie(self, imdb, title, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
t = cleantitle.get(title)
h = {'X-Requested-With': 'XMLHttpRequest'}
u = urlparse.urljoin(self.base_link, self.search_link)
p = {'q': title.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
p = urllib.urlencode(p)
r = client.request(u, post=p, headers=h)
r = json.loads(r)
r = [i for i in r if i['meta'].strip().split()[0].lower() == 'movie']
r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
r = [(i[0], client.request(i[1])) for i in r]
r = [(i[0], i[1]) for i in r if not i[1] == None]
r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r if i[1]]
r = [i for i in r if year in i[1]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
t = cleantitle.get(tvshowtitle)
h = {'X-Requested-With': 'XMLHttpRequest'}
u = urlparse.urljoin(self.base_link, self.search_link)
p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
p = urllib.urlencode(p)
r = client.request(u, post=p, headers=h)
r = json.loads(r)
r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
r = [(i[0], client.request(i[1])) for i in r]
r = [(i[0], i[1]) for i in r if not i[1] == None]
r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r if i[1]]
r = [i for i in r if year in i[1]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
if url == None: return
url = '%s/season/%01d/episode/%01d' % (url, int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def _gkdecrypt(self, key, str):
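        # Pads the key with NUL bytes to 24 characters, AES-ECB decrypts the
        # hex-encoded payload, and keeps everything before the first NUL byte.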
try:
key += (24 - len(key)) * '\0'
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationECB(key))
str = decrypter.feed(str.decode('hex')) + decrypter.feed()
str = str.split('\0', 1)[0]
return str
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if (self.user == '' or self.password == ''): raise Exception()
headers = {'X-Requested-With': 'XMLHttpRequest'}
login = urlparse.urljoin(self.base_link, '/login')
post = {'username': self.user, 'password': self.password, 'action': 'login'}
post = urllib.urlencode(post)
cookie = client.request(login, post=post, headers=headers, output='cookie')
url = urlparse.urljoin(self.base_link, url)
result = client.request(url, cookie=cookie)
url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
url = client.parseDOM(url, 'iframe', ret='src')[0]
url = url.replace('https://', 'http://')
links = []
try:
dec = re.findall('mplanet\*(.+)', url)[0]
dec = dec.rsplit('&')[0]
dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
dec = directstream.google(dec)
links += [(i['url'], i['quality'], 'gvideo') for i in dec]
except:
pass
result = client.request(url)
try:
url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
for i in url:
try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
except: pass
except:
pass
try:
url = client.parseDOM(result, 'source', ret='src')
url += re.findall('src\s*:\s*\'(.*?)\'', result)
url = [i for i in url if '://' in i]
links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
except:
pass
for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
return url
except:
return
| [
"[email protected]"
] | |
741af7c3e798bb5ec67f95ad3116c1635c8b4701 | 5b8aaa4f70d6710b81033aaa300563ce23ae114d | /OpenNMT-py/onmt/inputters/text_dataset.py | 4d867f309a88e9534225534c1d672d63b532d725 | [
"MIT"
] | permissive | takatomo-k/Code | 2cbfe9481abc026e3664a16421b9f12c9b550b51 | 54ec9f656579610c86acb3276bf0d26dbe9d8761 | refs/heads/master | 2020-04-04T16:00:15.612724 | 2018-11-27T11:32:59 | 2018-11-27T11:32:59 | 156,060,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,732 | py | # -*- coding: utf-8 -*-
"""Define word-based embedders."""
from collections import Counter
from itertools import chain
import io
import codecs
import sys
import torch
import torchtext
from onmt.inputters.dataset_base import (DatasetBase, UNK_WORD,
PAD_WORD, BOS_WORD, EOS_WORD)
from onmt.utils.misc import aeq
class TextDataset(DatasetBase):
""" Dataset for data_type=='text'
Build `Example` objects, `Field` objects, and filter_pred function
from text corpus.
Args:
fields (dict): a dictionary of `torchtext.data.Field`.
Keys are like 'src', 'tgt', 'src_map', and 'alignment'.
src_examples_iter (dict iter): preprocessed source example
dictionary iterator.
tgt_examples_iter (dict iter): preprocessed target example
dictionary iterator.
num_src_feats (int): number of source side features.
num_tgt_feats (int): number of target side features.
src_seq_length (int): maximum source sequence length.
tgt_seq_length (int): maximum target sequence length.
dynamic_dict (bool): create dynamic dictionaries?
use_filter_pred (bool): use a custom filter predicate to filter
out examples?
"""
def __init__(self, fields, src_examples_iter, tgt_examples_iter,
num_src_feats=0, num_tgt_feats=0,
src_seq_length=0, tgt_seq_length=0,
dynamic_dict=True, use_filter_pred=True):
self.data_type = 'text'
# self.src_vocabs: mutated in dynamic_dict, used in
# collapse_copy_scores and in Translator.py
self.src_vocabs = []
self.n_src_feats = num_src_feats
self.n_tgt_feats = num_tgt_feats
# Each element of an example is a dictionary whose keys represents
# at minimum the src tokens and their indices and potentially also
# the src and tgt features and alignment information.
if tgt_examples_iter is not None:
examples_iter = (self._join_dicts(src, tgt) for src, tgt in
zip(src_examples_iter, tgt_examples_iter))
else:
examples_iter = src_examples_iter
if dynamic_dict:
examples_iter = self._dynamic_dict(examples_iter)
# Peek at the first to see which fields are used.
ex, examples_iter = self._peek(examples_iter)
keys = ex.keys()
out_fields = [(k, fields[k]) if k in fields else (k, None)
for k in keys]
example_values = ([ex[k] for k in keys] for ex in examples_iter)
# If out_examples is a generator, we need to save the filter_pred
# function in serialization too, which would cause a problem when
# `torch.save()`. Thus we materialize it as a list.
src_size = 0
out_examples = []
for ex_values in example_values:
example = self._construct_example_fromlist(
ex_values, out_fields)
src_size += len(example.src)
out_examples.append(example)
def filter_pred(example):
""" ? """
return 0 < len(example.src) <= src_seq_length \
and 0 < len(example.tgt) <= tgt_seq_length
filter_pred = filter_pred if use_filter_pred else lambda x: True
super(TextDataset, self).__init__(
out_examples, out_fields, filter_pred
)
def sort_key(self, ex):
""" Sort using length of source sentences. """
# Default to a balanced sort, prioritizing tgt len match.
# TODO: make this configurable.
if hasattr(ex, "tgt"):
return len(ex.src), len(ex.tgt)
return len(ex.src)
@staticmethod
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):
"""
Given scores from an expanded dictionary
corresponeding to a batch, sums together copies,
with a dictionary word when it is ambigious.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
blank = []
fill = []
index = batch.indices.data[b]
src_vocab = src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
scores[:, b].index_add_(1, fill,
scores[:, b].index_select(1, blank))
scores[:, b].index_fill_(1, blank, 1e-10)
return scores
@staticmethod
def make_text_examples_nfeats_tpl(text_iter, text_path, truncate, side):
"""
Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an openned file, a string list etc...
text_path(str): path to file or None
path (str): location of a src or tgt file.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Returns:
(example_dict iterator, num_feats) tuple.
"""
assert side in ['src', 'tgt']
if text_iter is None:
if text_path is not None:
text_iter = TextDataset.make_text_iterator_from_file(text_path)
else:
return (None, 0)
# All examples have same number of features, so we peek first one
# to get the num_feats.
examples_nfeats_iter = \
TextDataset.make_examples(text_iter, truncate, side)
#import pdb; pdb.set_trace()
first_ex = next(examples_nfeats_iter)
num_feats = first_ex[1]
# Chain back the first element - we only want to peek it.
examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)
examples_iter = (ex for ex, nfeats in examples_nfeats_iter)
return (examples_iter, num_feats)
@staticmethod
def make_examples(text_iter, truncate, side):
"""
Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Yields:
(word, features, nfeat) triples for each line.
"""
for i, line in enumerate(text_iter):
line = line.strip().split()
if truncate:
line = line[:truncate]
words, feats, n_feats = \
TextDataset.extract_text_features(line)
example_dict = {side: words, "indices": i}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
yield example_dict, n_feats
@staticmethod
def make_text_iterator_from_file(path):
with codecs.open(path, "r", "utf-8") as corpus_file:
for line in corpus_file:
yield line
@staticmethod
def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
fields["src"] = torchtext.data.Field(
eos_token=EOS_WORD,
pad_token=PAD_WORD,
include_lengths=True)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(eos_token=EOS_WORD,pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields
@staticmethod
def get_num_features(corpus_file, side):
"""
Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = TextDataset.extract_text_features(f_line)
return num_feats
# Below are helper functions for intra-class use only.
def _dynamic_dict(self, examples_iter):
for example in examples_iter:
src = example["src"]
src_vocab = torchtext.vocab.Vocab(Counter(src),
specials=[UNK_WORD, PAD_WORD])
self.src_vocabs.append(src_vocab)
# Mapping source tokens to indices in the dynamic dict.
src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])
example["src_map"] = src_map
if "tgt" in example:
tgt = example["tgt"]
mask = torch.LongTensor(
[0] + [src_vocab.stoi[w] for w in tgt] + [0])
example["alignment"] = mask
yield example
class ShardedTextCorpusIterator(object):
"""
This is the iterator for text corpus, used for sharding large text
corpus into small shards, to avoid hogging memory.
Inside this iterator, it automatically divides the corpus file into
shards of size `shard_size`. Then, for each shard, it processes
into (example_dict, n_features) tuples when iterates.
"""
def __init__(self, corpus_path, line_truncate, side, shard_size,
assoc_iter=None):
"""
Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with.
"""
try:
# The codecs module seems to have bugs with seek()/tell(),
# so we use io.open().
self.corpus = io.open(corpus_path, "r", encoding="utf-8")
except IOError:
sys.stderr.write("Failed to open corpus file: %s" % corpus_path)
sys.exit(1)
self.line_truncate = line_truncate
self.side = side
self.shard_size = shard_size
self.assoc_iter = assoc_iter
self.last_pos = 0
self.line_index = -1
self.eof = False
def __iter__(self):
"""
Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals to or approximates `self.shard_size`.
"""
iteration_index = -1
if self.assoc_iter is not None:
            # We have an associate iterator: just yield tuples
            # until we run parallel with it.
while self.line_index < self.assoc_iter.line_index:
line = self.corpus.readline()
if line == '':
raise AssertionError(
"Two corpuses must have same number of lines!")
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
if self.assoc_iter.eof:
self.eof = True
self.corpus.close()
else:
            # Yield tuples until this shard's size reaches the threshold.
self.corpus.seek(self.last_pos)
while True:
if self.shard_size != 0 and self.line_index % 64 == 0:
                    # This check is time consuming on Py2 (but it is quite
                    # fast on Py3, weird!), so we don't bother to check every
                    # line; instead we check every 64 lines. Thus we are not
                    # dividing exactly at `shard_size`, but the difference is
                    # small.
cur_pos = self.corpus.tell()
if cur_pos >= self.last_pos + self.shard_size:
self.last_pos = cur_pos
return
line = self.corpus.readline()
if line == '':
self.eof = True
self.corpus.close()
return
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
def hit_end(self):
""" ? """
return self.eof
@property
def num_feats(self):
"""
We peek the first line and seek back to
the beginning of the file.
"""
saved_pos = self.corpus.tell()
line = self.corpus.readline().split()
if self.line_truncate:
line = line[:self.line_truncate]
_, _, self.n_feats = TextDataset.extract_text_features(line)
self.corpus.seek(saved_pos)
return self.n_feats
def _example_dict_iter(self, line, index):
line = line.split()
if self.line_truncate:
line = line[:self.line_truncate]
words, feats, n_feats = TextDataset.extract_text_features(line)
example_dict = {self.side: words, "indices": index}
if feats:
# All examples must have same number of features.
aeq(self.n_feats, n_feats)
prefix = self.side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
return example_dict
| [
"[email protected]"
] | |
a56e1a4d979e7c04e5529f06b36b64e8bd597195 | eade1861db1968645e0e17dfaa5250a4b8245b98 | /instacart/lgb.py | d145e050723dd158e83d255a6eeb91d52741768a | [] | no_license | piupiuup/competition | 5b5da56fed336e07cf99cef8f5bfe89a8f771900 | 076c30df3d2647cb3580c543e604375e84590ca7 | refs/heads/master | 2022-09-30T14:47:01.244084 | 2020-05-30T12:56:02 | 2020-05-30T12:56:02 | 268,074,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,569 | py | import os
import numpy as np
import pandas as pd
import lightgbm as lgb
import scipy.stats as scs
IDIR = r'C:\Users\csw\Desktop\python\instacart\data\\'
def f1(y_true,y_pred):
if (type(y_true) == float) or (len(y_true)==0):
if (type(y_pred) == float) or (len(y_pred)==0):
return 1
else:
y_true = []
if type(y_pred) == float:
y_pred = []
    TP = len(set(y_true) & set(y_pred)) # number of predicted products that are correct (true positives)
    MP = len(y_true)  # number of products actually bought
    MN = len(y_pred)  # number of products predicted
return 2*TP/(MP+MN)
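# Added illustration (never called): a minimal sanity check of the F1 helper above.
# Under true division, f1([1, 2, 3], [2, 3, 4]) = 2*2/(3+3) = 0.666..., and two empty
# baskets score 1 by convention.
def _f1_example():
    return f1([1, 2, 3], [2, 3, 4]), f1([], [])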
def instacart_grade(y_true,y_pred):
return np.mean([f1(x, y) for x, y in zip(y_true['products'].values, y_pred['products'].values)])
# Option 1: build the submission list from a fixed probability threshold
def get_result(data):
result = data.groupby('order_id',as_index=False)['product_id'].agg({'products':lambda x:list(x)})
return result
# Option 2: build the submission list from a per-order optimal threshold
def get_result2(data):
'''
    :param data: pd.DataFrame with columns ['order_id','product_id','pred']
    :return: pd.DataFrame result with columns ['order_id','products']
    '''
    # find the best per-order probability threshold
def get_max_exp(pred_list, n_product):
        f1_temp = 0 # expected F1 so far
        TP = 0 # expected number of correct products
exp = 1
for pred in pred_list:
exp = exp * (1-pred)
for pred in pred_list:
n_product += 1
TP += pred
f1 = TP/n_product
if f1 < f1_temp:
if exp > f1_temp:
return 1.01
else:
return pred
else:
f1_temp = f1
return 0
user_n_product = data.groupby('order_id')['pred'].sum()
user_n_product = dict(user_n_product)
temp = data.copy()
temp.sort_values('pred',ascending=False,inplace=True)
grouped = temp.groupby('order_id')
result = {}
for order_id, grouped in grouped:
        TRESHOLD = get_max_exp(grouped['pred'].values,user_n_product[order_id]) # feed the candidates' purchase probabilities and get the best threshold
        result[order_id] = list(grouped['product_id'].values[grouped['pred'].values>TRESHOLD]) # keep the products above the threshold
result = pd.Series(result).to_frame()
result.reset_index(inplace=True)
result.columns = ['order_id','products']
return result
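# Added note: get_max_exp above scans the candidates in descending probability and
# keeps adding products while a proxy for the expected F1 (expected hits divided by
# expected basket size plus kept count) still improves; once it stops improving it
# returns the current probability as the cut-off, or 1.01 (keep nothing) when an
# empty "None" prediction has a higher expected score.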
# Convert a list of product ids into a space-separated string
def list_to_str(arr):
if (type(arr) != list) or (len(arr) == 0):
return 'None'
else:
s = str(arr[0])
for i in range(len(arr)-1):
s += ' ' + str(arr[i+1])
return s
# Gini coefficient
def gini(arr):
arr = list(arr)
arr = sorted(arr)
for i in reversed(range(len(arr))):
arr[i] = sum(arr[:(i + 1)])
gini = 1+1/len(arr)-2*sum(arr)/arr[-1]/len(arr)
return gini
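# Added illustration (never called): under true division, gini([1, 1, 1, 1]) -> 0.0
# (perfectly even) and gini([0, 0, 0, 100]) -> 0.75 (highly concentrated).
def _gini_example():
    return gini([1, 1, 1, 1]), gini([0, 0, 0, 100])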
# Skewness
def skew(arr):
return scs.skew(arr)
# Rank rows within groups
def rank(data, feat_arr, feat2, ascending=True, name='rank'):
data.sort_values(feat_arr+[feat2],inplace=True,ascending=ascending)
data[name] = range(data.shape[0])
min_rank = data.groupby(feat_arr,as_index=False)[name].agg({'min_rank':'min'})
data = pd.merge(data,min_rank,on=feat_arr,how='left')
data[name] = data[name] - data['min_rank']
del data['min_rank']
return data
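# Added illustration (never called): rank() gives each row a 0-based position within
# its `feat_arr` group, ordered by `feat2`; e.g. ranking every user's orders by date.
def _rank_example(user_order):
    return rank(user_order.copy(), ['user_id'], 'date', ascending=True, name='date_rank')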
# Load the orders table
def get_user_order():
df_path = r'F:\cache\instacart_cache\user_order.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = pd.read_csv(IDIR + 'orders.csv')
df.sort_values(['user_id', 'order_number'], ascending=False, inplace=True)
dates = [0]
date = 0
for i in df['days_since_prior_order'].values:
date += i
if np.isnan(date):
date = 0
dates.append(date)
df['date'] = dates[:-1]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
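# Note (added): the 'date' column built above counts days backwards from each user's
# most recent order (0 = latest), obtained by cumulatively summing
# days_since_prior_order over orders sorted in reverse order_number.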
# Load the prior order products
def get_prior():
df_path = r'F:\cache\instacart_cache\prior.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = pd.read_csv(IDIR + 'order_products__prior.csv')
user_order = get_user_order()
df = pd.merge(df,user_order,on='order_id',how='left')
df.sort_values(['user_id','product_id','order_number'],ascending=True,inplace=True)
product = get_product()
df = pd.merge(df,product[['product_id','aisle_id','department_id']])
del df['eval_set']
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Load the train order products
def get_train():
df_path = r'F:\cache\instacart_cache\train.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = pd.read_csv(IDIR + 'order_products__train.csv')
user_order = get_user_order()
df = pd.merge(df, user_order, on='order_id', how='left')
df['label'] = 1
del df['eval_set']
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Load the products table
def get_product():
df_path = r'F:\cache\instacart_cache\product.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = pd.read_csv(IDIR + 'products.csv')
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Build the candidate (user, product) sample set
def get_candicate(prior,user_order):
df_path = r'F:\cache\instacart_cache\candicate.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
user_order_temp = user_order[user_order['eval_set'] != 'prior']
df = pd.merge(user_order_temp[['user_id','order_id']],
prior[['user_id','product_id']], on='user_id', how='left')
df = df.drop_duplicates(['user_id', 'product_id'])
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# User base features
def get_user_feat(prior,user_order):
    df_path = r'F:\cache\instacart_cache\user_feat.hdf'
    if os.path.exists(df_path) & 1:
        df = pd.read_hdf(df_path, 'w')
    else:
        user_order_temp = user_order[user_order['eval_set'] == 'prior']
        df = user_order_temp.groupby('user_id')['order_id'].agg({'user_n_order':'count'})           # number of orders of the user
        df['user_n_day'] = user_order_temp.groupby('user_id')['days_since_prior_order'].sum()       # time span (days) of the user's history
        df['user_n_item'] = prior.groupby('user_id')['product_id'].count()                          # total number of items bought
        df['user_n_product'] = prior.groupby('user_id')['product_id'].nunique()                     # number of distinct products bought
        df['user_avg_day_per_order'] = df['user_n_day'] / (df['user_n_order']-1)                    # average days between orders
        df['user_avg_item_per_order'] = df['user_n_item'] / df['user_n_order']                      # average items per order
        df['user_avg_item_per_day'] = df['user_avg_item_per_order'] / (df['user_avg_day_per_order']+0.01)   # average items per day
        # new (first-time) products per order
        temp = prior[~prior['days_since_prior_order'].isnull()]
        df['user_n_new_product'] = temp[temp['reordered']==0].groupby('user_id')['reordered'].count()  # number of first-time products bought
        df['user_avg_new_per_order'] = df['user_n_new_product'] / (df['user_n_order']-1)              # average new products per order
        user_product_n_item = get_user_product_avg_day_per_item(prior)
        df['user_avg_order_per_product'] = user_product_n_item.groupby('user_id')['user_product_n_item'].mean()
        df['user_avg_order_per_product'] = df['user_avg_order_per_product']/df['user_n_order']
        df['user_percent_of_new'] = df['user_avg_new_per_order']/df['user_avg_item_per_order']
        del temp,df['user_n_new_product']
        df.reset_index(inplace=True)
        df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
    return df
# Product base features
def get_product_feat(prior):
df_path = r'F:\cache\instacart_cache\product_feat.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
        df = prior.groupby('product_id')['product_id'].agg({'product_item_count':'count'})   # total number of times this product was bought
        df['product_n_user'] = prior.groupby('product_id')['user_id'].nunique()              # number of distinct users who bought this product
        df['product_avg_item_per_user'] = df['product_item_count'] / df['product_n_user']    # average purchases per user
        temp = prior.groupby(['product_id', 'user_id'], as_index=False)['order_dow'].count()
        df['product_std_pre_user'] = temp.groupby('product_id')['order_dow'].std()           # std of per-user purchase counts
        df['product_skew_pre_user'] = temp.groupby('product_id')['order_dow'].agg({'product_skew_pre_user':skew}) # skewness of per-user purchase counts
df.reset_index(inplace=True)
product = get_product()
df = pd.merge(df,product[['product_id', 'aisle_id', 'department_id']],on='product_id',how='left')
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Aisle base features
def get_aisle_feat(prior):
df_path = r'F:\cache\instacart_cache\aisle_feat.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
        df = prior.groupby('aisle_id')['aisle_id'].agg({'aisle_item_count':'count'})      # total number of times this aisle was bought from
        df['aisle_n_user'] = prior.groupby('aisle_id')['user_id'].nunique()               # number of distinct users who bought from this aisle
        df['aisle_avg_item_per_user'] = df['aisle_item_count'] / df['aisle_n_user']       # average purchases per user
        temp = prior.groupby(['aisle_id', 'user_id'], as_index=False)['aisle_id'].agg({'aisle_user_n_item':'count'})
        df['aisle_std_pre_user'] = temp.groupby('aisle_id')['aisle_user_n_item'].std()    # std of per-user purchase counts
        df['aisle_skew_pre_user'] = temp.groupby('aisle_id')['aisle_user_n_item'].agg({'aisle_skew_pre_user':skew}) # skewness of per-user purchase counts
df.reset_index(inplace=True)
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Median days between purchases of a product (over users)
def get_product_mdn_per_day(prior):
df_path = r'F:\cache\instacart_cache\product_mdn_per_day.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
user_product_avg_day_per_item = get_user_product_avg_day_per_item(prior)
df = user_product_avg_day_per_item.groupby('product_id',as_index=False)[
'user_product_avg_day_per_item'].agg({'product_mdn_per_day':'median'})
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Median per-order purchase probability of a product
def get_product_mdn_per_order(prior):
df_path = r'F:\cache\instacart_cache\product_mdn_per_order.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
user_product_expectation_per_order = get_user_product_expectation_per_order(prior)
df = user_product_expectation_per_order.groupby('product_id', as_index=False)[
'user_product_expectation_per_order1'].agg({'product_mdn_per_order': 'median'})
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Share of users who bought the product only once (never re-bought)
def get_product_percent_less_than_2(prior):
df_path = r'F:\cache\instacart_cache\product_percent_less_than_2.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
user_product_n_item = prior.groupby(['user_id', 'product_id'], as_index=False)['user_id'].agg(
{'user_product_n_item': 'count'})
user_product_n_item['less than 2'] = (user_product_n_item['user_product_n_item'] < 2).astype(np.int32)
product_percent_less_than_2 = user_product_n_item.groupby('product_id')[
'less than 2'].sum() / user_product_n_item.groupby('product_id').size()
        df = pd.DataFrame(product_percent_less_than_2).reset_index() # share of users who bought it once and never again
df.columns = ['product_id','product_percent_less_than_2']
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Median of the per-user average order_number, per product
def get_product_avg_order(prior) :
df_path = r'F:\cache\instacart_cache\product_avg_order.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
user_product_avg_order = get_user_product_avg_order(prior)
df = user_product_avg_order.groupby('product_id',as_index=False)[
'user_product_avg_order'].agg({'product_avg_order':'median'})
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Number of first purchases / total purchases of the product
def get_product_precent_reorder(prior):
df_path = r'F:\cache\instacart_cache\product_precent_reorder.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby('product_id')['user_id'].agg({'product_n_user': 'nunique'})
df['product_n_item'] = prior.groupby('product_id')['user_id'].count()
df['product_precent_reorder'] = df['product_n_user']/df['product_n_item']
df.reset_index(inplace=True)
df = df[['product_id','product_precent_reorder']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Share of the product's purchases that come from users' most recent orders
def get_product_precent_last(prior):
df_path = r'F:\cache\instacart_cache\product_precent_last.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.groupby('user_id',as_index=False)['order_number'].max()
temp = pd.merge(temp,prior,on=['user_id','order_number'],how='left')
df = prior.groupby('product_id')['product_id'].agg({'product_n_item':'count'})
df['product_last_n_item'] = temp.groupby('product_id')['product_id'].count()
df = df.reset_index().fillna(0)
df['product_precent_last'] = df['product_last_n_item']/df['product_n_item']
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Product re-buy rate
def get_product_rebuy_rate(prior):
df_path = r'F:\cache\instacart_cache\product_rebuy_rate.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.copy()
temp['user_product_rank'] = temp.groupby(['user_id', 'product_id']).cumcount() + 1
temp['buy'] = temp['user_product_rank'].apply(lambda x: x * (x + 1) / 2 - 1)
temp['rebuy'] = temp['user_product_rank'].apply(lambda x: x * (x - 1) / 2)
df = temp.groupby('product_id').agg({'buy': {'product_sum_of_buy': 'sum'},
'rebuy': {'product_sum_of_rebuy': 'sum'}}).fillna(0)
df.columns = df.columns.droplevel(0)
df.reset_index(inplace=True)
df['product_rebuy_rate'] = df['product_sum_of_rebuy'] / df['product_sum_of_buy']
df = df[['product_id', 'product_rebuy_rate']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Order-level (action) base features
def get_action_feat(user_order):
df_path = r'F:\cache\instacart_cache\action.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = user_order[user_order['eval_set'] != 'prior'][[
'order_id', 'order_number', 'order_dow', 'order_hour_of_day', 'days_since_prior_order']]
df.rename(columns={'order_number':'user_n_order','days_since_prior_order':'user_last_days'},inplace=True)
        # order number, day of week, hour of day, days since the previous order
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# How many times this user bought this product
def get_user_product_n_item(prior):
df_path = r'F:\cache\instacart_cache\user_product_n_item.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['user_id','product_id'],as_index=False)[
'user_id'].agg({'user_product_n_item':'count'})
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Average days between this user's purchases of this product
def get_user_product_avg_day_per_item(prior):
df_path = r'F:\cache\instacart_cache\user_product_avg_day_per_item.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.copy()
temp.sort_values('date',ascending=True,inplace=True)
user_product_max_date = temp.drop_duplicates(['user_id','product_id'],keep='last')[['user_id','product_id','date']]
user_product_n_item = prior.groupby(['user_id','product_id'],as_index=False)['user_id'].agg({'user_product_n_item':'count'})
df = pd.merge(user_product_max_date,user_product_n_item,on=['user_id','product_id'],how='left')
df['user_product_avg_day_per_item'] = df['date']/(df['user_product_n_item']-1+0.01)
df = df[['user_id','product_id','user_product_n_item','user_product_avg_day_per_item']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Per-order purchase probability of the product for this user
def get_user_product_expectation_per_order(prior):
df_path = r'F:\cache\instacart_cache\user_user_product_expectation_per_order.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.copy()
temp.sort_values('order_number', inplace=True)
user_product_min_order = temp.drop_duplicates(['user_id', 'product_id'], keep='first')[
['user_id', 'product_id', 'order_number']]
user_product_max_order = temp.groupby(['user_id', 'product_id'],as_index=False)[
'order_number'].agg({'user_product_max_order':'max'})
df = pd.merge(user_product_min_order,user_product_max_order,on=['user_id', 'product_id'],how='left')
df['user_product_n_order'] = df['user_product_max_order'] - df['order_number']
user_product_n_item = prior.groupby(['user_id', 'product_id'], as_index=False)['user_id'].agg(
{'user_product_n_item': 'count'})
df = pd.merge(df,user_product_n_item,on=['user_id', 'product_id'],how='left')
df['user_product_expectation_per_order1'] = (df['user_product_n_item'] - 0.5) / (
df['user_product_n_order'] + 0.01)
df['user_product_expectation_per_order2'] = (df['user_product_n_item'] - 0.5) / (
df['user_product_max_order'] + 0.01)
df = df[['user_id', 'product_id', 'user_product_expectation_per_order1',
'user_product_expectation_per_order2']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Average order_number of the user's purchases of this product
def get_user_product_avg_order(prior):
df_path = r'F:\cache\instacart_cache\user_product_avg_order.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['user_id','product_id'],as_index=False)[
'order_number'].agg({'user_product_avg_order':'mean'})
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Days between the current order and the last purchase of this product
def get_user_product_last_time(prior):
df_path = r'F:\cache\instacart_cache\user_product_last_time.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.copy()
user_order = get_user_order()
temp.sort_values('date', ascending=True, inplace=True)
user_product_min_date = temp.drop_duplicates(['user_id', 'product_id'], keep='first')[
['user_id', 'product_id', 'order_number', 'date']]
user_product_min_date.rename(columns={'order_number':'user_product_last_order'},inplace=True)
df = pd.merge(user_product_min_date, user_order[user_order['eval_set']!='prior'], on='user_id', how='left')
df['user_product_last_time'] = df['date'] + df['days_since_prior_order']
user_product_avg_day_per_item = get_user_product_avg_day_per_item(prior)
product_mdn_per_day = get_product_mdn_per_day(prior)
df = pd.merge(df, user_product_avg_day_per_item, on=['user_id','product_id'],how='left')
df = pd.merge(df, product_mdn_per_day, on='product_id', how='left')
df['expectation_of_day_product'] = df['user_product_last_time'] / (df['product_mdn_per_day']+0.01)
df['expectation_of_day_user_product'] = df['user_product_last_time'] / (df['user_product_avg_day_per_item']+0.01)
df = df[['user_id', 'product_id', 'user_product_last_time','user_product_last_order',
'expectation_of_day_product','expectation_of_day_user_product']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Days between the current order and the first purchase of this product
def get_user_product_first_time(prior):
df_path = r'F:\cache\instacart_cache\user_product_first_time.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
temp = prior.copy()
user_order = get_user_order()
temp.sort_values('date', ascending=True, inplace=True)
user_product_max_date = temp.drop_duplicates(['user_id', 'product_id'], keep='last')[
['user_id', 'product_id', 'order_number', 'date']]
user_product_max_date.rename(columns={'order_number':'user_product_first_order'},inplace=True)
df = pd.merge(user_product_max_date, user_order[user_order['eval_set']!='prior'], on='user_id', how='left')
df['user_product_first_time'] = df['date'] + df['days_since_prior_order']
df = df[['user_id', 'product_id', 'user_product_first_order','user_product_first_time']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Hour-of-day distribution of all purchases:
def get_all_product_hour(prior):
df_path = r'F:\cache\instacart_cache\all_product_hour.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['order_hour_of_day'],as_index=False)['user_id'].agg({'all_product_hour':'count'})
df['all_product_hour'] = df['all_product_hour']/(df['all_product_hour'].sum())
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Day-of-week distribution of all purchases:
def get_all_product_week(prior):
df_path = r'F:\cache\instacart_cache\all_product_week.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['order_dow'], as_index=False)['user_id'].agg({'all_product_week': 'count'})
df['all_product_week'] = df['all_product_week'] / (df['all_product_week'].sum())
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Product purchase popularity by hour of day
def get_product_hour(prior):
df_path = r'F:\cache\instacart_cache\product_hour.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['product_id','order_hour_of_day'],as_index=False)['user_id'].agg({'product_hour':'count'})
product_day = df.groupby('product_id',as_index=False)['product_hour'].agg({'product_day':'sum'})
df = pd.merge(df,product_day,on='product_id',how='left')
df['product_hour'] = df['product_hour']/df['product_day']
all_product_hour = get_all_product_hour(prior)
df = pd.merge(df, all_product_hour, on='order_hour_of_day', how='left')
df['product_hour'] = df['product_hour'] / df['all_product_hour']
df = df[['product_id','order_hour_of_day','product_hour']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Product purchase popularity by day of week
def get_product_week(prior):
df_path = r'F:\cache\instacart_cache\product_week.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
df = prior.groupby(['product_id','order_dow'],as_index=False)['user_id'].agg({'product_week':'count'})
product_day = df.groupby('product_id',as_index=False)['product_week'].agg({'product_all_week':'sum'})
df = pd.merge(df,product_day,on='product_id',how='left')
df['product_week'] = df['product_week']/df['product_all_week']
all_product_week = get_all_product_week(prior)
df = pd.merge(df, all_product_week, on='order_dow', how='left')
df['product_week'] = df['product_week'] / df['all_product_week']
df = df[['product_id','order_dow','product_week']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Aisle purchase popularity by hour of day
def get_aisle_hour(prior):
df_path = r'F:\cache\instacart_cache\aisle_hour.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
product = get_product()
temp = pd.merge(prior,product,on='product_id',how='left')
df = temp.groupby(['aisle_id','order_hour_of_day'],as_index=False)['user_id'].agg({'aisle_hour':'count'})
aisle_day = df.groupby('aisle_id',as_index=False)['aisle_hour'].agg({'aisle_day':'sum'})
df = pd.merge(df,aisle_day,on='aisle_id',how='left')
df['aisle_hour'] = df['aisle_hour']/df['aisle_day']
all_product_hour = get_all_product_hour(prior)
df = pd.merge(df, all_product_hour, on='order_hour_of_day', how='left')
df['aisle_hour'] = df['aisle_hour'] / df['all_product_hour']
df = df[['aisle_id','order_hour_of_day','aisle_hour']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Aisle purchase popularity by day of week
def get_aisle_week(prior):
df_path = r'F:\cache\instacart_cache\aisle_week.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
product = get_product()
temp = pd.merge(prior, product, on='product_id', how='left')
df = temp.groupby(['aisle_id','order_dow'],as_index=False)['user_id'].agg({'aisle_week':'count'})
product_all_week = df.groupby('aisle_id',as_index=False)['aisle_week'].agg({'aisle_all_week':'sum'})
df = pd.merge(df,product_all_week,on='aisle_id',how='left')
df['aisle_week'] = df['aisle_week']/df['aisle_all_week']
all_product_week = get_all_product_week(prior)
df = pd.merge(df, all_product_week, on='order_dow', how='left')
df['aisle_week'] = df['aisle_week'] / df['all_product_week']
df = df[['aisle_id','order_dow','aisle_week']]
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
# Add derived (second-order) features
def get_second_feat(df):
df['user_product_last_order'] = df['user_n_order'] - df['user_product_last_order']
df['user_product_first_order'] = df['user_n_order'] - df['user_product_first_order']
return df
# Build the train and test sets
def make_train_set():
df_path = r'F:\cache\instacart_cache\train_set.hdf'
if os.path.exists(df_path) & 1:
df = pd.read_hdf(df_path, 'w')
else:
prior = get_prior()
train = get_train()
user_order = get_user_order()
        df = get_candicate(prior,user_order)                                        # build candidate samples
        action_feat = get_action_feat(user_order)                                   # order (action) base features
        user_product_n_item = get_user_product_n_item(prior)                        # how many times the user bought this product
        user_product_avg_day_per_item = get_user_product_avg_day_per_item(prior)    # average days between the user's purchases of this product
        user_product_expectation_per_order = get_user_product_expectation_per_order(prior)  # per-order purchase probability of the product
        user_product_avg_order = get_user_product_avg_order(prior)                  # average order_number of the purchases
        user_product_last_time = get_user_product_last_time(prior)                  # days since the last purchase of this product
        user_product_first_time = get_user_product_first_time(prior)                # days since the first purchase of this product
        # user_n_day is already included in user_feat (the original get_user_n_day helper is not defined in this script)
        user_feat = get_user_feat(prior,user_order)                                 # user base features
        product_feat = get_product_feat(prior)                                      # product base features
        product_mdn_per_day = get_product_mdn_per_day(prior)                        # median days between purchases over all users
        product_mdn_per_order = get_product_mdn_per_order(prior)                    # median per-order purchase probability of the product
        product_percent_less_than_2 = get_product_percent_less_than_2(prior)        # share of users who never re-bought the product
        product_avg_order = get_product_avg_order(prior)                            # median average order_number over all products
        product_precent_reorder = get_product_precent_reorder(prior)                # first-purchase count / total purchase count
        product_precent_last = get_product_precent_last(prior)                      # share of purchases coming from users' last orders
        product_rebuy_rate = get_product_rebuy_rate(prior)                          # product re-buy rate
        aisle_feat = get_aisle_feat(prior)                                          # aisle base features
        # (orders since the last purchase of this product)
        product_hour = get_product_hour(prior)                                      # product popularity by hour of day
        product_week = get_product_week(prior)                                      # product popularity by day of week
        aisle_hour = get_aisle_hour(prior)                                          # aisle popularity by hour of day
        aisle_week = get_aisle_week(prior)                                          # aisle popularity by day of week
        #department_hour = get_department_hour(prior)                               # department popularity by hour of day
        #department_week = get_department_week(prior)                               # department popularity by day of week
        print('Merging all features together')
df = pd.merge(df, user_feat, on='user_id', how='left')
df = pd.merge(df, action_feat, on='order_id', how='left')
df = pd.merge(df, product_feat, on='product_id', how='left')
df = pd.merge(df, product_mdn_per_day, on='product_id', how='left')
df = pd.merge(df, product_mdn_per_order, on='product_id', how='left')
df = pd.merge(df, product_percent_less_than_2, on='product_id', how='left')
df = pd.merge(df, product_avg_order, on='product_id', how='left')
df = pd.merge(df, product_precent_reorder, on='product_id', how='left')
df = pd.merge(df, product_precent_last, on='product_id', how='left')
df = pd.merge(df, product_rebuy_rate, on='product_id', how='left')
        df = pd.merge(df, aisle_feat, on='aisle_id', how='left')
df = pd.merge(df, user_product_avg_day_per_item, on=['user_id','product_id'], how='left')
df = pd.merge(df, user_product_expectation_per_order,on=['user_id', 'product_id'], how='left')
df = pd.merge(df, user_product_avg_order, on=['user_id', 'product_id'], how='left')
df = pd.merge(df, user_product_last_time, on=['user_id', 'product_id'], how='left')
df = pd.merge(df, user_product_first_time, on=['user_id', 'product_id'], how='left')
df = pd.merge(df, product_hour, on=['product_id', 'order_hour_of_day'], how='left')
df = pd.merge(df, product_week, on=['product_id', 'order_dow'], how='left')
df = pd.merge(df, aisle_hour, on=['aisle_id', 'order_hour_of_day'], how='left')
df = pd.merge(df, aisle_week, on=['aisle_id', 'order_dow'], how='left')
df = get_second_feat(df) # 添加二次特征
        print('Adding labels')
df = pd.merge(df, train[['user_id', 'product_id', 'label']], on=['user_id', 'product_id'], how='left')
df['label'].fillna(0, inplace=True)
df.to_hdf(df_path, 'w', complib='blosc', complevel=5)
return df
df = make_train_set()
df = df.fillna(-100)
user_order = get_user_order()
train_user_list = list(user_order[user_order['eval_set']=='train']['user_id'].unique())
test_user_list = list(user_order[user_order['eval_set']=='test']['user_id'].unique())
df_train = df[df['user_id'].isin(train_user_list)]
df_test = df[df['user_id'].isin(test_user_list)]
# Offline validation and parameter tuning
train = df_train[:int(df_train.shape[0]*0.7)]
test = df_train[int(df_train.shape[0]*0.7):]
features = [ 'user_n_order', 'user_n_day',
'user_n_item', 'user_n_product', 'user_avg_day_per_order',
'user_avg_item_per_order', 'user_avg_item_per_day',
'user_avg_new_per_order', 'user_percent_of_new', 'product_item_count',
'product_n_user', 'product_avg_item_per_user', 'product_std_pre_user',
'product_skew_pre_user', 'aisle_id', 'department_id',
'product_mdn_per_day', 'product_mdn_per_order',
'product_percent_less_than_2', 'product_avg_order',
'product_precent_reorder', 'order_dow', 'order_hour_of_day',
'days_since_prior_order', 'user_product_n_item',
'user_product_avg_day_per_item', 'user_product_expectation_per_order1',
'user_product_expectation_per_order2',
'user_product_avg_order', 'user_product_last_time',
'user_product_last_order', 'expectation_of_day_product',
'expectation_of_day_user_product', 'user_product_first_order',
'user_product_first_time', 'product_hour', 'product_week',
'aisle_hour','aisle_week','user_avg_order_per_product',
'product_precent_last','product_rebuy_rate']
lgb_train = lgb.Dataset(train[features],train.label)
lgb_eval = lgb.Dataset(test[features],test.label, reference=lgb_train)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'max_depth':5,
'num_leaves': 31,
'learning_rate': 0.1,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=5000,
valid_sets=lgb_eval,
verbose_eval = 10,
early_stopping_rounds=10)
preds = gbm.predict(test[features])
test['pred'] = preds
TRESHOLD = 0.175
y_true = get_result(test[test['label']==1])
y_true = pd.merge(test[['order_id']].drop_duplicates(),y_true,on='order_id',how='left')
#y_pred = get_result(test[test['pred']>TRESHOLD])
y_pred = get_result2(test)
y_pred = pd.merge(y_true[['order_id']],y_pred,on='order_id',how='left')
print('F1 score: %f' % (instacart_grade(y_true,y_pred)))
y_true = get_result(test[test['label']==1])
order_n_product = test[test['label']==1].groupby('order_id').size()
y_pred = get_result2(test)
'''
# xgboost parameter test
import xgboost
xgb_train = xgboost.DMatrix(train[features],train.label)
xgb_eval = xgboost.DMatrix(test[features],test.label)
xgb_params = {
"objective" : "reg:logistic"
,"eval_metric" : "logloss"
,"eta" : 0.1
,"max_depth" : 6
,"min_child_weight" :10
,"gamma" :0.70
,"subsample" :0.76
,"colsample_bytree" :0.95
,"alpha" :2e-05
,"lambda" :10
}
watchlist= [(xgb_eval, "test")]
bst = xgboost.train(params=xgb_params,
dtrain=xgb_train,
num_boost_round=5000,
evals=watchlist,
verbose_eval=10,
early_stopping_rounds=10)
'''
################### Online submission ###################
d_train = lgb.Dataset(df_train[features],df_train.label)
d_test = lgb.Dataset(df_test[features], reference=lgb_train)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'max_depth':5,
'num_leaves': 31,
'learning_rate': 0.1,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
ROUNDS = 612
print('light GBM train :-)')
bst = lgb.train(params, d_train, ROUNDS)
print('light GBM predict')
preds = bst.predict(df_test[features])
df_test['pred'] = preds
TRESHOLD = 0.2
#y_pred = get_result(test[test['pred']>TRESHOLD])
y_pred = get_result2(df_test)
y_pred['products'] = y_pred['products'].apply(lambda x: list_to_str(x))
y_pred = pd.merge(user_order[user_order['eval_set']=='test'][['order_id']],y_pred,on='order_id',how='left')
y_pred.to_csv(r'C:\Users\csw\Desktop\python\instacart\submission\0724(1).csv', index=False)
d = dict()
for row in df_test.itertuples():
if row.pred > TRESHOLD:
try:
d[row.order_id] += ' ' + str(row.product_id)
except:
d[row.order_id] = str(row.product_id)
for order in user_order[user_order['eval_set']=='test'].order_id:
if order not in d:
d[order] = 'None'
sub = pd.DataFrame.from_dict(d, orient='index')
sub.reset_index(inplace=True)
sub.columns = ['order_id', 'products']
sub.to_csv(r'C:\Users\csw\Desktop\python\instacart\submission\0723(1).csv', index=False)
| [
"[email protected]"
] | |
7af29a7adb10691bd1d3489aa0ba317b07c05725 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03433/s436709756.py | e163d74f4c2ebdcbfc9df05eb5845a02f71786d2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | import sys
def input(): return sys.stdin.readline().strip()
def resolve():
def main():
n=int(input())
a=int(input())
for i in range(21):
for j in range(a+1):
if 500*i+1*j==n:
return 'Yes'
return 'No'
print(main())
resolve() | [
"[email protected]"
] | |
977adbcee4f7e66c212151bd3cd2debce6e3a296 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/datamgr/metadata/metadata/service/cli/sync.py | c2b92b897f7fd23b10f29fadae1e110bf5e28230 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 2,562 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import click
from tinyrpc import RPCClient
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from metadata.runtime import rt_context
from metadata.util.i18n import lazy_selfish as _
module_logger = logging.getLogger(__name__)
@click.command()
@click.option('--min_n', type=int, help=_('Min db operate number.'))
@click.option('--max_n', type=int, help=_('Max db operate number.'))
def replay_db_operate_log(min_n, max_n):
normal_conf = rt_context.config_collection.normal_config
rpc_client = RPCClient(
JSONRPCProtocol(),
HttpPostClientTransport(
'http://{}:{}/jsonrpc/2.0/'.format(normal_conf.ACCESS_RPC_SERVER_HOST, normal_conf.ACCESS_RPC_SERVER_PORT)
),
)
for i in range(min_n, max_n + 1):
print(i)
try:
rpc_client.call(
'bridge_sync',
[],
{"content_mode": "id", "db_operations_list": [i], "batch": False, "rpc_extra": {"language": "zh-hans"}},
)
except Exception:
            module_logger.exception('Failed to replay.')
| [
"[email protected]"
] | |
711ef1d902fdb1f8db2c88e55e3da7671dffe1c3 | c974cf94626d04a83f3d5ccb25e06a99df537e21 | /python/ray/rllib/dqn/models.py | c3f4a08eb943e954dbe72a3c3e31f0a9cde49a0b | [
"MIT",
"Apache-2.0"
] | permissive | vtpp2014/ray | 00db3465812eb85890351dd345c43d2ed29745b8 | 4e4a4e4e062d37f3fb1c518ea5b0d0d7a32e5a60 | refs/heads/master | 2021-01-21T09:20:47.322556 | 2017-08-31T06:40:46 | 2017-08-31T06:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,153 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.layers as layers
from ray.rllib.models import ModelCatalog
def _build_q_network(inputs, num_actions, config):
dueling = config["dueling"]
hiddens = config["hiddens"]
frontend = ModelCatalog.get_model(inputs, 1, config["model_config"])
frontend_out = frontend.last_layer
with tf.variable_scope("action_value"):
action_out = frontend_out
for hidden in hiddens:
action_out = layers.fully_connected(
action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
action_scores = layers.fully_connected(
action_out, num_outputs=num_actions, activation_fn=None)
if dueling:
with tf.variable_scope("state_value"):
state_out = frontend_out
for hidden in hiddens:
state_out = layers.fully_connected(
state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
state_score = layers.fully_connected(
state_out, num_outputs=1, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(
action_scores_mean, 1)
return state_score + action_scores_centered
else:
return action_scores
def _build_action_network(
q_values, observations, num_actions, stochastic, eps):
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations)[0]
random_actions = tf.random_uniform(
tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(
tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(
chose_random, random_actions, deterministic_actions)
return tf.cond(
stochastic, lambda: stochastic_actions,
lambda: deterministic_actions)
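# Note (added): the helper above implements standard epsilon-greedy action
# selection: with probability `eps` a uniformly random action is taken, otherwise
# the greedy argmax of the Q-values. A rough NumPy sketch of the same rule
# (illustrative only, not used by this module):
#
#   action = np.random.randint(num_actions) if np.random.rand() < eps \
#       else int(np.argmax(q_values))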
def _huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta))
def _minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
def _scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name)
class DQNGraph(object):
def __init__(self, env, config):
self.env = env
num_actions = env.action_space.n
optimizer = tf.train.AdamOptimizer(learning_rate=config["lr"])
# Action inputs
self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
self.eps = tf.placeholder(tf.float32, (), name="eps")
self.cur_observations = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
# Action Q network
with tf.variable_scope("q_func") as scope:
q_values = _build_q_network(
self.cur_observations, num_actions, config)
q_func_vars = _scope_vars(scope.name)
# Action outputs
self.output_actions = _build_action_network(
q_values,
self.cur_observations,
num_actions,
self.stochastic,
self.eps)
# Replay inputs
self.obs_t = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
self.act_t = tf.placeholder(tf.int32, [None], name="action")
self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
self.obs_tp1 = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
self.done_mask = tf.placeholder(tf.float32, [None], name="done")
self.importance_weights = tf.placeholder(
tf.float32, [None], name="weight")
# q network evaluation
with tf.variable_scope("q_func", reuse=True):
self.q_t = _build_q_network(self.obs_t, num_actions, config)
        # target q network evaluation
with tf.variable_scope("target_q_func") as scope:
self.q_tp1 = _build_q_network(self.obs_tp1, num_actions, config)
target_q_func_vars = _scope_vars(scope.name)
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(
self.q_t * tf.one_hot(self.act_t, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
with tf.variable_scope("q_func", reuse=True):
q_tp1_using_online_net = _build_q_network(
self.obs_tp1, num_actions, config)
q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(
self.q_tp1 * tf.one_hot(
q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(self.q_tp1, 1)
q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = self.rew_t + config["gamma"] * q_tp1_best_masked
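        # Equivalently: target_t = r_t + gamma * (1 - done_t) * max_a' Q_target(s_{t+1}, a'),
        # the standard one-step TD target used by DQN.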
# compute the error (potentially clipped)
self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = _huber_loss(self.td_error)
weighted_error = tf.reduce_mean(self.importance_weights * errors)
# compute optimization op (potentially with gradient clipping)
if config["grad_norm_clipping"] is not None:
self.optimize_expr = _minimize_and_clip(
optimizer, weighted_error, var_list=q_func_vars,
clip_val=config["grad_norm_clipping"])
else:
self.optimize_expr = optimizer.minimize(
weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to
# target Q network
update_target_expr = []
for var, var_target in zip(
sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
self.update_target_expr = tf.group(*update_target_expr)
def update_target(self, sess):
return sess.run(self.update_target_expr)
def act(self, sess, obs, eps, stochastic=True):
return sess.run(
self.output_actions,
feed_dict={
self.cur_observations: obs,
self.stochastic: stochastic,
self.eps: eps,
})
def train(
self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
td_err, _ = sess.run(
[self.td_error, self.optimize_expr],
feed_dict={
self.obs_t: obs_t,
self.act_t: act_t,
self.rew_t: rew_t,
self.obs_tp1: obs_tp1,
self.done_mask: done_mask,
self.importance_weights: importance_weights
})
return td_err
| [
"[email protected]"
] | |
8bc0bea26ee08645ad5f3477454cbb4412ca2cb6 | 747f759311d404af31c0f80029e88098193f6269 | /addons/productivity_analysis/__init__.py | d208633981a8c8e0baff59ac77e9e3b9b6a2b1e5 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | /home/openerp/production/extra-addons/productivity_analysis/__init__.py | [
"[email protected]"
] | |
3d36d8542ca387319e47bd61e32eb8a71c9f7a09 | 44d7c13c0b09b1433a16f2cf54df761a326630b9 | /predict_w2v.py | 7f0a3fe46be544186293cd8da95b65fd82c0cd50 | [] | no_license | gyy8426/MSRNN | 5f9abed5a6432bc9f575797fb54a5d4ff78bd874 | 2b91d8b647a4cca28bba28b26d9b88c69e6eac77 | refs/heads/master | 2022-06-18T23:17:39.494287 | 2022-06-07T15:05:30 | 2022-06-07T15:05:30 | 97,574,787 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,954 | py | from utils import *
import copy
import numpy as np
class Predict(object):
def build_sampler(self, layers, tparams,Wemb, options, use_noise, trng):
debug_print = []
#debug_print.append( theano.printing.Print('input_p.shapa')(input_p.shape))
# context: #annotations x dim
ctx0 = T.matrix('ctx_sampler', dtype='float32')
ctx_mask = T.vector('ctx_mask', dtype='float32')
ctx_ = ctx0
counts = ctx_mask.sum(-1)
ctx_mean = ctx_.sum(0)/counts
# initial state/cell
tu_init_state = [T.alloc(0., options['rnn_word_dim'])]
tu_init_memory = [T.alloc(0., options['rnn_word_dim'])]
mu_init_state = [T.alloc(0., options['rnn_cond_wv_dim'])]
mu_init_memory = [T.alloc(0., options['rnn_cond_wv_dim'])]
if options['smoothing'] :
a_init_state = [T.alloc(0., options['latent_size_a'])]
#a_init_memory = [T.alloc(0., options['latent_size_a'])]
else :
a_init_state = None
z_init_state = [T.alloc(0., options['latent_size_z'])]
mu_p_init = [T.alloc(0., options['latent_size_z'])]
print 'Building f_init...',
'''
f_init = theano.function([ctx0, ctx_mask], [ctx0]+tu_init_state+tu_init_memory+
mu_init_state+mu_init_memory+
a_init_state+a_init_memory+
z_init_state+
mu_p_init, name='f_init',
on_unused_input='ignore',
profile=False)
'''
f_init = theano.function([ctx0, ctx_mask], [ctx0]+tu_init_state+tu_init_memory+
mu_init_state+mu_init_memory+
a_init_state+
z_init_state+
mu_p_init, name='f_init',
on_unused_input='ignore',
profile=False)
print 'Done'
x = T.vector('x_sampler', dtype='int64')
tu_init_state = [T.matrix('tu_init_state', dtype='float32')]
tu_init_memory = [T.matrix('tu_init_memory', dtype='float32')]
mu_init_state = [T.matrix('mu_init_state', dtype='float32')]
mu_init_memory = [T.matrix('mu_init_memory', dtype='float32')]
if options['smoothing'] :
a_init_state = [T.matrix('a_init_state', dtype='float32')]
#a_init_memory = [T.matrix('a_init_memory', dtype='float32')]
# if it's the first word, emb should be all zero
emb = T.switch(x[:, None] < 0,
T.alloc(0., 1, Wemb.shape[1]), Wemb[x])
# emb ff
emb_ff1 = layers.get_layer('ff')[1](tparams, emb,activ=options['nonlin_decoder'],
prefix="emb_ff1")
emb_ff2 = layers.get_layer('ff')[1](tparams, emb_ff1,activ=options['nonlin_decoder'],
prefix='emb_ff2')
emb_drop = layers.dropout_layer(emb_ff2, use_noise, trng)
tu_gru = layers.get_layer('lstm')[1](options,tparams, emb, one_step=True,
init_state=tu_init_state[0],
init_memory=tu_init_memory[0],
prefix='tu_rnn')
#debug_print.append( theano.printing.Print('mu_init_state.shapa')(mu_init_state.shape))
mu_gru = layers.get_layer('lstm_cond')[1](options, tparams, tu_gru[0],
mask=None, context=ctx_mean,
one_step=True,
init_state=mu_init_state[0],
init_memory=mu_init_memory[0],
prefix='mu_rnn')
tu_next_state = [tu_gru[0]]
tu_next_memory = [tu_gru[1]]
mu_next_state = [mu_gru[0]]
mu_next_memory = [mu_gru[1]]
proj_h = mu_gru[0]
d_layer = proj_h
if options['use_dropout']:
d_drop_layer = layers.dropout_layer(d_layer, use_noise, trng)
input_a_layer = T.concatenate([d_drop_layer, emb_drop], axis=1)
if options['smoothing']:
a_layer = layers.get_layer('gru')[1](options, tparams, input_a_layer,one_step=True,
init_state=a_init_state[0],
prefix='a_rnn')
'''
a_layer = layers.get_layer('lstm')[1](options, tparams, input_a_layer,one_step=True,
init_state=a_init_state[0],init_memory=a_init_memory[0]
prefix='a_rnn')
'''
#a_layer = a_layer[:, ::-1]
a_next_state = [a_layer[0]]
#a_next_memory = [a_layer[1]]
input_a = a_layer[0]
else:
temp_a = layers.get_layer('ff')[1](options, tparams, input_a_layer,
prefix='a_layer_0')
for i in range(options['flat_mlp_num'] - 1):
temp_a = layers.get_layer('ff')[1](options, tparams, temp_a,
prefix='a_layer_' + str(i + 1))
a_layer = temp_a
input_a = a_layer
#debug_print.append( theano.printing.Print('a_layer.shapa')(a_layer.shape))
#################
###stochastic parts####
#################
# Define shared variables for quantities to be updated across batches (truncated BPTT)
z_init = [T.matrix('z', dtype='float32')]
mu_p_init = [T.matrix('mu_p_init', dtype='float32')]
stochastic_layer = layers.stochastic_layer_onestep(options,tparams,
input_p=d_drop_layer,input_q=input_a,
z_init=z_init[0],mu_p_init=mu_p_init[0],
num_units=options['latent_size_z'],
unroll_scan=options['unroll_scan'],
use_mu_residual_q=options['use_mu_residual_q']
)
z_layer = [stochastic_layer[0]]
mean_prior_layer = [stochastic_layer[1]]
log_var_prior_layer = stochastic_layer[2]
mean_q_layer = stochastic_layer[3]
log_var_q_layer = stochastic_layer[4]
z_dropout_layer = layers.dropout_layer(z_layer[0], use_noise, trng)
'''
z_layer_shp = z_dropout_layer.shape
z_layer_reshaped = z_dropout_layer.reshape([z_layer_shp[0]*z_layer_shp[1],
z_layer_shp[2]])
d_layer_shp = d_drop_layer.shape
d_layer_reshaped = d_drop_layer.reshape([d_layer_shp[0]*d_layer_shp[1],
d_layer_shp[2]])
'''
input_gen_ff = T.concatenate([d_drop_layer, z_dropout_layer], axis=1)
gen_word_emb_ff = layers.get_layer('ff')[1](tparams, input_gen_ff, activ=options['nonlin_decoder'],
prefix='gen_word_emb_ff')
logit = layers.get_layer('ff')[1](tparams, gen_word_emb_ff,
prefix='ff_logit_zd', activ='linear')
# logit_shp = logit.shape
next_probs = T.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
print 'building f_next...'
'''
f_next = theano.function([x, ctx0, ctx_mask]+
tu_init_state+tu_init_memory+
mu_init_state+mu_init_memory+
a_init_state+a_init_memory
z_init+
mu_p_init,
[next_probs, next_sample]+
tu_next_state+tu_next_memory+
mu_next_state+mu_next_memory+
a_next_state+a_next_memory+
z_layer+
mean_prior_layer,
name='f_next', profile=False,
on_unused_input='ignore')
'''
f_next = theano.function([x, ctx0, ctx_mask]+
tu_init_state+tu_init_memory+
mu_init_state+mu_init_memory+
a_init_state+
z_init+
mu_p_init,
[next_probs, next_sample]+
tu_next_state+tu_next_memory+
mu_next_state+mu_next_memory+
a_next_state+
z_layer+
mean_prior_layer,
name='f_next', profile=False,
on_unused_input='ignore')
print 'Done'
return f_init, f_next
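    # Note (added): f_init encodes the video context and returns zero-initialised
    # recurrent/latent states, while f_next consumes one previously sampled word plus
    # the carried states and returns next-word probabilities, a sampled word and the
    # updated states. gen_sample below threads those states through a beam search.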
def gen_sample(self, tparams, f_init, f_next, ctx0, ctx_mask,
trng=None, k=1, maxlen=30, stochastic=False):
'''
ctx0: (26,1024)
ctx_mask: (26,)
'''
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = np.zeros(live_k).astype('float32')
# [(26,1024),(512,),(512,)]
rval = f_init(ctx0, ctx_mask)
ctx0 = rval[0]
# next gru and stacked gru state and memory
next_states = []
next_memorys = []
n_layers_rnn = 2
n_rnn_return = 2
for lidx in xrange(n_layers_rnn):
next_states.append([])
next_memorys.append([])
next_states[lidx].append(rval[n_rnn_return*lidx+1])
next_states[lidx][-1] = next_states[lidx][-1].reshape([live_k, next_states[lidx][-1].shape[0]])
next_memorys[lidx].append(rval[n_rnn_return*lidx+2])
next_memorys[lidx][-1] = next_memorys[lidx][-1].reshape([live_k, next_memorys[lidx][-1].shape[0]])
#print "init gru state shape is ",len(next_states),',',len(next_states[0])
next_a_state = []
next_a_state.append([])
next_a_state[0].append(rval[-3])
'''
next_a_state = []
next_a_state.append([])
next_a_state[0].append(rval[-4])
next_a_memory = []
next_a_memory.append([])
next_a_memory[0].append(rval[-3])
'''
next_z = []
next_z.append([])
next_z[0].append(rval[-2])
next_mu_p = []
next_mu_p.append([])
next_mu_p[0].append(rval[-1])
#print "init next_mu_p shape is ",len(next_mu_p),',',len(next_mu_p[0]),','
next_w = -1 * np.ones((1,)).astype('int64')
# next_state: [(1,512)]
# next_memory: [(1,512)]
for ii in xrange(maxlen):
# return [(1, 50000), (1,), (1, 512), (1, 512)]
# next_w: vector
# ctx: matrix
# ctx_mask: vector
# next_state: [matrix]
# next_memory: [matrix]
#print "next_states ", len(next_states),',',len(next_states[1]),',',len(next_states[1][0]),',',len(next_states[1][0][0])
rval = f_next(*([next_w, ctx0, ctx_mask] +
next_states[0] + next_memorys[0] +
next_states[1] + next_memorys[1] +
next_a_state +
next_z +
next_mu_p))
next_p = rval[0]
next_w = rval[1] # already argmax sorted
next_states = []
next_memorys = []
for lidx in xrange(n_layers_rnn):
next_states.append([])
next_memorys.append([])
next_states[lidx].append(rval[n_rnn_return*lidx+2])
next_memorys[lidx].append(rval[n_rnn_return*lidx+3])
#print "gru state is ", len(next_states),',',len(next_states[0]),',',len(next_states[0][0])
next_a_state = [rval[-3]]
'''
next_a_state = [rval[-4]]
next_a_memory = [rval[-3]]
'''
next_z = [rval[-2]]
next_mu_p = [rval[-1]]
#print "init next_a shape is ",len(next_a),',',len(next_a[0]),','
#print "init next_mu_p shape is ",len(next_mu_p),',',len(next_mu_p[0]),','
if stochastic:
sample.append(next_w[0]) # take the most likely one
sample_score += next_p[0,next_w[0]]
if next_w[0] == 0:
break
else:
# the first run is (1,50000)
cand_scores = hyp_scores[:,None] - np.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size # index of row
word_indices = ranks_flat % voc_size # index of col
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = np.zeros(k-dead_k).astype('float32')
new_hyp_states = []
new_hyp_memories = []
new_hyp_a_state = []
new_hyp_a_state.append([])
#new_hyp_a_memory = []
#new_hyp_a_memory.append([])
new_hyp_z = []
new_hyp_z.append([])
new_hyp_mu_p = []
new_hyp_mu_p.append([])
for lidx in xrange(n_layers_rnn):
new_hyp_states.append([])
new_hyp_memories.append([])
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
for lidx in np.arange(n_layers_rnn):
new_hyp_states[lidx].append(copy.copy(next_states[lidx][0][ti]))
new_hyp_memories[lidx].append(copy.copy(next_memorys[lidx][0][ti]))
new_hyp_a_state[0].append( copy.copy(next_a_state[0][ti]))
#new_hyp_a_memory[0].append( copy.copy(next_a_memory[0][ti]))
new_hyp_z[0].append(copy.copy(next_z[0][ti]))
new_hyp_mu_p[0].append(copy.copy(next_mu_p[0][ti]))
#print "init new_hyp_states shape is ",len(new_hyp_states),',',len(new_hyp_states[0]),','
#print "init new_hyp_mu_p shape is ",len(new_hyp_mu_p),',',len(new_hyp_mu_p[0]),','
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
hyp_a_state = []
hyp_a_state.append([])
hyp_a_memory = []
hyp_a_memory.append([])
hyp_z = []
hyp_z.append([])
hyp_mu_p = []
hyp_mu_p.append([])
hyp_memories = []
for lidx in xrange(n_layers_rnn):
hyp_states.append([])
hyp_memories.append([])
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
for lidx in xrange(n_layers_rnn):
hyp_states[lidx].append(new_hyp_states[lidx][idx])
hyp_memories[lidx].append(new_hyp_memories[lidx][idx])
hyp_a_state[0].append(new_hyp_a_state[0][idx])
#hyp_a_memory[0].append(new_hyp_a_memory[0][idx])
hyp_z[0].append(new_hyp_z[0][idx])
hyp_mu_p[0].append(new_hyp_mu_p[0][idx])
#print "init hyp_states shape is ",len(hyp_states),',',len(hyp_states[0]),','
#print "init hyp_mu_p shape is ",len(hyp_mu_p),',',len(hyp_mu_p[0]),','
hyp_scores = np.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = np.array([w[-1] for w in hyp_samples])
next_states = []
next_memorys = []
for lidx in xrange(n_layers_rnn):
next_states.append([])
next_memorys.append([])
next_states[lidx].append(np.array(hyp_states[lidx]))
next_memorys[lidx].append(np.array(hyp_memories[lidx]))
next_a_state=hyp_a_state
#next_a_memory=hyp_a_memory
next_z = hyp_z
next_mu_p = hyp_mu_p
#print "init next_states shape is ",len(next_states),',',len(next_states[0]),',',len(next_states[0][0])
#print "init next_mu_p shape is ",len(next_mu_p),',',len(next_mu_p[0]),','
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score, next_states, next_a_state,next_z,next_mu_p
def sample_execute(self, engine, options, tparams, f_init, f_next, x, ctx, mask_ctx, trng):
stochastic = False
for jj in xrange(np.minimum(10, x.shape[1])):
sample, score, _, _,_,_ = self.gen_sample(tparams, f_init, f_next, ctx[jj], mask_ctx[jj],
trng=trng, k=5, maxlen=30, stochastic=stochastic)
if not stochastic:
best_one = np.argmin(score)
sample = sample[best_one]
else:
sample = sample
print 'Truth ', jj, ': ',
for vv in x[:, jj]:
if vv == 0:
break
if vv in engine.ix_word:
print engine.ix_word[vv],
else:
print 'UNK',
print
for kk, ss in enumerate([sample]):
print 'Sample (', jj, ') ', ': ',
for vv in ss:
if vv == 0:
break
if vv in engine.ix_word:
print engine.ix_word[vv],
else:
print 'UNK',
print
| [
"[email protected]"
] | |
ffd098afc7c7363e15f0181d9a02f89ec7709a69 | a77802040fac0c1207902946f3a1e8a2d9a4fbd0 | /examples/ccxt.pro/py/binance-reload-markets.py | 50ad40b078a268a69c9fd85cc548d15e0af10bb8 | [
"MIT"
] | permissive | ndubel/ccxt | 6eef84cddc9ac42db2fd03b9eaa04befd59775e6 | 982a12b1ab0f02a11911bee8a0aba8ad4f35ded1 | refs/heads/master | 2023-07-29T15:45:37.941234 | 2022-04-05T10:26:05 | 2022-04-05T10:26:05 | 331,591,556 | 1 | 0 | MIT | 2021-01-21T10:24:16 | 2021-01-21T10:24:15 | null | UTF-8 | Python | false | false | 1,360 | py | import ccxtpro
from asyncio import get_event_loop, gather
print('CCXT Pro version', ccxtpro.__version__)
async def watch_order_book(exchange, symbol):
while True:
try:
orderbook = await exchange.watch_order_book(symbol)
datetime = exchange.iso8601(exchange.milliseconds())
print(datetime, orderbook['nonce'], symbol, orderbook['asks'][0], orderbook['bids'][0])
except Exception as e:
print(type(e).__name__, str(e))
break
async def reload_markets(exchange, delay):
while True:
try:
await exchange.sleep(delay)
markets = await exchange.load_markets(True)
datetime = exchange.iso8601(exchange.milliseconds())
print(datetime, 'Markets reloaded')
except Exception as e:
print(type(e).__name__, str(e))
break
async def main(loop):
exchange = ccxtpro.binance({
'enableRateLimit': True,
'asyncio_loop': loop,
})
await exchange.load_markets()
# exchange.verbose = True
symbol = 'BTC/USDT'
delay = 60000 # every minute = 60 seconds = 60000 milliseconds
loops = [watch_order_book(exchange, symbol), reload_markets(exchange, delay)]
await gather(*loops)
await exchange.close()
loop = get_event_loop()
loop.run_until_complete(main(loop)) | [
"[email protected]"
] | |
ea01353fee3883e777cd572d6adebbe811a1ae98 | 8a61f0803e9d18772d867857a5a2a15e9549e6f5 | /pnu/urls.py | 6434f255de0516ae85606ecbb65c5bbecf72cc79 | [] | no_license | hjlee73/pnu-django-201904 | 94504935bf5bdb511d7b99c138e3ea3d9ac5f758 | 6e39a9b63525a8312968cabcc190f8661e1aa9e3 | refs/heads/master | 2020-05-20T18:19:55.389915 | 2019-04-12T08:22:12 | 2019-04-12T08:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | from django.contrib import admin
from django.urls import include, path
# FIXME: 이 코드는 RedirectView에 의해서 제거될 것입니다.
from django.shortcuts import redirect
def root(request):
return redirect("/shop/")
urlpatterns = [
path('admin/', admin.site.urls),
path('shop/', include('shop.urls')),
path('', root),
]
| [
"[email protected]"
] | |
552213ea699ee5c5dbeb98cc9cf8b5eaf76bed64 | cf6646983a0088248acfc5dafefd847a350bac94 | /posts/views/admin.py | 0e677adb3fb805b838df242ecf343a7ae63fbd7b | [
"MIT"
] | permissive | Toxblh/vas3k.club | a370239e95b496f234cd6dd018881f1dcfa78d69 | 97fa3c815b25a6af789ba04892627f6c6c822113 | refs/heads/master | 2022-11-09T01:43:56.581340 | 2020-07-05T18:51:21 | 2020-07-05T18:51:21 | 277,580,310 | 1 | 0 | MIT | 2020-07-06T15:30:57 | 2020-07-06T15:30:56 | null | UTF-8 | Python | false | false | 1,896 | py | from django.shortcuts import get_object_or_404, render
from auth.helpers import auth_required, moderator_role_required
from bot.common import render_html_message
from notifications.telegram.posts import announce_in_club_channel
from posts.admin import do_post_admin_actions
from posts.forms.admin import PostAdminForm, PostAnnounceForm
from posts.helpers import extract_any_image
from posts.models import Post
@auth_required
@moderator_role_required
def admin_post(request, post_slug):
post = get_object_or_404(Post, slug=post_slug)
if request.method == "POST":
form = PostAdminForm(request.POST)
if form.is_valid():
return do_post_admin_actions(request, post, form.cleaned_data)
else:
form = PostAdminForm()
    return render(request, "admin/simple_form.html", {
        "title": "Админить пост",  # "Administer the post"
        "post": post,
        "form": form
})
@auth_required
@moderator_role_required
def announce_post(request, post_slug):
post = get_object_or_404(Post, slug=post_slug)
initial = {
"text": render_html_message("channel_post_announce.html", post=post),
"image": extract_any_image(post),
}
if request.method == "POST":
form = PostAnnounceForm(request.POST, initial=initial)
if form.is_valid():
announce_in_club_channel(
post=post,
announce_text=form.cleaned_data["text"],
image=form.cleaned_data["image"] if form.cleaned_data["with_image"] else None
)
            return render(request, "message.html", {
                "title": "Запощено ✅"  # "Posted ✅"
            })
else:
form = PostAnnounceForm(initial=initial)
    return render(request, "admin/simple_form.html", {
        "title": "Анонсировать пост на канале",  # "Announce the post on the channel"
        "post": post,
        "form": form
})
| [
"[email protected]"
] | |
048521fbf9c0d46993e2505ccc84c3e21302ed2a | ec65636f2f0183c43b1ec2eac343b9aa1fc7c459 | /train/abnormal_detection_new/10.133.200.69/session_active.py | 17249ec5279b8fc88290cc73764f849af4dd5cb5 | [] | no_license | tyroarchitect/AIOPs | db5441e5180fcace77b2d1022adb53bbd0b11f23 | 46fe93329a1847efa70e5b73bcbfd54469645cdd | refs/heads/master | 2020-04-16T13:45:02.963404 | 2018-11-15T06:50:57 | 2018-11-15T06:51:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,203 | py | import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# settings of lstm model
timesteps = 20
batch_size = 64
epochs = 5
lstm_size = 30
lstm_layers = 2
filename = "../../../datasets/1-10.133.200.69_20181027_20181109.csv"
model = "../../../model/abnormal_detection_model_new/10.133.200.69/session_active_model/SESSION_ACTIVE_MODEL"
column = "SESSION_ACTIVE"
start = 224559
end = 241838
class NewData(object):
def __init__(self, filename, column, timesteps, start, end):
self.timesteps = timesteps
self.filename = filename
self.column = column
self.start = start
self.end = end
self.train_x, self.train_y, self.test_x, self.test_y = self.preprocess()
def MaxMinNormalization(self, x, max_value, min_value):
"""
:param x: data
:param max_value: max value in the data
:param min_value: min value in the data
:return: normalization data
"""
x = (x - min_value) / (max_value - min_value)
return x
def generateGroupDataList(self, seq):
"""
:param seq: continuous sequence of value in data
:return: input data array and label data array in the format of numpy
"""
x = []
y = []
for i in range(len(seq) - self.timesteps):
x.append(seq[i: i + self.timesteps])
y.append(seq[i + self.timesteps])
return np.array(x, dtype=np.float32), np.array(y, dtype=np.float32)
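    # Illustrative example of the windowing above: with timesteps=3 and
    # seq=[1, 2, 3, 4, 5], x becomes [[1,2,3], [2,3,4]] and y becomes [4, 5].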
def preprocess(self):
"""
:return: training data and testing data of given filename and column
"""
data = pd.read_csv(self.filename)
data = data["VALUE"].values.tolist()
data = data[self.start - 1:self.end]
data = self.MaxMinNormalization(data,
np.max(data, axis=0),
np.min(data, axis=0))
train_x, train_y = self.generateGroupDataList(data)
test_x, test_y = self.generateGroupDataList(data)
return train_x, train_y, test_x, test_y
def getBatches(self, x, y, batch_size):
for i in range(0, len(x), batch_size):
begin_i = i
end_i = i + batch_size if (i + batch_size) < len(x) else len(x)
yield x[begin_i:end_i], y[begin_i:end_i]
def initPlaceholder(timesteps):
x = tf.placeholder(tf.float32, [None, timesteps, 1], name='input_x')
y_ = tf.placeholder(tf.float32, [None, 1], name='input_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return x, y_, keep_prob
def lstm_model(x, lstm_size, lstm_layers, keep_prob):
# define basis structure LSTM cell
def lstm_cell():
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# multi layer LSTM cell
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(lstm_layers)])
# dynamic rnn
outputs, final_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# reverse
outputs = outputs[:, -1]
# fully connected
predictions = tf.contrib.layers.fully_connected(outputs, 1, activation_fn=tf.sigmoid)
return predictions
def train_model():
# prepare data
data = NewData(filename=filename, column=column, timesteps=timesteps, start=start, end=end)
# init placeholder
x, y, keep_prob = initPlaceholder(timesteps)
predictions = lstm_model(x,
lstm_size=lstm_size,
lstm_layers=lstm_layers,
keep_prob=keep_prob)
# mse loss function
cost = tf.losses.mean_squared_error(y, predictions)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
tf.add_to_collection("predictions", predictions)
saver = tf.train.Saver()
# define session
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
tf.global_variables_initializer().run()
# batches counter
iteration = 1
# loop for training
for epoch in range(epochs):
for xs, ys in data.getBatches(data.train_x, data.train_y, batch_size):
feed_dict = {x: xs[:, :, None], y: ys[:, None], keep_prob: .5}
loss, train_step = sess.run([cost, optimizer], feed_dict=feed_dict)
if iteration % 100 == 0:
print('Epochs:{}/{}'.format(epoch, epochs),
'Iteration:{}'.format(iteration),
'Train loss: {}'.format(loss))
iteration += 1
# save model as checkpoint format to optional folder
saver.save(sess, model)
# test model
feed_dict = {x: data.test_x[:, :, None], keep_prob: 1.0}
results = sess.run(predictions, feed_dict=feed_dict)
plt.plot(results, 'r', label='predicted')
plt.plot(data.test_y, 'g--', label='real')
plt.legend()
plt.show()
if __name__ == "__main__":
train_model()
| [
"[email protected]"
] | |
899ddad0ccaa03f1cccb2693cbfe0916db0a2112 | cd781c114deb0ee56fcd8e35df038397ebf8dc09 | /Classes and Objects basics/Class.py | 90dd512abd00251408ad11e23ac9c5e7d11cfc57 | [] | no_license | GBoshnakov/SoftUni-Fund | 4549446c3bb355ff74c14d6071d968bde1886de5 | de9318caaf072a82a9be8c3dd4e74212b8edd79e | refs/heads/master | 2023-06-06T04:56:14.951452 | 2021-06-30T21:50:44 | 2021-06-30T21:50:44 | 381,817,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | class Class:
__students_count = 22
def __init__(self, name):
self.name = name
self.students = []
self.grades = []
def add_student(self, name, grade):
if Class.__students_count > len(self.students):
self.students.append(name)
self.grades.append(grade)
def get_average_grade(self):
return sum(self.grades) / len(self.grades)
def __repr__(self):
return f"The students in {self.name}: {', '.join(self.students)}. Average grade: {self.get_average_grade():.2f}"
a_class = Class("11B")
a_class.add_student("Peter", 4.80)
a_class.add_student("George", 6.00)
a_class.add_student("Amy", 3.50)
print(a_class)
| [
"[email protected]"
] | |
f2427f13f588cf7376778e89b011741ee33122e9 | 24eeb28433680606f9d1e099b19ec595552cf06b | /repo/plugin.video.shadow/resources/modules/mediaurl.py | e9b928533b79bd2f69b6f54703a5a709481a89e7 | [] | no_license | irmu/arda | d8ecdedc17bb01650b538dc9e00f438b6d0eed5a | 9b7cab3656c2497c812ab101a56ed661dd8cf4a7 | refs/heads/main | 2023-09-01T08:48:02.823681 | 2023-08-29T18:11:28 | 2023-08-29T18:11:28 | 151,835,016 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | '''
Copyright (C) 2014-2016 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
#
#
import logging
#
class mediaurl:
# CloudService v0.2.4
##
##
def __init__(self, url, qualityDesc, quality, order, title=''):
self.url = url
self.qualityDesc = qualityDesc
self.quality = quality
self.order = order
self.title = title
self.offline = False
def __repr__(self):
        return '{}: {}'.format(self.__class__.__name__, self.order)
def __cmp__(self, other):
if hasattr(other, 'order'):
return self.order.__cmp__(other.order)
def getKey(self):
return self.order
| [
"[email protected]"
] | |
7864ee4973a26fb92dac10a132763892fb308197 | b54eb04ec2de1dec11a7143c6b5049a1d031ddaf | /test/baselines/bench/monitor.py | de942e56796664c57ce2c88a82de1e85341665de | [] | no_license | Jerryxiaoyu/CR_CPG_RL | 78c4c6e7539f08465b1f55125e04f982b1323bf2 | 69213cc48440ea66c42fbe3ace35163174686321 | refs/heads/master | 2020-03-28T12:11:51.491796 | 2018-09-14T04:32:33 | 2018-09-14T04:32:33 | 148,277,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,832 | py | __all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
import numpy as np
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords+info_keywords)
self.logger.writeheader()
self.f.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
info['episode'] = epinfo
self.total_steps += 1
return (ob, rew, done, info)
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
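# Illustrative use (the path is hypothetical):
#   df = load_results('/tmp/openai-logs')
# yields one row per episode with reward 'r', length 'l' and elapsed time 't'.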
def test_monitor():
import pandas
import os
import uuid
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
    assert set(metadata.keys()) == {'env_id', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file) | [
"[email protected]"
] | |
2df22224aa226dc75cbe956bcad704c9efbeb719 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/dhcpv6serverglobals/dhcpv6serverglobals.py | 321f4303de80c42f3b8c01d087612a4cd30a8aff | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,767 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Dhcpv6ServerGlobals(Base):
"""Global settings placeholder for DHCPv6Server running over PPP/L2TP.
The Dhcpv6ServerGlobals class encapsulates a list of dhcpv6ServerGlobals resources that are managed by the user.
A list of resources can be retrieved from the server using the Dhcpv6ServerGlobals.find() method.
The list can be managed by using the Dhcpv6ServerGlobals.add() and Dhcpv6ServerGlobals.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dhcpv6ServerGlobals'
_SDM_ATT_MAP = {
'DefaultLeaseTime': 'defaultLeaseTime',
'MaxLeaseTime': 'maxLeaseTime',
'ObjectId': 'objectId',
}
def __init__(self, parent):
super(Dhcpv6ServerGlobals, self).__init__(parent)
@property
def DefaultLeaseTime(self):
"""
Returns
-------
- number: The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
"""
return self._get_attribute(self._SDM_ATT_MAP['DefaultLeaseTime'])
@DefaultLeaseTime.setter
def DefaultLeaseTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['DefaultLeaseTime'], value)
@property
def MaxLeaseTime(self):
"""
Returns
-------
- number: The maximum Life Time length in seconds that will be assigned to a lease.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxLeaseTime'])
@MaxLeaseTime.setter
def MaxLeaseTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxLeaseTime'], value)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
def update(self, DefaultLeaseTime=None, MaxLeaseTime=None):
"""Updates dhcpv6ServerGlobals resource on the server.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, DefaultLeaseTime=None, MaxLeaseTime=None):
"""Adds a new dhcpv6ServerGlobals resource on the server and adds it to the container.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
Returns
-------
- self: This instance with all currently retrieved dhcpv6ServerGlobals resources using find and the newly added dhcpv6ServerGlobals resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpv6ServerGlobals resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, DefaultLeaseTime=None, MaxLeaseTime=None, ObjectId=None):
"""Finds and retrieves dhcpv6ServerGlobals resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpv6ServerGlobals resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpv6ServerGlobals resources from the server.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching dhcpv6ServerGlobals resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpv6ServerGlobals data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpv6ServerGlobals resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
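# Illustrative usage (the parent handle and values are assumptions): existing
# resources can be retrieved and then modified in place, e.g.
#   sg = parent_protocol_stack.Dhcpv6ServerGlobals.find()
#   sg.update(DefaultLeaseTime=3600, MaxLeaseTime=86400)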
| [
"[email protected]"
] | |
10d43faffcf81b8dde6fa7ea3ea774c52a986c6b | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Numerical_Heat_Transfer_and_Fluid_Flow_Patankar/CFD_books_codes-master/Patnakar/2D_SIMPLE_algorithm.py | 1ede47be2bee4e11dc15afb666b92bb83cea0fe6 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 4,324 | py | import numpy as np
from matplotlib import pyplot as plt
# Peclet function scheme
def funcPeclet(P, n):
    if n == 1:
        # Central difference: A(|P|) = 1 - 0.5|P|
        return 1 - 0.5*abs(P)
    if n == 2:
        # Upwind: A(|P|) = 1
        return 1
    if n == 3:
        # Hybrid: A(|P|) = max(0, 1 - 0.5|P|)
        return max(0, 1 - 0.5*abs(P))
    if n == 4:
        # Power law: A(|P|) = max(0, (1 - 0.1|P|)^5)
        return max(0, pow(1 - 0.1*abs(P), 5))
    else:
        # Default to the power-law scheme
        return max(0, pow(1 - 0.1*abs(P), 5))
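# For reference, the A(|P|) functions implemented above follow Patankar's
# Table 5.2: central difference 1 - 0.5|P|, upwind 1, hybrid max(0, 1 - 0.5|P|),
# and power law max(0, (1 - 0.1|P|)^5), where P is the cell Peclet number.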
# Define the domain
x_len = 8
y_len = 8
x_points = 11
y_points = 11
del_x = x_len/float(x_points-1)
del_y = y_len/float(y_points-1)
x = np.arange(x_points+1)
y = np.arange(y_points+1)
f = 0.5
x_w = np.arange(x[1] - f, x[-2], 1)
x_e = np.arange(x[1] + f, x[-1], 1)
y_s = np.arange(y[1] - f, y[-2], 1)
y_n = np.arange(y[1] + f, y[-1], 1)
u = np.zeros((x_points-1, y_points-1))
v = np.zeros((x_points-1, y_points-1))
u_star = np.zeros((x_points-1, y_points-1))
v_star = np.zeros((x_points-1, y_points-1))
P = np.zeros((x_points, y_points))
P_star = np.zeros((x_points, y_points))
P_corr = np.zeros((x_points, y_points))
# Boundary conditions
u[0,:] = 10
v[:,0] = 11
P[0,:] = 20
P[-1,:] = 10
rho = 1
Sc = 50 # Linearization of source term
Sp = 0
Gamma = 1 # Assuming equal Gamma (diffusive coefficient) throughout the domain
n = 1 # discretization scheme passed to funcPeclet: 1 = central difference, 2 = upwind, 3 = hybrid, 4 = power law
alpha = 1 # Relaxation factor
n_itrs = 100
for itrs in range(n_itrs):
for i in range(1, x_points-2):
for j in range(1, y_points-2):
del_x_e = x[i + 1] - x[i]
del_x_w = x[i] - x[i - 1]
del_y_s = y[j] - y[j - 1]
del_y_n = y[j + 1] - y[j]
De, Dw = Gamma * del_y / float(del_x_e), Gamma * del_y / float(del_x_w)
Dn, Ds = Gamma * del_x / float(del_y_n), Gamma * del_x / float(del_y_s)
Dpe, Dpn = Gamma * del_y / float(del_x), Gamma * del_x / float(del_y)
Fe, Fw = rho * u[i+1,j] * del_y, rho * u[i-1,j] * del_y
Fn, Fs = rho * v[i,j+1] * del_x, rho * v[i,j-1] * del_x
Fpe, Fpn = rho * u[i,j] * del_y, rho * v[i,j] * del_x
Pe, Pw = Fe / float(De), Fw / float(Dw)
Pn, Ps = Fn / float(Dn), Fs / float(Ds)
Ppe, Ppn = Fpe / float(Dpe), Fpn / float(Dpn)
aE = De * funcPeclet(Pe, n) + max(-1 * Fe, 0)
aW = Dw * funcPeclet(Pw, n) + max(-1 * Fw, 0)
aN = Dn * funcPeclet(Pn, n) + max(-1 * Fn, 0)
aS = Ds * funcPeclet(Ps, n) + max(-1 * Fs, 0)
aP_e, aP_n = Dpe * funcPeclet(Ppe, n) + max(-1 * Fpe, 0), Dpn * funcPeclet(Ppn, n) + max(-1 * Fpn, 0)
b = Sc * del_x * del_y
u_star[i,j] = ((aE * u[i + 1, j] + aW * u[i - 1, j] + aN * v[i, j + 1] + aS * v[i, j - 1]) + b + (
P[i, j] - P[i + 1, j]) * del_y) / float(aP_e)
v_star[i,j] = ((aE * u[i + 1, j] + aW * u[i - 1, j] + aN * v[i, j + 1] + aS * v[i, j - 1]) + b + (
P[i, j] - P[i, j+1]) * del_x) / float(aP_n)
d_e = del_y/float(aP_e)
d_w = d_e
d_n = del_x/float(aP_n)
d_s = d_n
aE = rho * d_e * del_y
aW = rho * d_w * del_y
aN = rho * d_n * del_x
aS = rho * d_s * del_x
aP = aE + aW + aN + aS
b1 = rho * (u_star[i, j] - u_star[i + 1, j]) * del_y + rho * (v_star[i, j] - v_star[i, j + 1]) * del_x
P_corr[i,j] = (aE*P_corr[i+1, j] + aW*P_corr[i-1,j] + aN*P[i,j+1] + aS*P[i,j-1] + b1)/float(aP)
P[i,j] = P_star[i,j] + alpha*P_corr[i,j]
u[i, j] = u_star[i, j] + d_e * (P_corr[i, j] - P_corr[i + 1, j])
v[i, j] = v_star[i, j] + d_n * (P_corr[i, j] - P_corr[i, j + 1])
for i in range(0, x_points):
for j in range(0, y_points):
P_star[i,j] = P_corr[i,j]
print ("\n Pressure distribution is: \n" + str(P))
print ("\n The max pressure is: \t" + str(P.max()))
xx = np.linspace(0, x_len, x_points+1)
yy = np.linspace(0, y_len, y_points+1)
cmap = plt.pcolormesh(xx, yy, P) # https://scientific-python-101.readthedocs.io/matplotlib/pcolormesh_plots.html
plt.colorbar(cmap)
plt.show()
| [
"[email protected]"
] | |
b9d874a3c49d258fd8217ba238ad471f631b0566 | 6b5bf225a8453c5f190d8ab0e55978bb3eb7a904 | /dllutils.py | d61988930f6c224d1d149293f583a98d01e8aa4d | [
"BSD-3-Clause"
] | permissive | GrinnyManyform/pyzo | 2ffeb45d7743cc0cf88ca8606e60d12cef5c6158 | e15e4220dc33570f3f55ca9e47a24005fb6988f5 | refs/heads/master | 2021-01-20T16:42:16.496531 | 2016-09-16T11:26:12 | 2016-09-16T11:26:12 | 68,451,901 | 1 | 0 | null | 2016-09-17T12:51:13 | 2016-09-17T12:51:13 | null | UTF-8 | Python | false | false | 9,034 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Almar Klein
# This module is distributed under the terms of the (new) BSD License.
""" Various utilities to modify Dynamic Link libraries.
Needed to build the Pyzo distro, and it's possible that this
functionality is needed to fix extension modules after installation in
a Pyzo distro.
This is a mix of utilities for Windows, Mac and Linux.
"""
import os
import stat
import sys
import subprocess
import time
import re
_COMMAND_TO_SEARCH_PATH = []
def get_command_to_set_search_path():
""" Get the command to change the RPATH of executables and dynamic
libraries. Returns None if there is no such command or if it
cannot be found.
"""
# Check if already computed
if _COMMAND_TO_SEARCH_PATH:
return _COMMAND_TO_SEARCH_PATH[0]
# Get name of the utility
# In Pyzo it should be present in 'shared'.
utilCommand = None
if sys.platform.startswith('win'):
return
if sys.platform.startswith('linux'):
utilname = 'patchelf'
if sys.platform.startswith('darwin'):
utilname = 'install_name_tool'
if True:
        # Try old Pyzo location
        utilCommand = os.path.join(sys.prefix, 'shared', utilname)
        if not os.path.isfile(utilCommand):
            # Try new Pyzo / anaconda location
            utilCommand = os.path.join(sys.prefix, 'bin', utilname)
        if not os.path.isfile(utilCommand):
            # Fall back to the utility on the PATH
            utilCommand = utilname
# Test whether it exists
try:
subprocess.check_output(['which', utilCommand])
except Exception:
raise RuntimeError('Could not get command (%s) to set search path.' % utilCommand)
# Store and return
_COMMAND_TO_SEARCH_PATH.append(utilCommand)
return utilCommand
def set_search_path(fname, *args):
""" set_search_path(fname, *args)
For the given library/executable, set the search path to the
relative paths specified in args.
For Linux: The RPATH is the path to search for its dependencies.
http://enchildfone.wordpress.com/2010/03/23/a-description-of-rpath-origin-ld_library_path-and-portable-linux-binaries/
For Mac: We use the @rpath identifier to get similar behavior to
Linux. But each dependency must be specified. To realize this, we
need to check for each dependency whether it is on one of te given
search paths.
For Windows: not supported in any way. Windows searches next to the
library and then in system paths and PATH.
"""
# Prepare
args = [arg.lstrip('/') for arg in args if arg]
args = [arg for arg in args if arg != '.'] # Because we add empty dir anyway
args.append('') # make libs search next to themselves
command = get_command_to_set_search_path()
if sys.platform.startswith('linux'):
# Create search path value
rpath = ':'.join( ['$ORIGIN/'+arg for arg in args] )
# Modify rpath using a call to patchelf utility
cmd = [command, '--set-rpath', rpath, fname]
subprocess.check_call(cmd)
print('Set RPATH for %r' % os.path.basename(fname))
#print('Set RPATH for %r: %r' % (os.path.basename(fname), rpath))
elif sys.platform.startswith('darwin'):
# ensure write permissions
mode = os.stat(fname).st_mode
if not (mode & stat.S_IWUSR):
os.chmod(fname, mode | stat.S_IWUSR)
# let the file itself know its place (simpyl on rpath)
name = os.path.basename(fname)
subprocess.call(('install_name_tool', '-id', '@rpath/'+name, fname))
# find the references: call otool -L on the file
otool = subprocess.Popen(('otool', '-L', fname),
stdout = subprocess.PIPE)
references = otool.stdout.readlines()[1:]
# Replace each reference
rereferencedlibs = []
for reference in references:
# find the actual referenced file name
referencedFile = reference.decode().strip().split()[0]
if referencedFile.startswith('@'):
continue # the referencedFile is already a relative path
# Get lib name
_, name = os.path.split(referencedFile)
if name.lower() == 'python':
name = 'libpython' # Rename Python lib on Mac
# see if we provided the referenced file
potentiallibs = [os.path.join(os.path.dirname(fname), arg, name)
for arg in args]
# if so, change the reference and rpath
if any([os.path.isfile(p) for p in potentiallibs]):
subprocess.call(('install_name_tool', '-change',
referencedFile, '@rpath/'+name, fname))
for arg in args:
mac_add_rpath(fname, '@loader_path/' + arg)
mac_add_rpath(fname, '@executable_path/') # use libpython next to exe
rereferencedlibs.append(name)
if rereferencedlibs:
print('Replaced refs for "%s": %s' %
(os.path.basename(fname), ', '.join(rereferencedlibs)) )
elif sys.platform.startswith('win'):
raise RuntimeError('Windows has no way of setting the search path on a library or exe.')
else:
raise RuntimeError('Do not know how to set search path of library or exe on %s' % sys.platform)
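# Illustrative call (paths are hypothetical):
#   set_search_path('dist/lib/mylib.so', 'deps')
# gives the library an RPATH of $ORIGIN/deps plus $ORIGIN itself on Linux,
# with the corresponding @rpath entries being set up on Mac.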
def mac_add_rpath(fname, rpath):
""" mac_add_rpath(fname, rpath)
Set the rpath for a Mac library or executble. If the rpath is already
registered, it is ignored.
"""
cmd = ['install_name_tool', '-add_rpath', rpath, fname]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while p.poll() is None:
time.sleep(0.01)
if p.returncode:
msg = p.stdout.read().decode('utf-8')
if 'would duplicate path' in msg:
pass # Ignore t
else:
raise RuntimeError('Could not set rpath: ' + msg)
def remove_CRT_dependencies(dirname, recurse=True):
""" remove_CRT_dependencies(path, recurse=True)
Check all .dll and .pyd files in the given directory (and its
subdirectories if recurse is True), removing the dependency on the
Windows C runtime from the embedded manifest.
"""
dllExt = ['.dll', '.pyd']
for entry in os.listdir(dirname):
p = os.path.join(dirname, entry)
if recurse and os.path.isdir(p):
remove_CRT_dependencies(p, recurse)
elif os.path.isfile(p) and os.path.splitext(p)[1].lower() in dllExt:
remove_CRT_dependency(p)
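# Illustrative call (the path is hypothetical): after installing a package that
# embeds a VC90 CRT manifest, e.g.
#   remove_CRT_dependencies(r'C:\my_dist\Lib\site-packages\PySide')
# all .dll/.pyd files below that directory are patched in place.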
def remove_CRT_dependency(filename):
""" remove_CRT_dependency(filename)
Modify the embedded manifest of a Windows dll (or pyd file),
such that it no longer depends on the Windows C runtime.
In effect, the dll will fall back to using the C runtime that
the executable depends on (and has loaded in memory).
This function is not necessary for dll's and pyd's that come with
Python, because these are build without the CRT dependencies for a
while. However, some third party packages (e.g. PySide) do have
these dependencies, and they need to be removed in order to work
on a system that does not have the C-runtime installed.
Based on this diff by C. Gohlke:
http://bugs.python.org/file15113/msvc9compiler_stripruntimes_regexp2.diff
See discussion at: http://bugs.python.org/issue4120
"""
# Read the whole file
with open(filename, 'rb') as f:
try:
bb = f.read()
except IOError:
#raise IOError('Could not read %s'%filename)
print('Warning: could not read %s'%filename)
return
# Remove assemblyIdentity tag
# This code is different from that in python's distutils/msvc9compiler.py
# by removing re.DOTALL and replaceing the second DOT with "(.|\n|\r)",
# which means that the first DOT cannot contain newlines. Would we not do
# this, the match is too greedy (and causes tk85.dll to break).
pattern = r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|')(.|\n|\r)*?(/>|</assemblyIdentity>)"""
pattern = re.compile(pattern.encode('ascii'))
bb, hasMatch = _replacePatternWithSpaces(pattern, bb)
if hasMatch:
# Remove dependentAssembly tag if it's empty
pattern = "<dependentAssembly>\s*</dependentAssembly>".encode('ascii')
bb, hasMatch = _replacePatternWithSpaces(pattern, bb)
# Write back
with open(filename, "wb") as f:
f.write(bb)
print('Removed embedded MSVCR dependency for: %s' % filename)
def _replacePatternWithSpaces(pattern, bb):
match = re.search(pattern, bb)
if match is not None:
L = match.end() - match.start()
bb = re.sub(pattern, b" "*L, bb)
return bb, True
else:
return bb, False
| [
"[email protected]"
] | |
a7376cb9c16d643647caa204b56b259ea74c9399 | 0ef4371c87c2196d9c2d2706e51f4b452f6e9d19 | /4_Curso/Proyecto_Sistemas_Informáticos/model_exam_2/venv/lib/python3.7/site-packages/flake8/style_guide.py | c617f5ab89ccd6731e3ec8f31da49e238d4f9549 | [
"Apache-2.0"
] | permissive | AlejandroSantorum/Apuntes_Mat_IngInf | 49c41002314216a994aa60db04062e34abc065eb | c047e41d086f3028ec78ac3a663b9848862e52df | refs/heads/master | 2023-05-15T03:02:56.882342 | 2023-04-20T20:19:54 | 2023-04-20T20:19:54 | 212,392,195 | 29 | 10 | Apache-2.0 | 2023-09-09T13:03:45 | 2019-10-02T16:44:22 | Jupyter Notebook | UTF-8 | Python | false | false | 21,018 | py | """Implementation of the StyleGuide used by Flake8."""
import collections
import contextlib
import copy
import enum
import itertools
import linecache
import logging
import sys
from typing import Optional, Union
from flake8 import defaults
from flake8 import statistics
from flake8 import utils
__all__ = ("StyleGuide",)
LOG = logging.getLogger(__name__)
if sys.version_info < (3, 2):
from functools32 import lru_cache
else:
from functools import lru_cache
# TODO(sigmavirus24): Determine if we need to use enum/enum34
class Selected(enum.Enum):
"""Enum representing an explicitly or implicitly selected code."""
Explicitly = "explicitly selected"
Implicitly = "implicitly selected"
class Ignored(enum.Enum):
"""Enum representing an explicitly or implicitly ignored code."""
Explicitly = "explicitly ignored"
Implicitly = "implicitly ignored"
class Decision(enum.Enum):
"""Enum representing whether a code should be ignored or selected."""
Ignored = "ignored error"
Selected = "selected error"
@lru_cache(maxsize=512)
def find_noqa(physical_line):
return defaults.NOQA_INLINE_REGEXP.search(physical_line)
_Violation = collections.namedtuple(
"Violation",
[
"code",
"filename",
"line_number",
"column_number",
"text",
"physical_line",
],
)
class Violation(_Violation):
"""Class representing a violation reported by Flake8."""
def is_inline_ignored(self, disable_noqa):
# type: (Violation) -> bool
"""Determine if a comment has been added to ignore this line.
:param bool disable_noqa:
Whether or not users have provided ``--disable-noqa``.
:returns:
True if error is ignored in-line, False otherwise.
:rtype:
bool
"""
physical_line = self.physical_line
# TODO(sigmavirus24): Determine how to handle stdin with linecache
if disable_noqa:
return False
if physical_line is None:
physical_line = linecache.getline(self.filename, self.line_number)
noqa_match = find_noqa(physical_line)
if noqa_match is None:
LOG.debug("%r is not inline ignored", self)
return False
codes_str = noqa_match.groupdict()["codes"]
if codes_str is None:
LOG.debug("%r is ignored by a blanket ``# noqa``", self)
return True
codes = set(utils.parse_comma_separated_list(codes_str))
if self.code in codes or self.code.startswith(tuple(codes)):
LOG.debug(
"%r is ignored specifically inline with ``# noqa: %s``",
self,
codes_str,
)
return True
LOG.debug(
"%r is not ignored inline with ``# noqa: %s``", self, codes_str
)
return False
def is_in(self, diff):
"""Determine if the violation is included in a diff's line ranges.
This function relies on the parsed data added via
:meth:`~StyleGuide.add_diff_ranges`. If that has not been called and
we are not evaluating files in a diff, then this will always return
True. If there are diff ranges, then this will return True if the
line number in the error falls inside one of the ranges for the file
(and assuming the file is part of the diff data). If there are diff
ranges, this will return False if the file is not part of the diff
data or the line number of the error is not in any of the ranges of
the diff.
:returns:
True if there is no diff or if the error is in the diff's line
number ranges. False if the error's line number falls outside
the diff's line number ranges.
:rtype:
bool
"""
if not diff:
return True
# NOTE(sigmavirus24): The parsed diff will be a defaultdict with
# a set as the default value (if we have received it from
# flake8.utils.parse_unified_diff). In that case ranges below
# could be an empty set (which is False-y) or if someone else
# is using this API, it could be None. If we could guarantee one
# or the other, we would check for it more explicitly.
line_numbers = diff.get(self.filename)
if not line_numbers:
return False
return self.line_number in line_numbers
class DecisionEngine(object):
"""A class for managing the decision process around violations.
This contains the logic for whether a violation should be reported or
ignored.
"""
def __init__(self, options):
"""Initialize the engine."""
self.cache = {}
self.selected = tuple(options.select)
self.extended_selected = tuple(
sorted(options.extended_default_select, reverse=True)
)
self.enabled_extensions = tuple(options.enable_extensions)
self.all_selected = tuple(
sorted(self.selected + self.enabled_extensions, reverse=True)
)
self.ignored = tuple(
sorted(
itertools.chain(options.ignore, options.extend_ignore),
reverse=True,
)
)
self.using_default_ignore = set(self.ignored) == set(defaults.IGNORE)
self.using_default_select = set(self.selected) == set(defaults.SELECT)
def _in_all_selected(self, code):
return self.all_selected and code.startswith(self.all_selected)
def _in_extended_selected(self, code):
return self.extended_selected and code.startswith(
self.extended_selected
)
def was_selected(self, code):
# type: (str) -> Union[Selected, Ignored]
"""Determine if the code has been selected by the user.
:param str code:
The code for the check that has been run.
:returns:
Selected.Implicitly if the selected list is empty,
Selected.Explicitly if the selected list is not empty and a match
was found,
Ignored.Implicitly if the selected list is not empty but no match
was found.
"""
if self._in_all_selected(code):
return Selected.Explicitly
if not self.all_selected and self._in_extended_selected(code):
# If it was not explicitly selected, it may have been implicitly
# selected because the check comes from a plugin that is enabled by
# default
return Selected.Implicitly
return Ignored.Implicitly
def was_ignored(self, code):
# type: (str) -> Union[Selected, Ignored]
"""Determine if the code has been ignored by the user.
:param str code:
The code for the check that has been run.
:returns:
Selected.Implicitly if the ignored list is empty,
Ignored.Explicitly if the ignored list is not empty and a match was
found,
Selected.Implicitly if the ignored list is not empty but no match
was found.
"""
if self.ignored and code.startswith(self.ignored):
return Ignored.Explicitly
return Selected.Implicitly
def more_specific_decision_for(self, code):
# type: (Violation) -> Decision
select = find_first_match(code, self.all_selected)
extra_select = find_first_match(code, self.extended_selected)
ignore = find_first_match(code, self.ignored)
if select and ignore:
# If the violation code appears in both the select and ignore
# lists (in some fashion) then if we're using the default ignore
# list and a custom select list we should select the code. An
# example usage looks like this:
# A user has a code that would generate an E126 violation which
# is in our default ignore list and they specify select=E.
# We should be reporting that violation. This logic changes,
# however, if they specify select and ignore such that both match.
# In that case we fall through to our find_more_specific call.
# If, however, the user hasn't specified a custom select, and
# we're using the defaults for both select and ignore then the
# more specific rule must win. In most cases, that will be to
# ignore the violation since our default select list is very
# high-level and our ignore list is highly specific.
if self.using_default_ignore and not self.using_default_select:
return Decision.Selected
return find_more_specific(select, ignore)
if extra_select and ignore:
# At this point, select is false-y. Now we need to check if the
# code is in our extended select list and our ignore list. This is
# a *rare* case as we see little usage of the extended select list
# that plugins can use, so I suspect this section may change to
# look a little like the block above in which we check if we're
# using our default ignore list.
return find_more_specific(extra_select, ignore)
if select or (extra_select and self.using_default_select):
# Here, ignore was false-y and the user has either selected
# explicitly the violation or the violation is covered by
# something in the extended select list and we're using the
# default select list. In either case, we want the violation to be
# selected.
return Decision.Selected
if select is None and (
extra_select is None or not self.using_default_ignore
):
return Decision.Ignored
if (select is None and not self.using_default_select) and (
ignore is None and self.using_default_ignore
):
return Decision.Ignored
return Decision.Selected
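    # Illustrative decisions (codes are examples): with the default ignore list
    # and a custom select of ['E'], an E126 violation is Selected even though
    # E126 sits in the default ignore list; with an explicit ignore=['E126']
    # alongside select=['E'], the longer match E126 wins and the violation is
    # Ignored.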
def make_decision(self, code):
"""Decide if code should be ignored or selected."""
LOG.debug('Deciding if "%s" should be reported', code)
selected = self.was_selected(code)
ignored = self.was_ignored(code)
LOG.debug(
'The user configured "%s" to be "%s", "%s"',
code,
selected,
ignored,
)
if (
selected is Selected.Explicitly or selected is Selected.Implicitly
) and ignored is Selected.Implicitly:
decision = Decision.Selected
elif (
selected is Selected.Explicitly and ignored is Ignored.Explicitly
) or (
selected is Ignored.Implicitly and ignored is Selected.Implicitly
):
decision = self.more_specific_decision_for(code)
elif selected is Ignored.Implicitly or ignored is Ignored.Explicitly:
decision = Decision.Ignored # pylint: disable=R0204
return decision
def decision_for(self, code):
# type: (str) -> Decision
"""Return the decision for a specific code.
This method caches the decisions for codes to avoid retracing the same
logic over and over again. We only care about the select and ignore
rules as specified by the user in their configuration files and
command-line flags.
This method does not look at whether the specific line is being
ignored in the file itself.
:param str code:
The code for the check that has been run.
"""
decision = self.cache.get(code)
if decision is None:
decision = self.make_decision(code)
self.cache[code] = decision
LOG.debug('"%s" will be "%s"', code, decision)
return decision
class StyleGuideManager(object):
"""Manage multiple style guides for a single run."""
def __init__(self, options, formatter, decider=None):
"""Initialize our StyleGuide.
.. todo:: Add parameter documentation.
"""
self.options = options
self.formatter = formatter
self.stats = statistics.Statistics()
self.decider = decider or DecisionEngine(options)
self.style_guides = []
self.default_style_guide = StyleGuide(
options, formatter, self.stats, decider=decider
)
self.style_guides = list(
itertools.chain(
[self.default_style_guide],
self.populate_style_guides_with(options),
)
)
def populate_style_guides_with(self, options):
"""Generate style guides from the per-file-ignores option.
:param options:
The original options parsed from the CLI and config file.
:type options:
:class:`~optparse.Values`
:returns:
A copy of the default style guide with overridden values.
:rtype:
:class:`~flake8.style_guide.StyleGuide`
"""
per_file = utils.parse_files_to_codes_mapping(
options.per_file_ignores
)
for filename, violations in per_file:
yield self.default_style_guide.copy(
filename=filename, extend_ignore_with=violations
)
@lru_cache(maxsize=None)
def style_guide_for(self, filename):
"""Find the StyleGuide for the filename in particular."""
guides = sorted(
(g for g in self.style_guides if g.applies_to(filename)),
key=lambda g: len(g.filename or ""),
)
if len(guides) > 1:
return guides[-1]
return guides[0]
@contextlib.contextmanager
def processing_file(self, filename):
"""Record the fact that we're processing the file's results."""
guide = self.style_guide_for(filename)
with guide.processing_file(filename):
yield guide
def handle_error(
self,
code,
filename,
line_number,
column_number,
text,
physical_line=None,
):
# type: (str, str, int, int, str, Optional[str]) -> int
"""Handle an error reported by a check.
:param str code:
The error code found, e.g., E123.
:param str filename:
The file in which the error was found.
:param int line_number:
The line number (where counting starts at 1) at which the error
occurs.
:param int column_number:
The column number (where counting starts at 1) at which the error
occurs.
:param str text:
The text of the error message.
:param str physical_line:
The actual physical line causing the error.
:returns:
1 if the error was reported. 0 if it was ignored. This is to allow
for counting of the number of errors found that were not ignored.
:rtype:
int
"""
guide = self.style_guide_for(filename)
return guide.handle_error(
code, filename, line_number, column_number, text, physical_line
)
def add_diff_ranges(self, diffinfo):
"""Update the StyleGuides to filter out information not in the diff.
This provides information to the underlying StyleGuides so that only
the errors in the line number ranges are reported.
:param dict diffinfo:
Dictionary mapping filenames to sets of line number ranges.
"""
for guide in self.style_guides:
guide.add_diff_ranges(diffinfo)
class StyleGuide(object):
"""Manage a Flake8 user's style guide."""
def __init__(
self, options, formatter, stats, filename=None, decider=None
):
"""Initialize our StyleGuide.
.. todo:: Add parameter documentation.
"""
self.options = options
self.formatter = formatter
self.stats = stats
self.decider = decider or DecisionEngine(options)
self.filename = filename
if self.filename:
self.filename = utils.normalize_path(self.filename)
self._parsed_diff = {}
def __repr__(self):
"""Make it easier to debug which StyleGuide we're using."""
return "<StyleGuide [{}]>".format(self.filename)
def copy(self, filename=None, extend_ignore_with=None, **kwargs):
"""Create a copy of this style guide with different values."""
filename = filename or self.filename
options = copy.deepcopy(self.options)
options.ignore.extend(extend_ignore_with or [])
return StyleGuide(
options, self.formatter, self.stats, filename=filename
)
@contextlib.contextmanager
def processing_file(self, filename):
"""Record the fact that we're processing the file's results."""
self.formatter.beginning(filename)
yield self
self.formatter.finished(filename)
def applies_to(self, filename):
"""Check if this StyleGuide applies to the file.
:param str filename:
The name of the file with violations that we're potentially
applying this StyleGuide to.
:returns:
True if this applies, False otherwise
:rtype:
bool
"""
if self.filename is None:
return True
return utils.matches_filename(
filename,
patterns=[self.filename],
log_message='{!r} does %(whether)smatch "%(path)s"'.format(self),
logger=LOG,
)
def should_report_error(self, code):
# type: (str) -> Decision
"""Determine if the error code should be reported or ignored.
This method only cares about the select and ignore rules as specified
by the user in their configuration files and command-line flags.
This method does not look at whether the specific line is being
ignored in the file itself.
:param str code:
The code for the check that has been run.
"""
return self.decider.decision_for(code)
def handle_error(
self,
code,
filename,
line_number,
column_number,
text,
physical_line=None,
):
# type: (str, str, int, int, str, Optional[str]) -> int
"""Handle an error reported by a check.
:param str code:
The error code found, e.g., E123.
:param str filename:
The file in which the error was found.
:param int line_number:
The line number (where counting starts at 1) at which the error
occurs.
:param int column_number:
The column number (where counting starts at 1) at which the error
occurs.
:param str text:
The text of the error message.
:param str physical_line:
The actual physical line causing the error.
:returns:
1 if the error was reported. 0 if it was ignored. This is to allow
for counting of the number of errors found that were not ignored.
:rtype:
int
"""
disable_noqa = self.options.disable_noqa
# NOTE(sigmavirus24): Apparently we're provided with 0-indexed column
# numbers so we have to offset that here. Also, if a SyntaxError is
# caught, column_number may be None.
if not column_number:
column_number = 0
error = Violation(
code,
filename,
line_number,
column_number + 1,
text,
physical_line,
)
error_is_selected = (
self.should_report_error(error.code) is Decision.Selected
)
is_not_inline_ignored = error.is_inline_ignored(disable_noqa) is False
is_included_in_diff = error.is_in(self._parsed_diff)
if (
error_is_selected
and is_not_inline_ignored
and is_included_in_diff
):
self.formatter.handle(error)
self.stats.record(error)
return 1
return 0
def add_diff_ranges(self, diffinfo):
"""Update the StyleGuide to filter out information not in the diff.
This provides information to the StyleGuide so that only the errors
in the line number ranges are reported.
:param dict diffinfo:
Dictionary mapping filenames to sets of line number ranges.
"""
self._parsed_diff = diffinfo
def find_more_specific(selected, ignored):
if selected.startswith(ignored) and selected != ignored:
return Decision.Selected
return Decision.Ignored
def find_first_match(error_code, code_list):
startswith = error_code.startswith
for code in code_list:
if startswith(code):
break
else:
return None
return code
| [
"[email protected]"
] | |
5d73acd96034f8bcfb122c5a8fd790141f859aa3 | 50b2a447cccfbad70ca2f7d19d4550a708a74cbe | /blog/urls.py | 6bfee92f9dce6c0387b338a007edade69a9decda | [] | no_license | halsayed/portfolio_project | c07fa9f21f252d5cce130c78220ceefed2504675 | c22c3d219b126b71951377285b18ce1b438a1b1d | refs/heads/master | 2020-03-20T11:25:37.333806 | 2018-06-17T21:18:50 | 2018-06-17T21:18:50 | 137,402,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.allblogs, name='allblogs', ),
path('<int:blog_id>/', views.blogdetail, name='blogdetail')
] | [
"[email protected]"
] | |
6c04ed9d151a7bd672fa94d24da5c68676b8669c | bbe7d6d59ef6d7364ff06377df9658367a19c425 | /cogdominium/CogdoBarrelRoom.py | 2bda0b18b835d5204d88476acf5c89a676ed85f0 | [
"Apache-2.0"
] | permissive | DedMemez/ODS-August-2017 | 1b45c912ad52ba81419c1596644d8db2a879bd9b | 5d6214732e3245f63bfa250e3e9c881cc2dc28ad | refs/heads/master | 2021-01-22T18:37:51.626942 | 2017-08-19T02:04:51 | 2017-08-19T02:04:51 | 100,762,513 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 7,736 | py | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.cogdominium.CogdoBarrelRoom
from panda3d.core import Camera, Fog, Lens, Light, Point3, Vec3
import random
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals, ToontownTimer
from toontown.cogdominium import CogdoBarrelRoomConsts, CogdoBarrelRoomRewardPanel
from toontown.distributed import DelayDelete
class CogdoBarrelRoom:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogdoBarrelRoom')
def __init__(self):
self.timer = None
self.model = None
self._isLoaded = False
self.dummyElevInNode = None
self.cogdoBarrelsNode = None
self.entranceNode = None
self.nearBattleNode = None
self.rewardUi = None
self.rewardUiTaskName = 'CogdoBarrelRoom-RewardUI'
self.rewardCameraTaskName = 'CogdoBarrelRoom-RewardCamera'
self.fog = None
self.defaultFar = None
self.stomperSfx = None
return
def destroy(self):
self.unload()
def load(self):
if self._isLoaded:
return
self.timer = ToontownTimer.ToontownTimer()
self.timer.stash()
self.model = loader.loadModel(CogdoBarrelRoomConsts.BarrelRoomModel)
self.model.setPos(*CogdoBarrelRoomConsts.BarrelRoomModelPos)
self.model.reparentTo(render)
self.model.stash()
self.entranceNode = self.model.attachNewNode('door-entrance')
self.entranceNode.setPos(0, -65, 0)
self.nearBattleNode = self.model.attachNewNode('near-battle')
self.nearBattleNode.setPos(0, -25, 0)
self.rewardUi = CogdoBarrelRoomRewardPanel.CogdoBarrelRoomRewardPanel()
self.hideRewardUi()
self.stomperSfx = loader.loadSfx(CogdoBarrelRoomConsts.StomperSound)
self.fog = Fog('barrel-room-fog')
self.fog.setColor(CogdoBarrelRoomConsts.BarrelRoomFogColor)
self.fog.setLinearRange(*CogdoBarrelRoomConsts.BarrelRoomFogLinearRange)
self.brBarrel = render.attachNewNode('@@CogdoBarrels')
for i in xrange(len(CogdoBarrelRoomConsts.BarrelProps)):
self.bPath = self.brBarrel.attachNewNode('%s%s' % (CogdoBarrelRoomConsts.BarrelPathName, i))
self.bPath.setPos(CogdoBarrelRoomConsts.BarrelProps[i]['pos'])
self.bPath.setH(CogdoBarrelRoomConsts.BarrelProps[i]['heading'])
self._isLoaded = True
def unload(self):
if self.model:
self.model.removeNode()
self.model = None
if self.timer:
self.timer.destroy()
self.timer = None
if self.rewardUi:
self.rewardUi.destroy()
self.rewardUi = None
if hasattr(self, 'fog'):
if self.fog:
render.setFogOff()
del self.fog
taskMgr.remove(self.rewardUiTaskName)
taskMgr.remove(self.rewardCameraTaskName)
self._isLoaded = False
return
def isLoaded(self):
return self._isLoaded
def show(self):
if not self.cogdoBarrelsNode:
self.cogdoBarrelsNode = render.find('**/@@CogdoBarrels')
if not self.cogdoBarrelsNode.isEmpty():
self.cogdoBarrelsNode.reparentTo(self.model)
self.cogdoBarrelsNode.unstash()
base.localAvatar.b_setAnimState('neutral')
self.defaultFar = base.camLens.getFar()
base.camLens.setFar(CogdoBarrelRoomConsts.BarrelRoomCameraFar)
base.camLens.setMinFov(settings['fov'] / (4.0 / 3.0))
self.showBattleAreaLight(True)
render.setFog(self.fog)
self.model.unstash()
def hide(self):
self.model.stash()
if self.defaultFar is not None:
base.camLens.setFar(self.defaultFar)
return
def activate(self):
self.notify.info('Activating barrel room: %d sec timer.' % CogdoBarrelRoomConsts.CollectionTime)
self.timer.unstash()
self.timer.posAboveShtikerBook()
self.timer.countdown(CogdoBarrelRoomConsts.CollectionTime)
base.cr.playGame.getPlace().fsm.request('walk')
def deactivate(self):
self.notify.info('Deactivating barrel room.')
self.timer.stop()
self.timer.stash()
def placeToonsAtEntrance(self, toons):
for i in xrange(len(toons)):
toons[i].setPosHpr(self.entranceNode, *CogdoBarrelRoomConsts.BarrelRoomPlayerSpawnPoints[i])
def placeToonsNearBattle(self, toons):
for i in xrange(len(toons)):
toons[i].setPosHpr(self.nearBattleNode, *CogdoBarrelRoomConsts.BarrelRoomPlayerSpawnPoints[i])
def showBattleAreaLight(self, visible = True):
lightConeNode = self.model.find('**/battleCone')
if lightConeNode != None and not lightConeNode.isEmpty():
if visible:
lightConeNode.show()
else:
lightConeNode.hide()
return
def getIntroInterval(self):
avatar = base.localAvatar
trackName = '__introBarrelRoom-%d' % avatar.doId
track = Parallel(name=trackName)
track.append(self.__stomperIntervals())
track.append(Sequence(Func(camera.reparentTo, render), Func(camera.setPosHpr, self.model, -20.0, -87.9, 12.0, -30, 0, 0), Func(base.transitions.irisIn, 0.5), Wait(1.0), LerpHprInterval(camera, duration=2.0, startHpr=Vec3(-30, 0, 0), hpr=Vec3(0, 0, 0), blendType='easeInOut'), Wait(2.5), LerpHprInterval(camera, duration=3.0, startHpr=Vec3(0, 0, 0), hpr=Vec3(-45, 0, 0), blendType='easeInOut'), Wait(2.5)))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'introBarrelRoomTrack')
track.setDoneEvent(trackName)
return (track, trackName)
def __stomperIntervals(self):
ivals = [SoundInterval(self.stomperSfx)]
i = 0
for stomperDef in CogdoBarrelRoomConsts.StomperProps:
stomperNode = render.find(stomperDef['path'])
if stomperNode:
maxZ = random.uniform(10, 20)
minZ = maxZ - 10
if stomperDef['motion'] == 'up':
startZ, destZ = minZ, maxZ
else:
startZ, destZ = maxZ, minZ
stomperNode.setPos(Point3(0, 0, startZ))
ivals.append(LerpPosInterval(stomperNode, CogdoBarrelRoomConsts.StomperHaltTime, Point3(0, 0, destZ), blendType='easeOut'))
i += 1
return Parallel(*tuple(ivals))
def __rewardUiTimeout(self, callback):
self.hideRewardUi()
if callback is not None:
callback()
return
def __rewardCamera(self):
trackName = 'cogdoBarrelRoom-RewardCamera'
track = Sequence(Func(camera.reparentTo, render), Func(camera.setPosHpr, self.model, 0, 0, 11.0, 0, -14, 0), Func(self.showBattleAreaLight, False), name=trackName)
return (track, trackName)
def showRewardUi(self, callback = None):
track, trackName = self.__rewardCamera()
if CogdoBarrelRoomConsts.ShowRewardUI:
self.rewardUi.setRewards()
self.rewardUi.unstash()
taskMgr.doMethodLater(CogdoBarrelRoomConsts.RewardUiTime, self.__rewardUiTimeout, self.rewardUiTaskName, extraArgs=[callback])
return (track, trackName)
def setRewardResults(self, results):
self.rewardUi.setRewards(results)
def hideRewardUi(self):
self.rewardUi.stash()
taskMgr.remove(self.rewardUiTaskName) | [
"[email protected]"
] | |
46fe97177f3bbbce62cfb44bf9651cb61d005666 | 39d17148ac0517af1b0fdfc9e0f2d962183b7953 | /dev/benchmarks/bench_check.py | d393c4fdb7662e18e06543bc2386b656eabe00af | [
"Apache-2.0"
] | permissive | Erotemic/progiter | a784f69fbca990a53d7f5abd56527f56b720c7f1 | 2d8e1a90b2de59e74c21b5f026832a119aa840e1 | refs/heads/main | 2023-06-22T05:01:03.177353 | 2023-06-20T17:36:08 | 2023-06-20T17:36:08 | 134,924,299 | 15 | 2 | Apache-2.0 | 2023-06-11T21:35:08 | 2018-05-26T02:59:51 | Python | UTF-8 | Python | false | false | 3,753 | py | import ubelt as ub
import progiter
import timerit
def basic_benchmark():
"""
Run the simplest benchmark where we iterate over nothing and compare the
slowdown of using a progress iterator versus doing nothing.
"""
N = 100_000
ti = timerit.Timerit(21, bestof=3, verbose=2)
for timer in ti.reset('baseline'):
for i in range(N):
...
# for timer in ti.reset('ubelt progiter'):
# for i in ub.ProgIter(range(N)):
# ...
for timer in ti.reset('progiter, enabled=False'):
for i in progiter.ProgIter(range(N), enabled=False):
...
for timer in ti.reset('progiter, homogeneous=True'):
for i in progiter.ProgIter(range(N), homogeneous=True):
...
for timer in ti.reset('progiter, homogeneous=auto'):
for i in progiter.ProgIter(range(N), homogeneous='auto'):
...
for timer in ti.reset('progiter, homogeneous=False'):
for i in progiter.ProgIter(range(N), homogeneous=False):
...
import tqdm
for timer in ti.reset('tqdm'):
for i in tqdm.tqdm(range(N)):
...
if 1:
from rich.live import Live
from rich.progress import Progress as richProgress
for timer in ti.reset('rich.progress'):
prog_manager = richProgress()
task_id = prog_manager.add_task(description='', total=N)
live_context = Live(prog_manager)
with live_context:
for i in range(N):
prog_manager.update(task_id, advance=1)
import pandas as pd
df = pd.DataFrame.from_dict(ti.rankings['mean'], orient='index', columns=['mean'])
df.loc[list(ti.rankings['min'].keys()), 'min'] = list(ti.rankings['min'].values())
df['mean_rel_overhead'] = df['mean'] / df.loc['baseline', 'mean']
df['min_rel_overhead'] = df['min'] / df.loc['baseline', 'min']
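    # The *_rel_overhead columns express each variant's runtime as a multiple of
    # the bare "baseline" loop, so values close to 1.0 mean the progress wrapper
    # adds essentially no measurable cost for this trivial loop body.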
print(df.to_string())
def other_tests():
N = 100
###########
with ub.Timer(label='progiter fixed freq=10'):
for i in progiter.ProgIter(range(N), freq=10, adjust=False):
pass
with ub.Timer(label='ubelt fixed freq=10'):
for i in ub.ProgIter(range(N), freq=10, adjust=False):
pass
with ub.Timer(label='progiter fixed freq=1'):
for i in progiter.ProgIter(range(N), freq=1, adjust=False):
pass
with ub.Timer(label='ubelt fixed freq=1'):
for i in ub.ProgIter(range(N), freq=1, adjust=False):
pass
import timerit
import time
ti = timerit.Timerit(100000, bestof=10, verbose=2)
for timer in ti.reset('time.process_time()'):
with timer:
time.process_time()
for timer in ti.reset('time.process_time_ns()'):
with timer:
time.process_time_ns()
for timer in ti.reset('time.time()'):
with timer:
time.time()
for timer in ti.reset('time.time_ns()'):
with timer:
time.time_ns()
for timer in ti.reset('time.perf_counter()'):
with timer:
time.perf_counter()
for timer in ti.reset('time.perf_counter_ns()'):
with timer:
time.perf_counter_ns()
for timer in ti.reset('time.thread_time()'):
with timer:
time.thread_time()
for timer in ti.reset('time.monotonic()'):
with timer:
time.monotonic()
for timer in ti.reset('time.monotonic_ns()'):
with timer:
time.monotonic_ns()
print('ti.rankings = {}'.format(ub.repr2(ti.rankings, nl=2, align=':', precision=8)))
if __name__ == '__main__':
"""
CommandLine:
python ~/code/progiter/dev/benchmarks/bench_check.py
"""
basic_benchmark()
| [
"[email protected]"
] | |
88983ba58579d66596f8efac20102f270c9fba97 | a97fb0584709e292a475defc8506eeb85bb24339 | /source code/code-Python 3.0.1/ch405.py | 6791b224e8992f7ce92ddf7376f1155f90e2870e | [] | no_license | AAQ6291/PYCATCH | bd297858051042613739819ed70c535901569079 | 27ec4094be785810074be8b16ef84c85048065b5 | refs/heads/master | 2020-03-26T13:54:57.051016 | 2018-08-17T09:05:19 | 2018-08-17T09:05:19 | 144,963,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Declare the user and passwd variables to receive the account name and password entered by the user
user = input("login:")
passwd = input("password (empty for guest):")
## Use the string strip() function to remove whitespace from the user input, since the user may have typed extra space characters
user = user.strip()
passwd = passwd.strip()
if (user == "" and passwd == "") or (user =="" and passwd !=""):
print("username or password cannot be empty.")
elif user == "admin" and passwd == "!d^*^BM(;.":
print("welcome administrator!")
elif user == "guest" and passwd == "":
print("welcome, you're guest.")
elif user == "huang" and passwd == "12345":
print("hello, huang!")
else:
print("wrong username or password.")
| [
"[email protected]"
] | |
64b5a847f9463a91bd9efb546c74636a4fb7aec1 | 8c73955b7b3b8e7893e8ff3d78341a99a66f6c12 | /src/train.py | 8c7ed1b65cf8e7129a30f83b730aafb482fdd906 | [] | no_license | akiFQC/shinra-attribute-extraction | ba9452d005830b6c24c80d166a8ff3bcf82a70b8 | 633f65ec5b61b8937fdf9cf24fe4ae07960e93fd | refs/heads/main | 2023-07-14T03:22:16.927915 | 2021-08-12T08:34:03 | 2021-08-12T08:34:03 | 396,015,806 | 0 | 0 | null | 2021-08-14T13:22:14 | 2021-08-14T13:22:13 | null | UTF-8 | Python | false | false | 5,631 | py | import argparse
import sys
from pathlib import Path
import json
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
from transformers import AutoTokenizer, AutoModel
from tqdm import tqdm
from seqeval.metrics import f1_score, classification_report
import mlflow
from sklearn.model_selection import train_test_split
from dataset import ShinraData
from dataset import NerDataset, ner_collate_fn, decode_iob
from model import BertForMultilabelNER, create_pooler_matrix
from predict import predict
device = "cuda:1" if torch.cuda.is_available() else "cpu"
class EarlyStopping():
def __init__(self, patience=0, verbose=0):
self._step = 0
self._score = - float('inf')
self.patience = patience
self.verbose = verbose
def validate(self, score):
if self._score > score:
self._step += 1
if self._step > self.patience:
if self.verbose:
print('early stopping')
return True
else:
self._step = 0
self._score = score
return False
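# Illustrative use of EarlyStopping (the monitored score is "higher is better",
# e.g. the validation F1 used in train() below):
#   stopper = EarlyStopping(patience=3, verbose=1)
#   for epoch in range(max_epochs):
#       f1 = evaluate(...)
#       if stopper.validate(f1):
#           break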
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--input_path", type=str, help="Specify input path in SHINRA2020")
parser.add_argument("--model_path", type=str, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--lr", type=float, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--bsz", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--epoch", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--grad_acc", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--grad_clip", type=float, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--note", type=str, help="Specify attribute_list path in SHINRA2020")
args = parser.parse_args()
return args
def evaluate(model, dataset, attributes, args):
total_preds, total_trues = predict(model, dataset, device)
total_preds = decode_iob(total_preds, attributes)
total_trues = decode_iob(total_trues, attributes)
f1 = f1_score(total_trues, total_preds)
return f1
def train(model, train_dataset, valid_dataset, attributes, args):
optimizer = optim.AdamW(model.parameters(), lr=args.lr)
# scheduler = get_scheduler(
# args.bsz, args.grad_acc, args.epoch, args.warmup, optimizer, len(train_dataset))
early_stopping = EarlyStopping(patience=10, verbose=1)
losses = []
for e in range(args.epoch):
train_dataloader = DataLoader(train_dataset, batch_size=args.bsz, collate_fn=ner_collate_fn, shuffle=True)
bar = tqdm(total=len(train_dataset))
total_loss = 0
model.train()
for step, inputs in enumerate(train_dataloader):
input_ids = inputs["tokens"]
word_idxs = inputs["word_idxs"]
labels = inputs["labels"]
labels = [pad_sequence([torch.tensor(l) for l in label], padding_value=-1, batch_first=True).to(device)
for label in labels]
input_ids = pad_sequence([torch.tensor(t) for t in input_ids], padding_value=0, batch_first=True).to(device)
attention_mask = input_ids > 0
pooling_matrix = create_pooler_matrix(input_ids, word_idxs, pool_type="head").to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
pooling_matrix=pooling_matrix)
loss = outputs[0]
loss.backward()
total_loss += loss.item()
mlflow.log_metric("Trian batch loss", loss.item(), step=(e+1) * step)
bar.set_description(f"[Epoch] {e + 1}")
bar.set_postfix({"loss": loss.item()})
bar.update(args.bsz)
if (step + 1) % args.grad_acc == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip
)
optimizer.step()
# scheduler.step()
optimizer.zero_grad()
losses.append(total_loss / (step+1))
mlflow.log_metric("Trian loss", losses[-1], step=e)
valid_f1 = evaluate(model, valid_dataset, attributes, args)
mlflow.log_metric("Valid F1", valid_f1, step=e)
if early_stopping._score < valid_f1:
torch.save(model.state_dict(), args.model_path + "best.model")
if e + 1 > 30 and early_stopping.validate(valid_f1):
break
if __name__ == "__main__":
args = parse_arg()
bert = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
# dataset = [ShinraData(), ....]
dataset = ShinraData.from_shinra2020_format(Path(args.input_path))
dataset = [d for d in dataset if d.nes is not None]
model = BertForMultilabelNER(bert, len(dataset[0].attributes)).to(device)
train_dataset, valid_dataset = train_test_split(dataset, test_size=0.1)
train_dataset = NerDataset([d for train_d in train_dataset for d in train_d.ner_inputs], tokenizer)
valid_dataset = NerDataset([d for valid_d in valid_dataset for d in valid_d.ner_inputs], tokenizer)
mlflow.start_run()
mlflow.log_params(vars(args))
train(model, train_dataset, valid_dataset, dataset[0].attributes, args)
torch.save(model.state_dict(), args.model_path + "last.model")
mlflow.end_run()
| [
"[email protected]"
] | |
93b678058a71e6771be47a04bd1790e2246fbffb | bbac91977974702ce52fc0be2e279dc7750f7401 | /backend/serializers.py | 435150b1d1ced60909c704c0a6783b467c8af0fc | [] | no_license | Occy88/BiddingSystem | bbb0cc85a3621622cbbcb1313fbe1f1fc74a8f72 | a8619bad0efee8d2256ef11f358d99c21e5a67b2 | refs/heads/master | 2023-01-13T08:53:16.450312 | 2019-12-19T18:19:59 | 2019-12-19T18:19:59 | 226,520,713 | 0 | 0 | null | 2022-12-13T00:50:21 | 2019-12-07T13:48:08 | JavaScript | UTF-8 | Python | false | false | 2,235 | py | from rest_framework import serializers
from .models import Bid, Session
from django.contrib.auth.models import Group
from django.conf import settings
from pydoc import locate
from guardian.shortcuts import assign_perm, remove_perm
class BidSerializer(serializers.ModelSerializer):
# to work out all the fk relationships be clever about what to show...
# perhaps nothing?
# perhaps Groups?
# shipment_sites = serializers.PrimaryKeyRelatedField(many=True, queryset=ShipmentSite.objects.all())
class Meta:
model = Bid
fields = ('id','user', 'time', 'price', 'quantity')
def create(self, validated_data):
"""
        Create and return a new `Bid` instance, given the validated data.
"""
# validated_data.pop('shipments', None)
bid = Bid.objects.create(**validated_data)
return bid
class SessionSerializer(serializers.ModelSerializer):
    # to work out all the fk relationships be clever about what to show...
    # perhaps nothing?
    # perhaps Groups?
    # shipment_sites = serializers.PrimaryKeyRelatedField(many=True, queryset=ShipmentSite.objects.all())
    bid_set = BidSerializer(many=True)
    class Meta:
        model = Session
        fields = ('id', 'time_start', 'active', 'bid_set')
    def create(self, validated_data):
        """
        Create and return a new `Session` instance, given the validated data.
        """
        # validated_data.pop('shipments', None)
        session = Session.objects.create(**validated_data)
        return session
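# Illustrative nested output of SessionSerializer (all field values are made up):
#   {"id": 1, "time_start": "2019-12-01T10:00:00Z", "active": true,
#    "bid_set": [{"id": 3, "user": 2, "time": "...", "price": 10.0, "quantity": 5}]}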
| [
"[email protected]"
] | |
9ef29961f26a8da8006d6b5e60b61e46c0c62589 | 50500f7e7afc0a401cfa99bdaf438d3db90072f2 | /biothings_explorer/tests/test_biolink.py | 4e4ee740fe09c60983633aab11272b78500c4821 | [
"Apache-2.0"
] | permissive | andrewsu/bte_schema | 903112b8344512bfe608d77948d864bb134596df | b727dfded0d10b32ef215094715171ef94c38e34 | refs/heads/master | 2020-07-27T02:59:40.552124 | 2019-11-12T17:48:18 | 2019-11-12T17:48:18 | 208,845,945 | 0 | 0 | Apache-2.0 | 2019-09-16T16:23:42 | 2019-09-16T16:23:42 | null | UTF-8 | Python | false | false | 7,109 | py | import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
class TestSingleHopQuery(unittest.TestCase):
def setUp(self):
self.reg = Registry()
def test_anatomy2gene(self):
        # test <anatomy, associatedWith, gene>
seqd = SingleEdgeQueryDispatcher(input_cls='AnatomicalEntity',
input_id='bts:uberon',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='UBERON:0004720',
registry=self.reg)
seqd.query()
self.assertTrue('30881' in seqd.G)
def test_disease2gene(self):
# test <chemical, interactswith, anatomy>
seqd = SingleEdgeQueryDispatcher(input_cls='AnatomicalEntity',
input_id='bts:uberon',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='UBERON:0004720',
registry=self.reg)
seqd.query()
self.assertTrue('30881' in seqd.G)
def test_disease2pathway(self):
seqd = SingleEdgeQueryDispatcher(input_cls='DiseaseOrPhenotypicFeature',
input_id='bts:mondo',
output_cls='Pathway',
output_id='bts:reactome',
pred='bts:associatedWith',
values='MONDO:0018492',
registry=self.reg)
seqd.query()
self.assertTrue('R-HSA-110330' in seqd.G)
def test_disease2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='DiseaseOrPhenotypicFeature',
input_id='bts:mondo',
output_cls='PhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='MONDO:0010997',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0002063' in seqd.G)
def test_gene2anatomy(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='AnatomicalEntity',
output_id='bts:uberon',
pred='bts:associatedWith',
values='13434',
registry=self.reg)
seqd.query()
self.assertTrue('UBERON:0000988' in seqd.G)
def test_gene2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='PhenotypicFeature',
output_id='bts:hp',
pred='bts:associatedWith',
values='13434',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0040218' in seqd.G)
def test_geneinteraction(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='Gene',
output_id='bts:hp',
pred='bts:molecularlyInteractsWith',
values='1017',
registry=self.reg)
seqd.query()
self.assertTrue('27230' in seqd.G)
def test_pathway2disease(self):
        # test <pathway, associatedWith, disease>
seqd = SingleEdgeQueryDispatcher(input_cls='Pathway',
input_id='bts:reactome',
output_cls='DiseaseOrPhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='R-HSA-210745',
registry=self.reg)
seqd.query()
self.assertTrue('MONDO:0017885' in seqd.G)
def test_pathway2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Pathway',
input_id='bts:reactome',
output_cls='PhenotypicFeature',
output_id='bts:hp',
pred='bts:associatedWith',
values='R-HSA-210745',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0004904' in seqd.G)
def test_phenotype2disease(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='DiseaseOrPhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('MONDO:0010894' in seqd.G)
def test_phenotype2gene(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('4195' in seqd.G)
def test_phenotype2pathway(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='Pathway',
output_id='bts:reactome',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('R-HSA-210745' in seqd.G)
| [
"[email protected]"
] | |
35663816d738485bdf96919fc9ce7de0352203df | aa7de5b75b65404715676121d61a9b06348d5f62 | /dashboard/dashboard/add_point_queue.py | 5bfbef37ce63c0d91e299373ded283810fc6d96b | [
"BSD-3-Clause"
] | permissive | benschmaus/catapult | 3ca2ede51e4a23082e634fa07a03c11158bd6d9a | f388b1f6b90c670b6524fd68a295bae26ba8db70 | refs/heads/master | 2021-01-20T07:53:45.431708 | 2017-07-17T18:03:09 | 2017-07-17T18:03:09 | 90,060,605 | 0 | 1 | null | 2017-05-02T17:38:42 | 2017-05-02T17:38:41 | null | UTF-8 | Python | false | false | 12,511 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint to add new graph data to the datastore."""
import json
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from dashboard import add_point
from dashboard import find_anomalies
from dashboard import graph_revisions
from dashboard import units_to_direction
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
BOT_WHITELIST_KEY = 'bot_whitelist'
class AddPointQueueHandler(request_handler.RequestHandler):
"""Request handler to process points and add them to the datastore.
This request handler is intended to be used only by requests using the
task queue; it shouldn't be directly from outside.
"""
def get(self):
"""A get request is the same a post request for this endpoint."""
self.post()
def post(self):
"""Adds a set of points from the post data.
Request parameters:
data: JSON encoding of a list of dictionaries. Each dictionary represents
one point to add. For each dict, one Row entity will be added, and
any required TestMetadata or Master or Bot entities will be created.
"""
datastore_hooks.SetPrivilegedRequest()
data = json.loads(self.request.get('data'))
_PrewarmGets(data)
bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)
all_put_futures = []
added_rows = []
monitored_test_keys = []
for row_dict in data:
try:
new_row, parent_test, put_futures = _AddRow(row_dict, bot_whitelist)
added_rows.append(new_row)
is_monitored = parent_test.sheriff and parent_test.has_rows
if is_monitored:
monitored_test_keys.append(parent_test.key)
all_put_futures.extend(put_futures)
except add_point.BadRequestError as e:
logging.error('Could not add %s, it was invalid.', e.message)
except datastore_errors.BadRequestError as e:
logging.info('While trying to store %s', row_dict)
logging.error('Datastore request failed: %s.', e.message)
return
ndb.Future.wait_all(all_put_futures)
tests_keys = [k for k in monitored_test_keys if not _IsRefBuild(k)]
# Updating of the cached graph revisions should happen after put because
# it requires the new row to have a timestamp, which happens upon put.
futures = [
graph_revisions.AddRowsToCacheAsync(added_rows),
find_anomalies.ProcessTestsAsync(tests_keys)]
ndb.Future.wait_all(futures)
def _PrewarmGets(data):
"""Prepares the cache so that fetching is faster later.
The add_point request handler does a LOT of gets, and it's possible for
each to take seconds.
However, NDB will does automatic in-context caching:
https://developers.google.com/appengine/docs/python/ndb/cache#incontext
This means that doing an async get() at the start will cache the result, so
that we can prewarm the cache for everything we'll need throughout the
request at the start.
Args:
data: The request json.
"""
# Prewarm lookups of masters, bots, and tests.
master_keys = {ndb.Key('Master', r['master']) for r in data}
bot_keys = {ndb.Key('Master', r['master'], 'Bot', r['bot']) for r in data}
test_keys = set()
for row in data:
start = '%s/%s' % (row['master'], row['bot'])
test_parts = row['test'].split('/')
for part in test_parts:
if not part:
break
start += '/%s' % part
test_keys.add(ndb.Key('TestMetadata', start))
ndb.get_multi_async(list(master_keys) + list(bot_keys) + list(test_keys))
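# For example (illustrative input), a row {'master': 'M', 'bot': 'B', 'test': 'a/b'}
# prewarms the Master key 'M', the Bot key 'B' under that master, and the
# TestMetadata keys 'M/B/a' and 'M/B/a/b'.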
def _AddRow(row_dict, bot_whitelist):
"""Adds a Row entity to the datastore.
There are three main things that are needed in order to make a new entity;
the ID, the parent key, and all of the properties. Making these three
things, and validating the related input fields, are delegated to
sub-functions.
Args:
row_dict: A dictionary obtained from the JSON that was received.
bot_whitelist: A list of whitelisted bots names.
Returns:
A triple: The new row, the parent test, and a list of entity put futures.
Raises:
add_point.BadRequestError: The input dict was invalid.
RuntimeError: The required parent entities couldn't be created.
"""
parent_test = _GetParentTest(row_dict, bot_whitelist)
test_container_key = utils.GetTestContainerKey(parent_test.key)
columns = add_point.GetAndValidateRowProperties(row_dict)
columns['internal_only'] = parent_test.internal_only
row_id = add_point.GetAndValidateRowId(row_dict)
# Update the last-added revision record for this test.
master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
test_path = '%s/%s/%s' % (master, bot, test)
last_added_revision_entity = graph_data.LastAddedRevision(
id=test_path, revision=row_id)
entity_put_futures = []
entity_put_futures.append(last_added_revision_entity.put_async())
# If the row ID isn't the revision, that means that the data is Chrome OS
# data, and we want the default revision to be Chrome version.
if row_id != row_dict.get('revision'):
columns['a_default_rev'] = 'r_chrome_version'
# Create the entity and add it asynchronously.
new_row = graph_data.Row(id=row_id, parent=test_container_key, **columns)
entity_put_futures.append(new_row.put_async())
return new_row, parent_test, entity_put_futures
def _GetParentTest(row_dict, bot_whitelist):
"""Gets the parent test for a Row based on an input dictionary.
Args:
row_dict: A dictionary from the data parameter.
bot_whitelist: A list of whitelisted bot names.
Returns:
A TestMetadata entity.
Raises:
RuntimeError: Something went wrong when trying to get the parent test.
"""
master_name = row_dict.get('master')
bot_name = row_dict.get('bot')
test_name = row_dict.get('test').strip('/')
units = row_dict.get('units')
higher_is_better = row_dict.get('higher_is_better')
improvement_direction = _ImprovementDirection(higher_is_better)
internal_only = BotInternalOnly(bot_name, bot_whitelist)
benchmark_description = row_dict.get('benchmark_description')
parent_test = GetOrCreateAncestors(
master_name, bot_name, test_name, internal_only=internal_only,
benchmark_description=benchmark_description, units=units,
improvement_direction=improvement_direction)
return parent_test
def _ImprovementDirection(higher_is_better):
"""Returns an improvement direction (constant from alerts_data) or None."""
if higher_is_better is None:
return None
return anomaly.UP if higher_is_better else anomaly.DOWN
def BotInternalOnly(bot_name, bot_whitelist):
"""Checks whether a given bot name is internal-only.
If a bot name is internal only, then new data for that bot should be marked
as internal-only.
"""
if not bot_whitelist:
logging.warning(
'No bot whitelist available. All data will be internal-only. If this '
'is not intended, please add a bot whitelist using /edit_site_config.')
return True
return bot_name not in bot_whitelist
def GetOrCreateAncestors(
master_name, bot_name, test_name, internal_only=True,
benchmark_description='', units=None, improvement_direction=None):
"""Gets or creates all parent Master, Bot, TestMetadata entities for a Row."""
master_entity = _GetOrCreateMaster(master_name)
_GetOrCreateBot(bot_name, master_entity.key, internal_only)
# Add all ancestor tests to the datastore in order.
ancestor_test_parts = test_name.split('/')
test_path = '%s/%s' % (master_name, bot_name)
suite = None
for index, ancestor_test_name in enumerate(ancestor_test_parts):
# Certain properties should only be updated if the TestMetadata is for a
# leaf test.
is_leaf_test = (index == len(ancestor_test_parts) - 1)
test_properties = {
'units': units if is_leaf_test else None,
'internal_only': internal_only,
}
if is_leaf_test and improvement_direction is not None:
test_properties['improvement_direction'] = improvement_direction
ancestor_test = _GetOrCreateTest(
ancestor_test_name, test_path, test_properties)
if index == 0:
suite = ancestor_test
test_path = ancestor_test.test_path
if benchmark_description and suite.description != benchmark_description:
suite.description = benchmark_description
return ancestor_test
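# Illustrative walk-through: master 'ChromiumPerf', bot 'linux' and test
# 'sunspider/total' create (or fetch) TestMetadata entities with ids
# 'ChromiumPerf/linux/sunspider' (treated as the suite) and
# 'ChromiumPerf/linux/sunspider/total', the latter being returned as the leaf.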
def _GetOrCreateMaster(name):
"""Gets or creates a new Master."""
existing = graph_data.Master.get_by_id(name)
if existing:
return existing
new_entity = graph_data.Master(id=name)
new_entity.put()
return new_entity
def _GetOrCreateBot(name, parent_key, internal_only):
"""Gets or creates a new Bot under the given Master."""
existing = graph_data.Bot.get_by_id(name, parent=parent_key)
if existing:
if existing.internal_only != internal_only:
existing.internal_only = internal_only
existing.put()
return existing
logging.info('Adding bot %s/%s', parent_key.id(), name)
new_entity = graph_data.Bot(
id=name, parent=parent_key, internal_only=internal_only)
new_entity.put()
return new_entity
def _GetOrCreateTest(name, parent_test_path, properties):
"""Either gets an entity if it already exists, or creates one.
If the entity already exists but the properties are different than the ones
specified, then the properties will be updated first. This implies that a
new point is being added for an existing TestMetadata, so if the TestMetadata
has been previously marked as deprecated then it can be updated and marked as
non-deprecated.
If the entity doesn't yet exist, a new one will be created with the given
properties.
Args:
name: The string ID of the Test to get or create.
parent_test_path: The test_path of the parent entity.
properties: A dictionary of properties that should be set.
Returns:
An entity (which has already been put).
Raises:
datastore_errors.BadRequestError: Something went wrong getting the entity.
"""
test_path = '%s/%s' % (parent_test_path, name)
existing = graph_data.TestMetadata.get_by_id(test_path)
if not existing:
# Add improvement direction if this is a new test.
if 'units' in properties and 'improvement_direction' not in properties:
units = properties['units']
direction = units_to_direction.GetImprovementDirection(units)
properties['improvement_direction'] = direction
elif 'units' not in properties or properties['units'] is None:
properties['improvement_direction'] = anomaly.UNKNOWN
else:
print properties
new_entity = graph_data.TestMetadata(id=test_path, **properties)
new_entity.put()
# TODO(sullivan): Consider putting back Test entity in a scoped down
# form so we can check if it exists here.
return new_entity
# Flag indicating whether we want to re-put the entity before returning.
properties_changed = False
if existing.deprecated:
existing.deprecated = False
properties_changed = True
# Special case to update improvement direction from units for TestMetadata
# entities when units are being updated. If an improvement direction is
# explicitly provided in the properties, then we can skip this check since it
# will get overwritten below. Additionally, by skipping we avoid
# touching the entity and setting off an expensive put() operation.
if properties.get('improvement_direction') is None:
units = properties.get('units')
if units:
direction = units_to_direction.GetImprovementDirection(units)
if direction != existing.improvement_direction:
properties['improvement_direction'] = direction
# Go through the list of general properties and update if necessary.
for prop, value in properties.items():
if (hasattr(existing, prop) and value is not None and
getattr(existing, prop) != value):
setattr(existing, prop, value)
properties_changed = True
if properties_changed:
existing.put()
return existing
def _IsRefBuild(test_key):
"""Checks whether a TestMetadata is for a reference build test run."""
test_parts = test_key.id().split('/')
return test_parts[-1] == 'ref' or test_parts[-1].endswith('_ref')
| [
"[email protected]"
] | |
352e3c026ac9e26500a33e7d5012aeabdb121d71 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/ospfv3/domaf.py | 02ac83a1a12bc7d0b567879cf7630854453672ad | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,405 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DomAf(Mo):
"""
The OSPF address family domain (VRF) information.
"""
meta = ClassMeta("cobra.model.ospfv3.DomAf")
meta.moClassName = "ospfv3DomAf"
meta.rnFormat = "domaf-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "Address Family Domain"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.ospfv3.RibLeakP")
meta.childClasses.add("cobra.model.ospfv3.ExtRtSum")
meta.childClasses.add("cobra.model.ospfv3.InterLeakP")
meta.childClasses.add("cobra.model.ospfv3.DefRtLeakP")
meta.childClasses.add("cobra.model.ospfv3.LeakCtrlP")
meta.childNamesAndRnPrefix.append(("cobra.model.ospfv3.InterLeakP", "interleak-"))
meta.childNamesAndRnPrefix.append(("cobra.model.ospfv3.ExtRtSum", "extrtsum-"))
meta.childNamesAndRnPrefix.append(("cobra.model.ospfv3.DefRtLeakP", "defrtleak"))
meta.childNamesAndRnPrefix.append(("cobra.model.ospfv3.LeakCtrlP", "leakctrl"))
meta.childNamesAndRnPrefix.append(("cobra.model.ospfv3.RibLeakP", "ribleak"))
meta.parentClasses.add("cobra.model.ospfv3.Dom")
meta.superClasses.add("cobra.model.nw.ProtDom")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.nw.CpDom")
meta.superClasses.add("cobra.model.nw.GEp")
meta.superClasses.add("cobra.model.ospf.Af")
meta.superClasses.add("cobra.model.l3.ProtDom")
meta.rnPrefixes = [
('domaf-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 16434, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 17480, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 2
prop.defaultValueStr = "ipv6-ucast"
prop._addConstant("ipv4-ucast", "ipv4-unicast-address-family", 1)
prop._addConstant("ipv6-ucast", "ipv6-unicast-address-family", 2)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
ddce670d767f93002e8d4bf8043d71eab1477f7d | f72ecf85bc1d6b4014af4b35f7677adb7c3a77f3 | /venv/bin/flask | 86ab5e43da087b90a51022571fa6526c52effbb4 | [] | no_license | PropeReferio/covid19dashapp | cef5a803a26a00fc5a7adca57625d7f3de8710f8 | aea672aca23e0d6782080c966b24da6d826e1f91 | refs/heads/master | 2022-07-14T23:09:21.063273 | 2020-11-01T18:46:14 | 2020-11-01T18:46:14 | 253,976,374 | 0 | 0 | null | 2022-06-22T01:41:02 | 2020-04-08T03:32:05 | Python | UTF-8 | Python | false | false | 264 | #!/home/bo/Desktop/Freelance/cloud-covid-dash/covid19-dash/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
3ffaa9adfc101e07ad05a1009bd90c9f436236e6 | 8ecf4930f9aa90c35e5199d117068b64a8d779dd | /TopQuarkAnalysis/SingleTop/test/Mu_2011A_08Nov_part_13_cfg.py | c75a13863b8955018a45775a881133abc70b43f0 | [] | no_license | fabozzi/ST_44 | 178bd0829b1aff9d299528ba8e85dc7b7e8dd216 | 0becb8866a7c758d515e70ba0b90c99f6556fef3 | refs/heads/master | 2021-01-20T23:27:07.398661 | 2014-04-14T15:12:32 | 2014-04-14T15:12:32 | 18,765,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,786 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("SingleTopSystematics")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
FailPath = cms.untracked.vstring('ProductNotFound','Type Mismatch')
)
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff") ### real data
process.GlobalTag.globaltag = cms.string("START44_V13::All")
#Load B-Tag
#MC measurements from 36X
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDBMC36X")
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDBMC36X")
##Measurements from Fall10
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1011")
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1011")
#Spring11
process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1107")
process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1107")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20000))
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'file:/tmp/mmerola/DataMerged.root',
#'rfio:/castor/cern.ch/user/m/mmerola/SingleTop_2012/MergedJune/DataMerged.root',
),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
#eventsToProcess = cms.untracked.VEventRange('1:19517967-1:19517969'),
)
#from Data import *
#process.source.fileNames = Data_ntuple
#process.source.fileNames = cms.untracked.vstring("file:/tmp/mmerola/DataMerged.root")
#PileUpSync
#Output
#process.TFileService = cms.Service("TFileService", fileName = cms.string("/castor/cern.ch/user/m/mmerola/SingleTop_2012/TreesJune/Mu_2011A_08Nov_part_13.root"))
process.TFileService = cms.Service("TFileService", fileName = cms.string("/tmp/mmerola/Mu_2011A_08Nov_part_13.root"))
#process.TFileService = cms.Service("TFileService", fileName = cms.string("testNoPU.root"))
#process.load("SingleTopAnalyzers_cfi")
process.load("SingleTopRootPlizer_cfi")
process.load("SingleTopFilters_cfi")
#from SingleTopPSets_cfi import *
#from SingleTopPSetsFall11_cfi import *
from SingleTopPSetsFall_cfi import *
process.TreesEle.dataPUFile = cms.untracked.string("pileUpDistr.root")
process.TreesMu.dataPUFile = cms.untracked.string("pileUpDistr.root")
#process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.channelInfo = DataEle
process.TreesMu.channelInfo = DataMu
#process.PlotsEle.channelInfo = DataEle
#process.PlotsMu.channelInfo = DataMu
#process.TreesMu.systematics = cms.untracked.vstring();
#doPU = cms.untracked.bool(False)
#process.WeightProducer.doPU = cms.untracked.bool(False)
#process.TreesMu.doQCD = cms.untracked.bool(False)
#process.TreesEle.doQCD = cms.untracked.bool(False)
#process.TreesMu.doResol = cms.untracked.bool(False)
#process.TreesEle.doResol = cms.untracked.bool(False)
#process.TreesMu.doPU = cms.untracked.bool(False)
#process.TreesEle.doPU = cms.untracked.bool(False)
channel_instruction = "mu" #SWITCH_INSTRUCTION
#channel_instruction = "allmc" #SWITCH_INSTRUCTION
MC_instruction = False #TRIGGER_INSTRUCTION
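# The two switches above select which path is scheduled below: "mu"/"ele" run a
# single lepton channel on data, "muqcd"/"eleqcd" build the QCD control samples,
# and "all"/"allmc" schedule both channels (on data or on MC respectively).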
process.HLTFilterMu.isMC = MC_instruction
process.HLTFilterEle.isMC = MC_instruction
process.HLTFilterMuOrEle.isMC = MC_instruction
process.HLTFilterMuOrEleMC.isMC = MC_instruction
#process.PUWeightsPath = cms.Path(
# process.WeightProducer
#)
if channel_instruction == "allmc":
# process.TreesMu.doResol = cms.untracked.bool(True)
# process.TreesEle.doResol = cms.untracked.bool(True)
# process.TreesEle.doTurnOn = cms.untracked.bool(True)
process.PathSysMu = cms.Path(
process.HLTFilterMuMC *
process.TreesMu
)
process.PathSysEle = cms.Path(
process.HLTFilterEleMC *
process.TreesEle
)
if channel_instruction == "all":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesMu.doPU = cms.untracked.bool(False)
process.PathSys = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuOrEle *
process.TreesMu +
process.TreesEle
)
if channel_instruction == "mu":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
# process.HLTFilterMu *
process.HLTFilterMuData *
process.TreesMu
)
if channel_instruction == "ele":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEle *
process.TreesEle
)
if channel_instruction == "muqcd":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuQCD *
process.TreesMu
)
if channel_instruction == "eleqcd":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.TreesEle.isControlSample = cms.untracked.bool(True)
process.PathSysEle = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEleQCD *
process.TreesEle
)
process.source.fileNames = cms.untracked.vstring('file:/tmp/mmerola/Mu_2011A_08Nov_part_13Merged.root',) | [
"[email protected]"
] | |
0c1eaf6ef7a2bb9f5dddea73b74a3522c161aa8e | 31a0b0749c30ff37c3a72592387f9d8195de4bd6 | /release/ray_release/cluster_manager/full.py | 766362e1e86753db1292ea1840b740d60951cc41 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | longshotsyndicate/ray | 15100bad514b602a3fa39bfe205288e7bec75d90 | 3341fae573868338b665bcea8a1c4ee86b702751 | refs/heads/master | 2023-01-28T15:16:00.401509 | 2022-02-18T05:35:47 | 2022-02-18T05:35:47 | 163,961,795 | 1 | 1 | Apache-2.0 | 2023-01-14T08:01:02 | 2019-01-03T11:03:35 | Python | UTF-8 | Python | false | false | 4,391 | py | import time
from ray_release.exception import (
ClusterCreationError,
ClusterStartupError,
ClusterStartupTimeout,
ClusterStartupFailed,
)
from ray_release.logger import logger
from ray_release.cluster_manager.minimal import MinimalClusterManager
from ray_release.util import format_link, anyscale_cluster_url
REPORT_S = 30.0
class FullClusterManager(MinimalClusterManager):
"""Full manager.
Builds app config and compute template and starts/terminated session
using SDK.
"""
def start_cluster(self, timeout: float = 600.0):
logger.info(f"Creating cluster {self.cluster_name}")
try:
result = self.sdk.create_cluster(
dict(
name=self.cluster_name,
project_id=self.project_id,
cluster_environment_build_id=self.cluster_env_build_id,
cluster_compute_id=self.cluster_compute_id,
idle_timeout_minutes=self.autosuspend_minutes,
)
)
self.cluster_id = result.result.id
except Exception as e:
raise ClusterCreationError(f"Error creating cluster: {e}") from e
# Trigger session start
logger.info(f"Starting cluster {self.cluster_name} ({self.cluster_id})")
cluster_url = anyscale_cluster_url(
project_id=self.project_id, session_id=self.cluster_id
)
logger.info(f"Link to cluster: {format_link(cluster_url)}")
try:
result = self.sdk.start_cluster(self.cluster_id, start_cluster_options={})
cop_id = result.result.id
completed = result.result.completed
except Exception as e:
raise ClusterStartupError(
f"Error starting cluster with name "
f"{self.cluster_name} and {self.cluster_id} ({cluster_url}): "
f"{e}"
) from e
# Wait for session
logger.info(f"Waiting for cluster {self.cluster_name}...")
start_time = time.monotonic()
timeout_at = start_time + timeout
next_status = start_time + 30
while not completed:
now = time.monotonic()
if now >= timeout_at:
raise ClusterStartupTimeout(
f"Time out when creating cluster {self.cluster_name}"
)
if now >= next_status:
logger.info(
f"... still waiting for cluster {self.cluster_name} "
f"({int(now - start_time)} seconds) ..."
)
next_status += 30
# Sleep 1 sec before next check.
time.sleep(1)
result = self.sdk.get_cluster_operation(cop_id, _request_timeout=30)
completed = result.result.completed
result = self.sdk.get_cluster(self.cluster_id)
if result.result.state != "Running":
raise ClusterStartupFailed(
f"Cluster did not come up - most likely the nodes are currently "
f"not available. Please check the cluster startup logs: "
f"{cluster_url} (cluster state: {result.result.state})"
)
def terminate_cluster(self, wait: bool = False):
if self.cluster_id:
# Just trigger a request. No need to wait until session shutdown.
result = self.sdk.terminate_cluster(
cluster_id=self.cluster_id, terminate_cluster_options={}
)
if not wait:
return
# Only do this when waiting
cop_id = result.result.id
completed = result.result.completed
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
cluster_operation_response = self.sdk.get_cluster_operation(
cop_id, _request_timeout=30
)
cluster_operation = cluster_operation_response.result
completed = cluster_operation.completed
result = self.sdk.get_cluster(self.cluster_id)
while result.result.state != "Terminated":
time.sleep(1)
result = self.sdk.get_cluster(self.cluster_id)
def get_cluster_address(self) -> str:
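        # Illustrative result (hypothetical names): project "demo" and cluster
        # "release-test-42" give "anyscale://demo/release-test-42".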
return f"anyscale://{self.project_name}/{self.cluster_name}"
| [
"[email protected]"
] | |
17a8ecc861f7d8cd4aca3b5ccae2459c6969877e | e6d4a87dcf98e93bab92faa03f1b16253b728ac9 | /algorithms/python/reverseString/reverseString.py | 8d30b5defc2ca207f40965dab0c6cfe7eaa1a921 | [] | no_license | MichelleZ/leetcode | b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f | a390adeeb71e997b3c1a56c479825d4adda07ef9 | refs/heads/main | 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/reverse-string/
# Author: Miao Zhang
# Date: 2021-02-03
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
        s.reverse()
| [
"[email protected]"
] | |
5d6169c3b0d31fc740377c9c1c9ff5e09fb5a40d | 71bc873c20fbc45bb5e13095d2474496818a23f9 | /service_coldstart/code/redundant_code/send_email_withAttachment.py | 6027018a46aac7ab761d72c23456d9367df891aa | [] | no_license | 2877992943/lianyun | f31c44ea2e266bae51cae4fa464d1bae368c8d3f | a872d6cd1b2eff402bcccb326d33d086816d87af | refs/heads/master | 2021-01-20T16:17:20.226401 | 2017-05-10T06:49:31 | 2017-05-10T06:49:31 | 90,830,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,904 | py | #!/usr/bin/env python
#coding: utf-8
import pandas as pd
"""
http://www.cnblogs.com/leetao94/p/5460520.html
"""
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import email.MIMEMultipart
import email.MIMEText
import email.MIMEBase
import os.path
def SendEmail(fromAdd, toAdd, subject, attachfile, htmlText):
strFrom = fromAdd;
strTo = toAdd;
msg =MIMEText(htmlText);
msg['Content-Type'] = 'Text/HTML';
msg['Subject'] = Header(subject,'gb2312');
msg['To'] = strTo;
msg['From'] = strFrom;
smtp = smtplib.SMTP('smtp.exmail.qq.com');
smtp.login('[email protected]','yr13371695096YR');
try:
smtp.sendmail(strFrom,strTo,msg.as_string());
finally:
smtp.close;
def send_with_attachment(From,To,filename,num_leads,csvName):
email_csv_name='temp_'+csvName+'.csv';#print '1',From,To,filename,num_leads,csvName
email_csv_name=email_csv_name.decode('utf-8')
if To.find('@yunkecn.com')==-1:
To='[email protected]'
server = smtplib.SMTP('smtp.exmail.qq.com')
server.login('[email protected]','yr13371695096YR')
    # Build a MIMEMultipart object to act as the root container
main_msg = email.MIMEMultipart.MIMEMultipart()
    # Build a MIMEText object as the visible message body and attach it to the root container
text_msg = email.MIMEText.MIMEText("find the attachment please.")
main_msg.attach(text_msg)
    # Build a MIMEBase object for the file attachment and attach it to the root container
contype = 'application/octet-stream'
maintype, subtype = contype.split('/', 1)
    ## Read in the file content and format it
## select num_leads
df=pd.read_csv(filename,encoding='utf-8');#print df.shape
if num_leads<df.shape[0]:
df=df[:num_leads];#print df.shape[0]
df.to_csv(filename,index=False,encoding='utf-8')
    ### if the required num_leads exceeds df.shape[0], use df directly
data = open(filename, 'rb')
file_msg = email.MIMEBase.MIMEBase(maintype, subtype)
file_msg.set_payload(data.read( ))
data.close( )
email.Encoders.encode_base64(file_msg)
    ## Set the attachment headers
basename = os.path.basename(filename);print basename,filename,email_csv_name
file_msg.add_header('Content-Disposition',
'attachment', filename = filename)
main_msg.attach(file_msg)
    # Set the root container's header fields
main_msg['From'] = From
main_msg['To'] = To
main_msg['Subject'] = "attachment :%s email to user:%s"%(csvName,To)
main_msg['Date'] = email.Utils.formatdate( )
    # Get the complete formatted message text
fullText = main_msg.as_string( )
    # Send the mail via SMTP
try:
server.sendmail(From, To, fullText)
finally:
server.quit()
if __name__ == "__main__":
send_with_attachment('[email protected]','[email protected]','tmp.csv',5,'宁波_1999')# '宁波_1999' in csvName as csvname will not work
| [
"[email protected]"
] | |
f326d396098af535a466262caabde1b751d25673 | e8fef7552fb0c354d1084d2b4e7bf16efb7b9021 | /tests/__init__.py | de4f4ac5908f313553e18bad2d117aacd1fda75c | [
"MIT"
] | permissive | TristanTTran/feets | 180ea67030c88b9eebb77923fe1af6746f654b30 | 48b16a5f2b95c0a4c05b47a88b396250faf168d6 | refs/heads/master | 2023-03-28T01:07:48.636265 | 2021-03-26T01:28:36 | 2021-03-26T01:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""All feets tests"""
| [
"[email protected]"
] | |
41783f6cd99c7b929bf77c9029ac35890c0097a7 | 1bed2f766620acf085ed2d7fd3e354a3482b8960 | /script/hassfest/translations.py | 9c4f75f1b2d264f2ca51ebbe306546e2a3b05dba | [
"Apache-2.0"
] | permissive | elupus/home-assistant | 5cbb79a2f25a2938a69f3988534486c269b77643 | 564150169bfc69efdfeda25a99d803441f3a4b10 | refs/heads/dev | 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 | Apache-2.0 | 2023-02-22T06:14:54 | 2017-12-16T12:50:55 | Python | UTF-8 | Python | false | false | 14,840 | py | """Validate integration translation files."""
from __future__ import annotations
from functools import partial
from itertools import chain
import json
import re
import voluptuous as vol
from voluptuous.humanize import humanize_error
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
from script.translations import upload
from .model import Config, Integration
UNDEFINED = 0
REQUIRED = 1
REMOVED = 2
RE_REFERENCE = r"\[\%key:(.+)\%\]"
# Only allow translation of integration names if they contain non-brand names
ALLOW_NAME_TRANSLATION = {
"cert_expiry",
"cpuspeed",
"emulated_roku",
"faa_delays",
"garages_amsterdam",
"google_travel_time",
"homekit_controller",
"islamic_prayer_times",
"local_ip",
"nmap_tracker",
"rpi_power",
"waze_travel_time",
}
REMOVED_TITLE_MSG = (
"config.title key has been moved out of config and into the root of strings.json. "
"Starting Home Assistant 0.109 you only need to define this key in the root "
"if the title needs to be different than the name of your integration in the "
"manifest."
)
MOVED_TRANSLATIONS_DIRECTORY_MSG = (
"The '.translations' directory has been moved, the new name is 'translations', "
"starting with Home Assistant 0.112 your translations will no longer "
"load if you do not move/rename this "
)
def allow_name_translation(integration: Integration):
"""Validate that the translation name is not the same as the integration name."""
# Only enforce for core because custom integrations can't be
# added to allow list.
return integration.core and (
integration.domain in ALLOW_NAME_TRANSLATION
or integration.quality_scale == "internal"
)
def check_translations_directory_name(integration: Integration) -> None:
"""Check that the correct name is used for the translations directory."""
legacy_translations = integration.path / ".translations"
translations = integration.path / "translations"
if translations.is_dir():
# No action required
return
if legacy_translations.is_dir():
integration.add_error("translations", MOVED_TRANSLATIONS_DIRECTORY_MSG)
def find_references(strings, prefix, found):
"""Find references."""
for key, value in strings.items():
if isinstance(value, dict):
find_references(value, f"{prefix}::{key}", found)
continue
match = re.match(RE_REFERENCE, value)
if match:
found.append({"source": f"{prefix}::{key}", "ref": match.groups()[0]})
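# Illustrative note (not part of the upstream file): a string value such as
# "[%key:common::config_flow::data::host%]" matches RE_REFERENCE, so find_references
# would record {"source": "<file>::<section>::<key>", "ref": "common::config_flow::data::host"}.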
def removed_title_validator(config, integration, value):
"""Mark removed title."""
if not config.specific_integrations:
raise vol.Invalid(REMOVED_TITLE_MSG)
# Don't mark it as an error yet for custom components to allow backwards compat.
integration.add_warning("translations", REMOVED_TITLE_MSG)
return value
def lowercase_validator(value):
"""Validate value is lowercase."""
if value.lower() != value:
raise vol.Invalid("Needs to be lowercase")
return value
def gen_data_entry_schema(
*,
config: Config,
integration: Integration,
flow_title: int,
require_step_title: bool,
mandatory_description: str | None = None,
):
"""Generate a data entry schema."""
step_title_class = vol.Required if require_step_title else vol.Optional
schema = {
vol.Optional("flow_title"): cv.string_with_no_html,
vol.Required("step"): {
str: {
step_title_class("title"): cv.string_with_no_html,
vol.Optional("description"): cv.string_with_no_html,
vol.Optional("data"): {str: cv.string_with_no_html},
vol.Optional("data_description"): {str: cv.string_with_no_html},
vol.Optional("menu_options"): {str: cv.string_with_no_html},
}
},
vol.Optional("error"): {str: cv.string_with_no_html},
vol.Optional("abort"): {str: cv.string_with_no_html},
vol.Optional("progress"): {str: cv.string_with_no_html},
vol.Optional("create_entry"): {str: cv.string_with_no_html},
}
if flow_title == REQUIRED:
schema[vol.Required("title")] = cv.string_with_no_html
elif flow_title == REMOVED:
schema[vol.Optional("title", msg=REMOVED_TITLE_MSG)] = partial(
removed_title_validator, config, integration
)
def data_description_validator(value):
"""Validate data description."""
for step_info in value["step"].values():
if "data_description" not in step_info:
continue
for key in step_info["data_description"]:
if key not in step_info["data"]:
raise vol.Invalid(f"data_description key {key} is not in data")
return value
validators = [vol.Schema(schema), data_description_validator]
if mandatory_description is not None:
def validate_description_set(value):
"""Validate description is set."""
steps = value["step"]
if mandatory_description not in steps:
raise vol.Invalid(f"{mandatory_description} needs to be defined")
if "description" not in steps[mandatory_description]:
raise vol.Invalid(f"Step {mandatory_description} needs a description")
return value
validators.append(validate_description_set)
if not allow_name_translation(integration):
def name_validator(value):
"""Validate name."""
for step_id, info in value["step"].items():
if info.get("title") == integration.name:
raise vol.Invalid(
f"Do not set title of step {step_id} if it's a brand name "
"or add exception to ALLOW_NAME_TRANSLATION"
)
return value
validators.append(name_validator)
return vol.All(*validators)
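# Illustrative example (assumed content, not taken from any real integration) of a
# minimal data-entry block that the schema built above accepts:
#
#   {
#     "step": {
#       "user": {
#         "title": "Connect",
#         "data": {"host": "Host"}
#       }
#     },
#     "error": {"cannot_connect": "Failed to connect"}
#   }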
def gen_strings_schema(config: Config, integration: Integration) -> vol.Schema:
"""Generate a strings schema."""
return vol.Schema(
{
vol.Optional("title"): cv.string_with_no_html,
vol.Optional("config"): gen_data_entry_schema(
config=config,
integration=integration,
flow_title=REMOVED,
require_step_title=False,
mandatory_description=(
"user" if integration.integration_type == "helper" else None
),
),
vol.Optional("options"): gen_data_entry_schema(
config=config,
integration=integration,
flow_title=UNDEFINED,
require_step_title=False,
),
vol.Optional("device_automation"): {
vol.Optional("action_type"): {str: cv.string_with_no_html},
vol.Optional("condition_type"): {str: cv.string_with_no_html},
vol.Optional("trigger_type"): {str: cv.string_with_no_html},
vol.Optional("trigger_subtype"): {str: cv.string_with_no_html},
},
vol.Optional("state"): cv.schema_with_slug_keys(
cv.schema_with_slug_keys(str, slug_validator=lowercase_validator),
slug_validator=vol.Any("_", cv.slug),
),
vol.Optional("system_health"): {
vol.Optional("info"): {str: cv.string_with_no_html}
},
vol.Optional("config_panel"): cv.schema_with_slug_keys(
cv.schema_with_slug_keys(
cv.string_with_no_html, slug_validator=lowercase_validator
),
slug_validator=vol.Any("_", cv.slug),
),
vol.Optional("application_credentials"): {
vol.Optional("description"): cv.string_with_no_html,
},
vol.Optional("issues"): {
str: vol.All(
cv.has_at_least_one_key("description", "fix_flow"),
vol.Schema(
{
vol.Required("title"): cv.string_with_no_html,
vol.Exclusive(
"description", "fixable"
): cv.string_with_no_html,
vol.Exclusive("fix_flow", "fixable"): gen_data_entry_schema(
config=config,
integration=integration,
flow_title=UNDEFINED,
require_step_title=False,
),
},
),
)
},
}
)
def gen_auth_schema(config: Config, integration: Integration):
"""Generate auth schema."""
return vol.Schema(
{
vol.Optional("mfa_setup"): {
str: gen_data_entry_schema(
config=config,
integration=integration,
flow_title=REQUIRED,
require_step_title=True,
)
}
}
)
def gen_platform_strings_schema(config: Config, integration: Integration):
"""Generate platform strings schema like strings.sensor.json.
Example of valid data:
{
"state": {
"moon__phase": {
"full": "Full"
}
}
}
"""
def device_class_validator(value):
"""Key validator for platform states.
Platform states are only allowed to provide states for device classes they prefix.
"""
if not value.startswith(f"{integration.domain}__"):
raise vol.Invalid(
f"Device class need to start with '{integration.domain}__'. Key {value} is invalid. See https://developers.home-assistant.io/docs/internationalization/core#stringssensorjson"
)
slug_friendly = value.replace("__", "_", 1)
slugged = slugify(slug_friendly)
if slug_friendly != slugged:
raise vol.Invalid(
f"invalid device class {value}. After domain__, needs to be all lowercase, no spaces."
)
return value
return vol.Schema(
{
vol.Optional("state"): cv.schema_with_slug_keys(
cv.schema_with_slug_keys(str, slug_validator=lowercase_validator),
slug_validator=device_class_validator,
)
}
)
ONBOARDING_SCHEMA = vol.Schema({vol.Required("area"): {str: cv.string_with_no_html}})
def validate_translation_file(config: Config, integration: Integration, all_strings):
"""Validate translation files for integration."""
if config.specific_integrations:
check_translations_directory_name(integration)
strings_files = [integration.path / "strings.json"]
# Also validate translations for custom integrations
if config.specific_integrations:
# Only English needs to be always complete
strings_files.append(integration.path / "translations/en.json")
references = []
if integration.domain == "auth":
strings_schema = gen_auth_schema(config, integration)
elif integration.domain == "onboarding":
strings_schema = ONBOARDING_SCHEMA
elif integration.domain == "binary_sensor":
strings_schema = gen_strings_schema(config, integration).extend(
{
vol.Optional("device_class"): cv.schema_with_slug_keys(
cv.string_with_no_html, slug_validator=vol.Any("_", cv.slug)
)
}
)
else:
strings_schema = gen_strings_schema(config, integration)
for strings_file in strings_files:
if not strings_file.is_file():
continue
name = str(strings_file.relative_to(integration.path))
try:
strings = json.loads(strings_file.read_text())
except ValueError as err:
integration.add_error("translations", f"Invalid JSON in {name}: {err}")
continue
try:
strings_schema(strings)
except vol.Invalid as err:
integration.add_error(
"translations", f"Invalid {name}: {humanize_error(strings, err)}"
)
else:
if strings_file.name == "strings.json":
find_references(strings, name, references)
if strings.get(
"title"
) == integration.name and not allow_name_translation(integration):
integration.add_error(
"translations",
"Don't specify title in translation strings if it's a brand name "
"or add exception to ALLOW_NAME_TRANSLATION",
)
platform_string_schema = gen_platform_strings_schema(config, integration)
platform_strings = [integration.path.glob("strings.*.json")]
if config.specific_integrations:
platform_strings.append(integration.path.glob("translations/*.en.json"))
for path in chain(*platform_strings):
name = str(path.relative_to(integration.path))
try:
strings = json.loads(path.read_text())
except ValueError as err:
integration.add_error("translations", f"Invalid JSON in {name}: {err}")
continue
try:
platform_string_schema(strings)
except vol.Invalid as err:
msg = f"Invalid {path.name}: {humanize_error(strings, err)}"
if config.specific_integrations:
integration.add_warning("translations", msg)
else:
integration.add_error("translations", msg)
else:
find_references(strings, path.name, references)
if config.specific_integrations:
return
# Validate references
for reference in references:
parts = reference["ref"].split("::")
search = all_strings
key = parts.pop(0)
while parts and key in search:
search = search[key]
key = parts.pop(0)
if parts or key not in search:
integration.add_error(
"translations",
f"{reference['source']} contains invalid reference {reference['ref']}: Could not find {key}",
)
def validate(integrations: dict[str, Integration], config: Config):
"""Handle JSON files inside integrations."""
if config.specific_integrations:
all_strings = None
else:
all_strings = upload.generate_upload_data()
for integration in integrations.values():
validate_translation_file(config, integration, all_strings)
| [
"[email protected]"
] | |
09959fbc007813a97153e55b336706d844a29ad6 | 9e0090384cdfd194188587897208f17ed031780b | /05-django-template/pjt05/manage.py | b159b3a3fdc4cf92d6370142cf28cc973586ee6c | [] | no_license | auscanaoy/flask-django-practice | ec1d5dec0d34e3d0100f4107d577bc8061988813 | 4eef9a3d0371deec9c860949f789f4d60866078a | refs/heads/master | 2023-04-03T09:31:04.002674 | 2021-04-10T05:15:32 | 2021-04-10T05:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pjt05.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
e21891fa297ee7c4ca8a4e8d84c6160a9d883849 | 84c5ce2f75ec8d4d9704dc993682ba52745a3e12 | /m_layer/m_layer.py | d1b535b8a4da916f3951a00e1000943a97f0ea0c | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | yang-song/google-research | d0610748ae8056cfa2f89345053b386e976a1b82 | 34119b64214af089db8c675e6a03b30a0f608f66 | refs/heads/master | 2022-12-16T06:05:40.561237 | 2020-09-17T20:39:13 | 2020-09-17T20:42:11 | 296,450,555 | 1 | 0 | Apache-2.0 | 2020-09-17T21:55:10 | 2020-09-17T21:55:09 | null | UTF-8 | Python | false | false | 3,595 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding=utf-8
"""Code for creating the M-layer as a keras layer."""
import tensorflow as tf
class MLayer(tf.keras.layers.Layer):
"""The M-layer: Lie Algebra generator-embedding and matrix exponentiation.
This is a Keras implementation of the M-layer described in (2020)[1].
#### References
[1]: Thomas Fischbacher, Iulia M. Comsa, Krzysztof Potempa, Moritz Firsching,
Luca Versari, Jyrki Alakuijala "Intelligent Matrix Exponentiation", ICML 2020.
TODO(firsching): add link to paper.
"""
def __init__(self,
dim_m,
matrix_init=None,
with_bias=False,
matrix_squarings_exp=None,
**kwargs):
"""Initializes the instance.
Args:
dim_m: The matrix to be exponentiated in the M-layer has the shape (dim_m,
dim_m).
matrix_init: What initializer to use for the matrix. `None` defaults to
`normal` initalization.
with_bias: Whether a bias should be included in layer after
exponentiation.
matrix_squarings_exp: None to compute tf.linalg.expm(M), an integer `k` to
instead approximate it with (I+M/2**k)**(2**k).
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self._dim_m = dim_m
self._rep_to_exp_tensor = None
self._matrix_init = matrix_init or 'normal'
self._with_bias = with_bias
self._matrix_bias = None
self._matrix_squarings_exp = matrix_squarings_exp
super(MLayer, self).__init__(**kwargs)
def build(self, input_shape):
dim_rep = input_shape[-1]
self._rep_to_exp_tensor = self.add_weight(
name='rep_to_exp_tensor',
shape=(dim_rep, self._dim_m, self._dim_m),
initializer=self._matrix_init,
trainable=True)
if self._with_bias:
self._matrix_bias = self.add_weight(
name='matrix_bias',
shape=(1, self._dim_m, self._dim_m),
initializer='uniform',
trainable=True)
super(MLayer, self).build(input_shape)
def call(self, x):
if not self._with_bias:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor, x)
else:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor,
x) + self._matrix_bias
if self._matrix_squarings_exp is None:
return tf.linalg.expm(mat)
# Approximation of exp(mat) as (1+mat/k)**k with k = 2**MATRIX_SQUARINGS_EXP
mat = mat * 0.5**self._matrix_squarings_exp + tf.eye(self._dim_m)
    for _ in range(self._matrix_squarings_exp):
mat = tf.einsum('...ij,...jk->...ik', mat, mat)
return mat
def compute_output_shape(self, input_shape):
return input_shape[0], self._dim_m, self._dim_m
def get_config(self):
config = dict(super().get_config())
config['dim_m'] = self._dim_m
config['matrix_init'] = self._matrix_init
config['with_bias'] = self._with_bias
config['matrix_squarings_exp'] = self._matrix_squarings_exp
return config
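# Minimal usage sketch (illustrative only; shapes and dim_m are arbitrary, and it
# assumes the TensorFlow import at the top of this module):
#
#   layer = MLayer(dim_m=4)
#   x = tf.random.normal([8, 16])   # batch of 8 feature vectors of dimension 16
#   y = layer(x)                    # shape (8, 4, 4): one matrix exponential per example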
| [
"[email protected]"
] | |
5311c7de6d4c19bc5c23b7024f94490d6e4da152 | 648f742d6db2ea4e97b83c99b6fc49abd59e9667 | /common/vault/oas/models/global_parameters_create_global_parameter_request.py | 13908154762177b2ca58227508b96bd210f59d49 | [] | no_license | jmiller-tm/replit | c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86 | c8e6af3268c4ef8da66516154850919ea79055dc | refs/heads/main | 2023-08-30T00:49:35.738089 | 2021-11-16T23:09:08 | 2021-11-16T23:09:08 | 428,809,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,259 | py | # coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GlobalParametersCreateGlobalParameterRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'global_parameter': 'GlobalParametersGlobalParameter',
'initial_value': 'str'
}
attribute_map = {
'request_id': 'request_id',
'global_parameter': 'global_parameter',
'initial_value': 'initial_value'
}
def __init__(self, request_id=None, global_parameter=None, initial_value=None): # noqa: E501
"""GlobalParametersCreateGlobalParameterRequest - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._global_parameter = None
self._initial_value = None
self.discriminator = None
self.request_id = request_id
self.global_parameter = global_parameter
self.initial_value = initial_value
@property
def request_id(self):
"""Gets the request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
A unique string ID used for idempotency. Required. # noqa: E501
:return: The request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this GlobalParametersCreateGlobalParameterRequest.
A unique string ID used for idempotency. Required. # noqa: E501
:param request_id: The request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: str
"""
if request_id is None:
raise ValueError("Invalid value for `request_id`, must not be `None`") # noqa: E501
self._request_id = request_id
@property
def global_parameter(self):
"""Gets the global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:return: The global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: GlobalParametersGlobalParameter
"""
return self._global_parameter
@global_parameter.setter
def global_parameter(self, global_parameter):
"""Sets the global_parameter of this GlobalParametersCreateGlobalParameterRequest.
:param global_parameter: The global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: GlobalParametersGlobalParameter
"""
if global_parameter is None:
raise ValueError("Invalid value for `global_parameter`, must not be `None`") # noqa: E501
self._global_parameter = global_parameter
@property
def initial_value(self):
"""Gets the initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
This will be used to create a `GlobalParameterValue` associated with the newly created `GlobalParameter`. The `effective_timestamp` of the created `GlobalParameterValue` will be the Unix epoch. Required. # noqa: E501
:return: The initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: str
"""
return self._initial_value
@initial_value.setter
def initial_value(self, initial_value):
"""Sets the initial_value of this GlobalParametersCreateGlobalParameterRequest.
This will be used to create a `GlobalParameterValue` associated with the newly created `GlobalParameter`. The `effective_timestamp` of the created `GlobalParameterValue` will be the Unix epoch. Required. # noqa: E501
:param initial_value: The initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: str
"""
if initial_value is None:
raise ValueError("Invalid value for `initial_value`, must not be `None`") # noqa: E501
self._initial_value = initial_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GlobalParametersCreateGlobalParameterRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GlobalParametersCreateGlobalParameterRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
124c566b11061583dd8deb7c339c1629fd4a5c2d | a6894d17fdbceb56d4364f0e279d03b16a181396 | /working-env/lib/python2.5/TurboGears-1.0.2.2-py2.5.egg/turbogears/i18n/data/fr.py | b6840bd42c607e4582867cded964637bda4f6444 | [] | no_license | thraxil/gtreed | c1c5a19178c1f50ff5e61887b13ff7b004da1d25 | ca228848364edb204b15a7411fd6192379781c78 | refs/heads/master | 2020-04-18T03:02:15.468044 | 2008-12-10T20:02:12 | 2008-12-10T20:02:12 | 88,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,518 | py | # Formatting configuration for locale fr
languages={'gv': 'manx', 'gu': 'goudjrati', 'gd': u'ga\xe9lique \xe9cossais', 'ga': 'irlandais', 'gn': 'guarani', 'gl': 'galicien', 'lg': 'ganda', 'lb': 'luxembourgeois', 'la': 'latin', 'ln': 'lingala', 'lo': 'lao', 'tt': 'tatare', 'tr': 'turc', 'ts': 'tsonga', 'li': 'limbourgeois', 'lv': 'letton', 'to': 'tonga', 'lt': 'lithuanien', 'lu': 'luba-katanga', 'tk': u'turkm\xe8ne', 'th': u'tha\xef', 'ti': 'tigrigna', 'tg': 'tadjik', 'te': u't\xe9lougou', 'haw': u'hawa\xefen', 'yi': 'yiddish', 'yo': 'yoruba', 'de': 'allemand', 'da': 'danois', 'dz': 'dzongkha', 'st': 'sotho du Sud', 'dv': 'maldivien', 'qu': 'quechua', 'el': 'grec', 'eo': u'esp\xe9ranto', 'en': 'anglais', 'zh': 'chinois', 'ee': u'\xe9w\xe9', 'za': 'zhuang', 'mh': 'marshall', 'uk': 'ukrainien', 'eu': 'basque', 'et': 'estonien', 'es': 'espagnol', 'ru': 'russe', 'rw': 'rwanda', 'rm': u'rh\xe9to-roman', 'rn': 'roundi', 'ro': 'roumain', 'bn': 'bengali', 'be': u'bi\xe9lorusse', 'bg': 'bulgare', 'ba': 'bachkir', 'wa': 'wallon', 'wo': 'wolof', 'bm': 'bambara', 'jv': 'javanais', 'bo': u'tib\xe9tain', 'bh': 'bihari', 'bi': 'bichlamar', 'br': 'breton', 'bs': 'bosniaque', 'ja': 'japonais', 'om': 'galla', 'oj': 'ojibwa', 'root': 'racine', 'ty': 'tahitien', 'oc': 'occitan', 'tw': 'twi', 'os': u'oss\xe8te', 'or': 'oriya', 'xh': 'xhosa', 'ch': 'chamorro', 'co': 'corse', 'ca': 'catalan', 'ce': u'tch\xe9tch\xe8ne', 'cy': 'gallois', 'cs': u'tch\xe8que', 'cr': 'cree', 'cv': 'tchouvache', 've': 'venda', 'ps': 'pachto', 'kok': 'konkani', 'pt': 'portugais', 'tl': 'tagalog', 'pa': 'pendjabi', 'vi': 'vietnamien', 'pi': 'pali', 'pl': 'polonais', 'hz': 'herero', 'hy': u'arm\xe9nien', 'hr': 'croate', 'iu': 'inuktitut', 'ht': u'ha\xeftien', 'hu': 'hongrois', 'hi': 'hindi', 'ho': 'hiri motu', 'ha': 'haoussa', 'he': u'h\xe9breu', 'mg': 'malgache', 'uz': 'ouzbek', 'ml': 'malayalam', 'mo': 'moldave', 'mn': 'mongol', 'mi': 'maori', 'ik': 'inupiaq', 'mk': u'mac\xe9donien', 'ur': 'ourdou', 'mt': 'maltais', 'ms': 'malais', 'mr': 'marathe', 'ug': u'ou\xefgour', 'ta': 'tamoul', 'my': 'birman', 'sq': 'albanais', 'aa': 'afar', 'ab': 'abkhaze', 'ae': 'avestique', 'ss': 'swati', 'af': 'afrikaans', 'tn': 'setswana', 'sw': 'swahili', 'is': 'islandais', 'am': 'amharique', 'it': 'italien', 'an': 'aragonais', 'ii': 'yi de Sichuan', 'ia': 'interlingua', 'as': 'assamais', 'ar': 'arabe', 'su': 'soundanais', 'io': 'ido', 'av': 'avar', 'ay': 'aymara', 'az': u'az\xe9ri', 'ie': u'interlingu\xeb', 'id': u'indon\xe9sien', 'ig': 'igbo', 'sk': 'slovaque', 'sr': 'serbe', 'nl': u'n\xe9erlandais', 'nn': u'nynorsk norv\xe9gien', 'no': u'norv\xe9gien', 'na': 'nauruan', 'nb': u'bokm\xe5l norv\xe9gien', 'nd': u'nd\xe9b\xe9l\xe9 du Nord', 'ne': u'n\xe9palais', 'ng': 'ndonga', 'ny': 'nyanja', 'vo': u'volap\xfck', 'zu': 'zoulou', 'so': 'somali', 'nr': u'nd\xe9b\xe9l\xe9 du Sud', 'nv': 'navaho', 'sn': 'shona', 'fr': u'fran\xe7ais', 'sm': 'samoan', 'fy': 'frison', 'sv': u'su\xe9dois', 'fa': 'persan', 'ff': 'peul', 'fi': 'finnois', 'fj': 'fidjien', 'sa': 'sanskrit', 'fo': u'f\xe9ro\xefen', 'ka': u'g\xe9orgien', 'kg': 'kongo', 'kk': 'kazakh', 'kj': 'kuanyama', 'ki': 'kikuyu', 'ko': u'cor\xe9en', 'kn': 'kannada', 'km': 'khmer', 'kl': 'groenlandais', 'ks': 'kashmiri', 'kr': 'kanouri', 'si': 'singhalais', 'sh': 'serbo-croate', 'kw': 'cornique', 'kv': 'komi', 'ku': 'kurde', 'sl': u'slov\xe8ne', 'sc': 'sarde', 'ky': 'kirghize', 'sg': 'sango', 'se': 'sami du Nord', 'sd': 'sindhi'}
countries={'BD': 'Bangladesh', 'BE': 'Belgique', 'BF': 'Burkina Faso', 'BG': 'Bulgarie', 'BA': u'Bosnie-Herz\xe9govine', 'BB': 'Barbade', 'WF': 'Wallis et Futuna', 'BM': 'Bermudes', 'BN': 'Brunei', 'BO': 'Bolivie', 'BH': u'Bahre\xefn', 'BI': 'Burundi', 'BJ': 'Benin', 'BT': 'Bhoutan', 'JM': u'Jama\xefque', 'BV': u'\xcele Bouvet', 'BW': 'Botswana', 'WS': 'Samoa', 'BR': u'Br\xe9sil', 'BS': 'Bahamas', 'BY': u'B\xe9larus', 'BZ': 'Belize', 'RU': 'Russie', 'RW': 'Rwanda', 'TL': 'Timor-Leste', 'RE': u'R\xe9union', 'TM': 'Turkmenistan', 'TJ': 'Tadjikistan', 'RO': 'Roumanie', 'TK': 'Tokelau', 'GW': u'Guin\xe9e-Bissau', 'GU': 'Guam', 'GT': 'Guatemala', 'GS': u'G\xe9orgie du Sud, \xceles Sandwich du Sud', 'GR': u'Gr\xe8ce', 'GQ': u'Guin\xe9e \xc9quatoriale', 'GP': 'Guadeloupe', 'JP': 'Japon', 'GY': 'Guyane', 'GF': u'Guyane Fran\xe7aise', 'GE': u'G\xe9orgie', 'GD': 'Grenade', 'GB': 'Royaume-Uni', 'GA': 'Gabon', 'SV': 'El Salvador', 'GN': u'Guin\xe9e', 'GM': 'Gambie', 'GL': 'Groenland', 'GI': 'Gibraltar', 'GH': 'Ghana', 'OM': 'Oman', 'TN': 'Tunisie', 'JO': 'Jordanie', 'SP': 'Serbie', 'HR': 'Croatie', 'HT': u'Ha\xefti', 'HU': 'Hongrie', 'HK': 'Hong-Kong R.A.S.', 'HN': 'Honduras', 'HM': u'\xceles Heard et MacDonald', 'VE': u'V\xe9n\xe9zuela', 'PR': 'Porto Rico', 'PS': 'Territoire Palestinien', 'PW': 'Palaos', 'PT': 'Portugal', 'SJ': u'Svalbard et \xcele Jan Mayen', 'PY': 'Paraguay', 'IQ': 'Iraq', 'PA': 'Panama', 'PF': u'Polyn\xe9sie Fran\xe7aise', 'PG': u'Papouasie-Nouvelle-Guin\xe9e', 'PE': u'P\xe9rou', 'PK': 'Pakistan', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Pologne', 'PM': 'Saint Pierre et Miquelon', 'ZM': 'Zambie', 'EH': 'Sahara Occidental', 'EE': 'Estonie', 'EG': 'Egypte', 'ZA': 'Afrique du Sud', 'EC': u'\xc9quateur', 'IT': 'Italie', 'VN': u'Vi\xeat Nam', 'SB': u'\xceles Salomon', 'ET': 'Ethiopie', 'SO': 'Somalie', 'ZW': 'Zimbabwe', 'SA': 'Arabie Saoudite', 'ES': 'Espagne', 'ER': u'\xc9rythr\xe9e', 'MD': 'Moldova', 'MG': 'Madagascar', 'MA': 'Maroc', 'MC': 'Monaco', 'UZ': u'Ouzb\xe9kistan', 'MM': 'Myanmar', 'ML': 'Mali', 'MO': 'Macao R.A.S. 
de Chine', 'MN': 'Mongolie', 'MH': u'\xceles Marshall', 'MK': u'Mac\xe9doine', 'MU': 'Maurice', 'MT': 'Malte', 'MW': 'Malawi', 'MV': 'Maldives', 'MQ': 'Martinique', 'MP': 'Mariannes du Nord', 'MS': 'Montserrat', 'MR': 'Mauritanie', 'UG': 'Ouganda', 'MY': 'Malaisie', 'MX': 'Mexique', 'IL': u'Isra\xebl', 'FR': 'France', 'IO': u"Territoire Britannique de l'Oc\xe9an Indien", 'SH': u'Sainte-H\xe9l\xe8ne', 'FI': 'Finlande', 'FJ': 'Fidji', 'FK': u'\xceles Falkland (Malvinas)', 'FM': u'Micron\xe9sie', 'FO': u'\xceles F\xe9ro\xe9', 'NI': 'Nicaragua', 'NL': 'Pays-Bas', 'NO': u'Norv\xe8ge', 'NA': 'Namibie', 'VU': 'Vanuatu', 'NC': u'Nouvelle-Cal\xe9donie', 'NE': 'Niger', 'NF': u'\xcele Norfolk', 'NG': u'Nig\xe9ria', 'NZ': u'Nouvelle-Z\xe9lande', 'NP': u'N\xe9pal', 'NR': 'Nauru', 'NU': u'Niu\xe9', 'CK': u'\xceles Sandwich du Sud', 'CI': u"C\xf4te d'Ivoire", 'CH': 'Suisse', 'CO': 'Colombie', 'CN': 'Chine', 'CM': 'Cameroun', 'CL': 'Chili', 'CC': u'\xceles Cocos', 'CA': 'Canada', 'CG': 'Congo', 'CF': u'R\xe9publique Centrafricaine', 'CD': u'R\xe9publique D\xe9mocratique du Congo', 'CZ': u'R\xe9publique Tch\xe8que', 'CY': 'Chypre', 'CX': u'\xcele Christmas', 'CR': 'Costa Rica', 'Fallback': 'en', 'CV': 'Cap Vert', 'CU': 'Cuba', 'SZ': 'Swaziland', 'SY': 'Syrie', 'KG': 'Kyrgyzstan', 'KE': 'Kenya', 'SR': 'Suriname', 'KI': 'Kiribati', 'KH': 'Cambodge', 'KN': 'Saint Kitts et Nevis', 'KM': 'Comores', 'ST': u'Sao Tom\xe9-et-Principe', 'SK': 'Slovaquie', 'KR': u'Cor\xe9e du Sud', 'SI': u'Slov\xe9nie', 'KP': u'Cor\xe9e du Nord', 'KW': u'Kowe\xeft', 'SN': u'S\xe9n\xe9gal', 'SM': 'Saint-Marin', 'SL': 'Sierra Leone', 'SC': 'Seychelles', 'KZ': 'Kazakhstan', 'KY': 'Cayman Islands', 'SG': 'Singapour', 'SE': u'Su\xe8de', 'SD': 'Soudan', 'DO': u'R\xe9publique Dominicaine', 'DM': 'Dominique', 'DJ': 'Djibouti', 'DK': 'Danemark', 'VG': u'\xceles Vierges Britanniques', 'DE': 'Allemagne', 'YE': u'Y\xe9men', 'DZ': u'Alg\xe9rie', 'US': u'\xc9tats-Unis', 'UY': 'Uruguay', 'YU': 'Yougoslavie', 'YT': 'Mayotte', 'UM': u'\xceles Mineures \xc9loign\xe9es des \xc9tats-Unis', 'LB': 'Liban', 'LC': 'Sainte-Lucie', 'LA': 'Laos', 'TV': 'Tuvalu', 'TW': u'Ta\xefwan, Province de Chine', 'TT': u'Trinit\xe9 et Tobago', 'TR': 'Turquie', 'LK': 'Sri Lanka', 'LI': 'Liechtenstein', 'LV': 'Lettonie', 'TO': 'Tonga', 'LT': 'Lithuanie', 'LU': 'Luxembourg', 'LR': u'Lib\xe9ria', 'LS': 'Lesotho', 'TH': u'Tha\xeflande', 'TF': u'Terres Australes Fran\xe7aises', 'TG': 'Togo', 'TD': 'Tchad', 'TC': u'\xceles Turks et Ca\xefques', 'LY': 'Libye', 'VA': u'Le Saint-Si\xe8ge (Etat de la Cit\xe9 du Vatican)', 'VC': 'Saint Vincent et les Grenadines', 'AE': u'\xc9mirats Arabes Unis', 'AD': 'Andorre', 'AG': 'Antigua et Barbuda', 'AF': 'Afghanistan', 'AI': 'Anguilla', 'VI': u'\xceles Vierges des \xc9tats-Unis', 'IS': 'Islande', 'IR': 'Iran', 'AM': u'Arm\xe9nie', 'AL': 'Albanie', 'AO': 'Angola', 'AN': u'Antilles N\xe9erlandaises', 'AQ': 'Antarctica', 'AS': u'Samoa Am\xe9ricaines', 'AR': 'Argentine', 'AU': 'Australie', 'AT': 'Autriche', 'AW': 'Aruba', 'IN': 'Inde', 'TZ': 'Tanzanie', 'AZ': u'Azerba\xefdjan', 'IE': 'Irlande', 'ID': u'Indon\xe9sie', 'UA': 'Ukraine', 'QA': 'Qatar', 'MZ': 'Mozambique'}
months=['janvier', u'f\xe9vrier', 'mars', 'avril', 'mai', 'juin', 'juillet', u'ao\xfbt', 'septembre', 'octobre', 'novembre', u'd\xe9cembre']
abbrMonths=['janv.', u'f\xe9vr.', 'mars', 'avr.', 'mai', 'juin', 'juil.', u'ao\xfbt', 'sept.', 'oct.', 'nov.', u'd\xe9c.']
days=['lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche']
abbrDays=['lun.', 'mar.', 'mer.', 'jeu.', 'ven.', 'sam.', 'dim.']
dateFormats={'medium': '%d %%(abbrmonthname)s %y', 'full': '%%(dayname)s %d %%(monthname)s %Y', 'long': '%d %%(monthname)s %Y', 'short': '%d/%m/%y'}
numericSymbols={'group': u'\xa0', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'} | [
"[email protected]"
] | |
200c75fdff4fa1317c02a08f43dc506d3c02c506 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/330/usersdata/302/93449/submittedfiles/lista1.py | 3c901a262a68d819c6cfc03bf35fedd1961bb928 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # -*- coding: utf-8 -*-
n = int(input('DIgite a quantidade de valores da matriz: '))
a = []
for i in range(0,n,1):
a.append(float(input('Digite a%d:' %(i+1))))
| [
"[email protected]"
] | |
12f159b5cb51056d3aa2a0c1960d1e41b4d15d73 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /73/73.set-matrix-zeroes.661615342.Accepted.leetcode.python3.py | e41b638e253235c385e74103532f377b5bc54df6 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Solution:
def setZeroes(self, matrix):
if not matrix or not matrix[0]:
return
h = len(matrix)
w = len(matrix[0])
rows_to_remove = set()
cols_to_remove = set()
for i in range(h):
if i not in rows_to_remove:
for j in range(w):
if matrix[i][j] == 0:
rows_to_remove.add(i)
cols_to_remove.add(j)
for i in rows_to_remove:
for j in range(w):
matrix[i][j] = 0
for j in cols_to_remove:
for i in range(h):
matrix[i][j] = 0
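# Quick sanity check (illustrative):
#   m = [[1, 1, 1],
#        [1, 0, 1],
#        [1, 1, 1]]
#   Solution().setZeroes(m)
#   # m is now [[1, 0, 1], [0, 0, 0], [1, 0, 1]]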
| [
"[email protected]"
] | |
27b7bea7760d2aae277db6002c3ca2da3c455373 | b8842700c2e071408bfab20bd499c6a4a47d0ccc | /week8/longest_subseq.py | 235e90d05bcab9175a7c4d4306fc0cd8ada18942 | [] | no_license | kobso1245/Algorithms | c74439b662d32b116b8ea3c7e6958b77d1a85bf5 | 1566a3b5e636f6a83f486382f17bff640da923a4 | refs/heads/master | 2016-08-10T10:11:14.314792 | 2015-10-05T21:36:45 | 2015-10-05T21:36:45 | 36,860,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | def longest_subs(elems):
table = [(0, "") for x in range(len(elems))]
max_elem = elems[0]
curr_max_elem = 0
max_length = 0
max_pos = len(elems)
for i in range(len(elems) - 1, -1, -1):
curr_max_elem = 0
curr_max_index = i
for j in range(i + 1, len(elems)):
if elems[i] < elems[j]:
if curr_max_elem < table[j][0]:
curr_max_elem = table[j][0]
curr_max_index = j
if curr_max_index == i:
table[i] = (1, str(elems[i]))
else:
table[i] = (table[curr_max_index][0] + 1,
str(elems[i]) + table[curr_max_index][1])
if table[i][0] > max_length:
max_pos = i
max_length = table[i][0]
print(table[max_pos][0])
print(table[max_pos][1])
longest_subs([6, 1, 5, 3, 1, 7, 2, 5, 7, 4])
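# For the call above, the longest strictly increasing subsequence has length 4
# (e.g. 1, 2, 5, 7), so the script prints 4 and then the chosen subsequence's
# digits concatenated into a single string.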
| [
"[email protected]"
] | |
b43371ed5b350666c780f23524655c92cfef2f34 | 7c7236aa95ebebe241f04b98d55f3033b19dadc2 | /cms/venv/Scripts/pip3.6-script.py | c9130b7fc695e3c6029ee434982930129888e925 | [] | no_license | taisuo/cms | 57d792bb47d85bf6a4a39558a1bc34457a355c26 | dd8baa834d6426a2ce7406ea0b74eab252ef7789 | refs/heads/master | 2020-08-03T13:49:44.754754 | 2019-10-08T04:00:21 | 2019-10-08T04:00:21 | 211,765,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | #!G:\pythonwork\server\cms\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
8cab479aead0b05b07915bd2b9ec453d151bf9d4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_capsules.py | 32a574bf1d186e135bc230087ec65675e0aa1fde | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _CAPSULES():
def __init__(self,):
self.name = "CAPSULES"
self.definitions = capsule
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['capsule']
| [
"[email protected]"
] | |
73450c3296e8034de7cb80a6193f3d45d296b9e8 | dad081459d204349db0852f8e55a366b36b52b37 | /venv/bin/wheel | 414fdf58787336146cca47c1e5cb93c84f62d631 | [] | no_license | DennisMufasa/FirstFlaskApps | 78241f4be6145132bb6b03e692fc4d9d62b83687 | b5098718c1824077a6390a7dcfd924ad728d607a | refs/heads/master | 2020-03-17T17:19:04.375419 | 2018-05-17T08:34:00 | 2018-05-17T08:34:00 | 133,783,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/home/mufasa/PycharmProjects/practiseII/venv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
bede46778ebd5c5b26490fecc38fe70f71dea335 | b3ee8faf7712be6edbb2e800602784125082fe90 | /tensorflow/contrib/distribute/python/parameter_server_strategy_test.py | adfe3e8b020521d9c2c409da7c6d79e0ba060330 | [
"Apache-2.0"
] | permissive | AudioStreamTV/tensorflow | dc6b657012e368c5c9bc84c370ec51d49ab5f1fc | 7277ed8ed2da84b227295216632dec52a81f63b3 | refs/heads/master | 2020-03-27T02:43:28.244731 | 2018-08-27T08:39:42 | 2018-08-27T08:39:42 | 145,813,674 | 0 | 0 | Apache-2.0 | 2018-08-23T06:55:46 | 2018-08-23T06:55:46 | null | UTF-8 | Python | false | false | 17,881 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ParameterServerStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python.eager import context
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_util
from tensorflow.python.training import distribution_strategy_context
class ParameterServerStrategyTest(multi_worker_test_base.MultiWorkerTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._workers, cls._ps = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2)
cls._cluster_spec = {
run_config.TaskType.WORKER: [
'fake_worker_0', 'fake_worker_1', 'fake_worker_2'
],
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
def setUp(self):
self._result = 0
self._lock = threading.Lock()
self._init_condition = threading.Condition()
self._init_reached = 0
self._finish_condition = threading.Condition()
self._finish_reached = 0
super(ParameterServerStrategyTest, self).setUp()
def _get_test_objects(self, task_type, task_id, num_gpus):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=num_gpus)
if not task_type:
return distribution, ''
distribution.configure(
cluster_spec=self._cluster_spec, task_type=task_type, task_id=task_id)
return distribution, self._workers[task_id].target
def _test_device_assignment_distributed(self, task_type, task_id, num_gpus):
worker_device = '/job:%s/replica:0/task:%d' % (task_type, task_id)
d, _ = self._get_test_objects(task_type, task_id, num_gpus)
with ops.Graph().as_default(), \
self.test_session(target=self._workers[0].target) as sess, \
d.scope():
# Define a variable outside the call_for_each_tower scope. This is not
# recommended.
n = variable_scope.get_variable('n', initializer=10.0)
self.assertEqual(n.device, '/job:ps/task:0')
def model_fn():
if num_gpus == 0:
last_part_device = 'device:CPU:0'
else:
last_part_device = (
'device:GPU:%d' %
distribution_strategy_context.get_tower_context().tower_id)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, worker_device + '/' + last_part_device)
self.assertEqual(b.device, worker_device + '/' + last_part_device)
self.assertEqual(c.device, worker_device + '/' + last_part_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/job:worker/task:0'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
# The variable x is on the task 1 since the device_function has been
# called once before the model_fn.
self.assertEqual(x.device, '/job:ps/task:1')
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device,
'/job:worker/replica:0/task:0/%s' % last_part_device)
# The colocate_vars_with can override the distribution's device.
with d.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(y.device, '/job:ps/task:1')
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(z.device, '/job:ps/task:0')
self.assertNotEqual(z.device, x.device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, worker_device + '/' + last_part_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, worker_device + '/device:CPU:1')
        # This ops.colocate_with will be ignored when defining a variable but not
# for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
v = variable_scope.get_variable('v', initializer=30.0)
h = f + 1.0
self.assertIn('/job:ps/', u.device)
self.assertIn('/job:ps/', v.device)
# u and v are on different parameter servers.
self.assertTrue(u.device != x.device or v.device != x.device)
self.assertTrue(u.device == x.device or v.device == x.device)
# Here h is not on one worker. Note h.device is canonical while x.device
# is not but.
self.assertIn('/job:ps/', h.device)
return y_add, z_add, f
y, z, f = d.call_for_each_tower(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testDeviceAssignmentDistributed(self, num_gpus):
self._test_device_assignment_distributed('worker', 1, num_gpus)
def _test_device_assignment_local(self,
d,
compute_device='CPU',
variable_device='CPU',
num_gpus=0):
with ops.Graph().as_default(), \
self.test_session(target=self._workers[0].target) as sess, \
d.scope():
def model_fn():
if 'CPU' in compute_device:
tower_compute_device = '/device:CPU:0'
else:
tower_compute_device = (
'/device:GPU:%d' %
distribution_strategy_context.get_tower_context().tower_id)
tower_compute_device = device_util.canonicalize(tower_compute_device)
if 'CPU' in variable_device:
tower_variable_device = '/device:CPU:0'
else:
tower_variable_device = (
'/device:GPU:%d' %
distribution_strategy_context.get_tower_context().tower_id)
tower_variable_device = device_util.canonicalize(tower_variable_device)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, tower_compute_device)
self.assertEqual(b.device, tower_compute_device)
self.assertEqual(c.device, tower_compute_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/device:GPU:2'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
self.assertEqual(
device_util.canonicalize(x.device), tower_variable_device)
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device, device_util.canonicalize('/device:GPU:2'))
# The colocate_vars_with can override the distribution's device.
with d.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(
device_util.canonicalize(y.device), tower_variable_device)
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(
device_util.canonicalize(z.device), tower_variable_device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, tower_compute_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, device_util.canonicalize('/device:CPU:1'))
        # This ops.colocate_with will be ignored when defining a variable but not
# for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
h = f + 1.0
self.assertEqual(
device_util.canonicalize(u.device), tower_variable_device)
self.assertEqual(device_util.canonicalize(x.device), h.device)
return y_add, z_add, f
y, z, f = d.call_for_each_tower(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
def testDeviceAssignmentLocalCPU(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=0)
self._test_device_assignment_local(
distribution, compute_device='CPU', variable_device='CPU', num_gpus=0)
def testDeviceAssignmentLocalOneGPU(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=1)
self._test_device_assignment_local(
distribution, compute_device='GPU', variable_device='GPU', num_gpus=1)
def testDeviceAssignmentLocalTwoGPUs(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_device_assignment_local(
distribution, compute_device='GPU', variable_device='CPU', num_gpus=2)
def _test_simple_increment(self, task_type, task_id, num_gpus):
d, master_target = self._get_test_objects(task_type, task_id, num_gpus)
if hasattr(d, '_cluster_spec') and d._cluster_spec:
num_workers = len(d._cluster_spec.as_dict().get('worker',
['dummy_worker']))
else:
num_workers = 1
with ops.Graph().as_default(), \
self.test_session(target=master_target) as sess, \
d.scope():
def model_fn():
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We explicitly make a constant tensor here to avoid complaints about
# summing non-distributed values.
one = constant_op.constant(1.0)
x_add = x.assign_add(one, use_locking=True)
y_add = y.assign_add(one, use_locking=True)
train_op = control_flow_ops.group([x_add, y_add])
return x, y, train_op
x, y, train_op = d.call_for_each_tower(model_fn)
train_op = d.group(d.unwrap(train_op))
if context.num_gpus() < d._num_gpus_per_worker:
return True
if task_id == 0:
variables.global_variables_initializer().run()
# Workers waiting for chief worker's initializing variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != num_workers:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
sess.run(train_op)
# Wait for other workers to finish training.
self._finish_condition.acquire()
self._finish_reached += 1
while self._finish_reached != num_workers:
self._finish_condition.wait()
self._finish_condition.notify_all()
self._finish_condition.release()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 10.0 + 1.0 * num_workers * d.num_towers)
self.assertEqual(y_val, 20.0 + 1.0 * num_workers * d.num_towers)
return (x_val == 10.0 + 1.0 * num_workers * d.num_towers and
y_val == 20.0 + 1.0 * num_workers * d.num_towers)
def _test_minimize_loss_graph(self, task_type, task_id, num_gpus):
d, master_target = self._get_test_objects(task_type, task_id, num_gpus)
with ops.Graph().as_default(), \
self.test_session(target=master_target) as sess, \
d.scope():
l = core.Dense(1, use_bias=False)
def loss_fn(x):
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
# TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
# multiple graphs (b/111216820).
def grad_fn(x):
loss = loss_fn(x)
var_list = (
variables.trainable_variables() + ops.get_collection(
ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
grads = gradients.gradients(loss, var_list)
ret = list(zip(grads, var_list))
return ret
def update(v, g):
return v.assign_sub(0.05 * g, use_locking=True)
one = d.broadcast(constant_op.constant([[1.]]))
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.call_for_each_tower(grad_fn, one)
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
# TODO(yuefengz): support non-Mirrored variable as destinations.
g = d.reduce(
variable_scope.VariableAggregation.SUM, g, destinations=v)
with ops.control_dependencies(d.unwrap(d.update(v, update, g))):
after_list.append(d.read_var(v))
return before_list, after_list
before_out, after_out = step()
if context.num_gpus() < d._num_gpus_per_worker:
return True
if task_id == 0:
variables.global_variables_initializer().run()
# Workers waiting for chief worker's initializing variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != 3:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
return error_after < error_before
def testSimpleBetweenGraph(self):
self._run_between_graph_clients(self._test_simple_increment,
self._cluster_spec, 0)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testLocalSimpleIncrement(self, num_gpus):
self._test_simple_increment(None, 0, num_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testMinimizeLossGraph(self, num_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, num_gpus)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
41548045a42748d17ade7d29eac099167198820d | dd32c5788caab10641c055b89c8c08f7b8a05361 | /pippin/config.py | 427652c0d1c6b105281b48ba0a698b382be69a79 | [
"MIT"
] | permissive | jcraig5/Pippin | bae71ab24e819f02ca732d18560cfe3543b18050 | 85db01173dd4a4f12af39cd3a2bad3a713e8767d | refs/heads/master | 2020-05-31T09:55:08.416338 | 2019-06-13T16:45:39 | 2019-06-13T16:45:39 | 190,225,226 | 0 | 0 | MIT | 2019-06-13T16:45:40 | 2019-06-04T15:09:56 | Python | UTF-8 | Python | false | false | 3,032 | py | import configparser
import inspect
import os
import logging
import hashlib
import shutil
import os
import shutil
import stat
def singleton(fn):
instance = None
def get(*args, **kwargs):
nonlocal instance
if instance is None:
instance = fn(*args, **kwargs)
return instance
return get
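# Usage note (illustrative): a function decorated with @singleton is evaluated at most
# once; later calls return the cached result. get_config() below relies on this so that
# cfg.ini is parsed only once per process.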
@singleton
def get_config():
filename = os.path.abspath(os.path.dirname(inspect.stack()[0][1]) + "/../cfg.ini")
config = configparser.ConfigParser()
config.read(filename)
return config
def get_output_dir():
output_dir = get_config()['OUTPUT']['output_dir']
if output_dir.startswith("$"):
output_dir = os.path.expandvars(output_dir)
elif not output_dir.startswith("/"):
output_dir = os.path.abspath(os.path.dirname(inspect.stack()[0][1]) + "/../" + output_dir)
return output_dir
def get_output_loc(path):
if "$" in path:
path = os.path.expandvars(path)
if path.startswith("/"):
return path
else:
return os.path.join(get_output_dir(), path)
def get_hash(input_string):
return hashlib.sha256(input_string.encode('utf-8')).hexdigest()
@singleton
def get_logger():
return logging.getLogger("pippin")
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True, mode=0o775)
chown_dir(path)
def copytree(src, dst, symlinks=False, ignore=None):
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def chown_dir(directory):
global_config = get_config()
logger = get_logger()
try:
shutil.chown(directory, group=global_config["SNANA"]["group"])
except Exception as e:
logger.debug(str(e))
return
for root, dirs, files in os.walk(directory):
for d in dirs:
try:
shutil.chown(os.path.join(root, d), group=global_config["SNANA"]["group"])
except Exception:
logger.debug(f"Chown error: {os.path.join(root, d)}")
for f in files:
try:
shutil.chown(os.path.join(root, f), group=global_config["SNANA"]["group"])
except Exception:
logger.debug(f"Chown error: {os.path.join(root, f)}")
if __name__ == "__main__":
c = get_config()
print(c.sections())
print(c.get("SNANA", "sim_dir"))
print(c["OUTPUT"].getint("ping_frequency"))
| [
"[email protected]"
] | |
0b227badbe12d72eda1d08bf7b85c82dca9a0cc6 | 008c5aa9d132fa2549e089ae8df2ef1ce15ad020 | /response_timeout/middleware.py | dd8597205cd6de79d9e8ffdb02f0c87431542156 | [
"MIT"
] | permissive | movermeyer/django-response-timeout | 0c0cedc5c838011d314c57e4ab42252639b350f7 | 38f7462ab71d967749efc3be914e2a7a2df80f33 | refs/heads/master | 2021-01-25T14:33:17.576026 | 2013-07-20T13:56:44 | 2013-07-20T13:56:44 | 123,708,880 | 0 | 0 | MIT | 2018-03-03T16:17:38 | 2018-03-03T16:17:38 | null | UTF-8 | Python | false | false | 571 | py | from django.conf import settings
from django.utils.cache import patch_response_headers
class SetCacheTimeoutMiddleware(object):
"""
Request-phase middleware that sets the timeout of each response based on
the RESPONSE_CACHE_SECONDS
If using with UpdateCacheMiddleware, must be placed after so that it sets
the timeout before the cache is updated with the response.
"""
def process_response(self, request, response):
timeout = settings.RESPONSE_CACHE_SECONDS
patch_response_headers(response, timeout)
return response
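# Configuration sketch (assumed settings.py values, not part of this module):
# the timeout comes from RESPONSE_CACHE_SECONDS, and this middleware should sit
# below UpdateCacheMiddleware so its process_response runs first and the
# patched headers are what get cached.
#
# RESPONSE_CACHE_SECONDS = 60
# MIDDLEWARE_CLASSES = (
#     'django.middleware.cache.UpdateCacheMiddleware',
#     'response_timeout.middleware.SetCacheTimeoutMiddleware',
#     'django.middleware.common.CommonMiddleware',
#     'django.middleware.cache.FetchFromCacheMiddleware',
# )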
| [
"[email protected]"
] | |
4cdc462c66dbf52bd65ffa90d47ca5bfebbb3f68 | 6e3e1834eaad3a0c97bf645238e59a0599e047b4 | /blog/urls/entries.py | b1a1b1567818f862b31ae4101bac790aab9a1abd | [
"JSON"
] | permissive | davogler/davsite | 2dc42bfebb476d94f92520e8829999859deae80b | edd8ceed560690fa2c3eefde236416ffba559a2e | refs/heads/master | 2021-01-19T06:31:20.655909 | 2014-01-03T19:04:13 | 2014-01-03T19:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | from django.conf.urls.defaults import *
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView, DateDetailView
from blog.models import Entry
entry_info_dict = {'queryset':Entry.live.all(), 'date_field': 'pub_date', }
urlpatterns = patterns('',
# Pagination for the equivalent of archive_index generic view.
# The url is of the form http://host/page/4/
# In urls.py for example, ('^blog/page/(?P<page>\d)/$', get_archive_index),
url(r'^$', 'blog.views.get_archive_index_first', ),
url(r'^page/(?P<page>\d)/$', 'blog.views.get_archive_index', ),
#(r'^$', 'django.views.generic.date_based.archive_index', entry_info_dict, 'blog_entry_archive_index'),
#(r'^(?P<year>\d{4})/$', YearArchiveView.as_view(), entry_info_dict, 'blog_entry_archive_year'),
url(r'^(?P<year>\d{4})/$', YearArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_year'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', MonthArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_month'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', DayArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_day'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', DateDetailView.as_view(**entry_info_dict), name= 'blog_entry_detail'),
) | [
"[email protected]"
] | |
846c5cecb1d4a72e2e3b0b75e22f032feccc41ec | 5b8d0cd314fdd4537bc77ce9209ca903694b02e8 | /datasets/hate_speech_portuguese/hate_speech_portuguese.py | 577644e09ec2d76215be6cde5bf21bf6ff2ee6aa | [
"Apache-2.0"
] | permissive | amankhandelia/datasets | 97106f6d98b9cd17c50b1bf0c91f4ced6240dfd6 | 1a138f9bd2d1b62a255736375001bf918d36508d | refs/heads/master | 2023-06-21T01:08:25.212378 | 2021-07-26T13:27:59 | 2021-07-26T13:27:59 | 389,644,974 | 1 | 0 | Apache-2.0 | 2021-07-26T14:36:09 | 2021-07-26T13:36:08 | null | UTF-8 | Python | false | false | 5,287 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""
import csv
import datasets
_CITATION = """\
@inproceedings{fortuna-etal-2019-hierarchically,
title = "A Hierarchically-Labeled {P}ortuguese Hate Speech Dataset",
author = "Fortuna, Paula and
Rocha da Silva, Jo{\\~a}o and
Soler-Company, Juan and
Wanner, Leo and
Nunes, S{\'e}rgio",
booktitle = "Proceedings of the Third Workshop on Abusive Language Online",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W19-3510",
doi = "10.18653/v1/W19-3510",
pages = "94--104",
abstract = "Over the past years, the amount of online offensive speech has been growing steadily. To successfully cope with it, machine learning are applied. However, ML-based techniques require sufficiently large annotated datasets. In the last years, different datasets were published, mainly for English. In this paper, we present a new dataset for Portuguese, which has not been in focus so far. The dataset is composed of 5,668 tweets. For its annotation, we defined two different schemes used by annotators with different levels of expertise. Firstly, non-experts annotated the tweets with binary labels ({`}hate{'} vs. {`}no-hate{'}). Secondly, expert annotators classified the tweets following a fine-grained hierarchical multiple label scheme with 81 hate speech categories in total. The inter-annotator agreement varied from category to category, which reflects the insight that some types of hate speech are more subtle than others and that their detection depends on personal perception. This hierarchical annotation scheme is the main contribution of the presented work, as it facilitates the identification of different types of hate speech and their intersections. To demonstrate the usefulness of our dataset, we carried a baseline classification experiment with pre-trained word embeddings and LSTM on the binary classified data, with a state-of-the-art outcome.",
}
"""
_DESCRIPTION = """\
Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate').
"""
_HOMEPAGE = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset"
_LICENSE = "Unknown"
_URL = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset/raw/master/2019-05-28_portuguese_hate_speech_binary_classification.csv"
class HateSpeechPortuguese(datasets.GeneratorBasedBuilder):
"""Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"text": datasets.Value("string"),
"label": datasets.ClassLabel(names=["no-hate", "hate"]),
"hatespeech_G1": datasets.Value("string"),
"annotator_G1": datasets.Value("string"),
"hatespeech_G2": datasets.Value("string"),
"annotator_G2": datasets.Value("string"),
"hatespeech_G3": datasets.Value("string"),
"annotator_G3": datasets.Value("string"),
}
),
supervised_keys=("text", "label"),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_file = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": data_file,
},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, encoding="utf-8") as f:
reader = csv.reader(f)
for id_, row in enumerate(reader):
if id_ == 0:
continue
yield id_, {
"text": row[0],
"label": "hate" if row[1] == "1" else "no-hate",
"hatespeech_G1": row[2],
"annotator_G1": row[3],
"hatespeech_G2": row[4],
"annotator_G2": row[5],
"hatespeech_G3": row[6],
"annotator_G3": row[7],
}
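# Usage sketch (illustrative): once this script is registered with the datasets
# library under the name "hate_speech_portuguese", the data can be loaded with
# load_dataset; the split and field names follow the definitions above.
#
# from datasets import load_dataset
# ds = load_dataset("hate_speech_portuguese", split="train")
# print(ds[0]["text"], ds[0]["label"])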
| [
"[email protected]"
] | |
6a618c1bc31e5c1d4847dc2b5e0c0a32c900fa48 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/range_5f0e3a0ea1418e640797b57a7df0b8d2.py | d86e43c9777830cf80b69dc5421183e66abe8cff | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 84,931 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Range(Base):
"""
    The Range class encapsulates a list of range resources that are managed by the user.
A list of resources can be retrieved from the server using the Range.find() method.
The list can be managed by the user by using the Range.add() and Range.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'range'
def __init__(self, parent):
super(Range, self).__init__(parent)
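    # Illustrative usage sketch (assumption, not part of the generated file):
    # a Range list is normally reached through its parent endpoint object.
    #
    #   ranges = parent_endpoint.Range.find()    # retrieve existing ranges
    #   new_range = parent_endpoint.Range.add()  # create a new range resource
    #   new_range.remove()                       # delete it again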
@property
def AncpRange(self):
"""An instance of the AncpRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancprange_946e827bfd04cdf9c665f7df35ba1803.AncpRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancprange_946e827bfd04cdf9c665f7df35ba1803 import AncpRange
return AncpRange(self)
@property
def Dhcpv6ClientRange(self):
"""An instance of the Dhcpv6ClientRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6clientrange_c261fab1e5f4f5612eb92fd384e011d8.Dhcpv6ClientRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6clientrange_c261fab1e5f4f5612eb92fd384e011d8 import Dhcpv6ClientRange
return Dhcpv6ClientRange(self)
@property
def Dhcpv6PdClientRange(self):
"""An instance of the Dhcpv6PdClientRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6pdclientrange_61023dadafd9beab8caf1798c0ec1d27.Dhcpv6PdClientRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6pdclientrange_61023dadafd9beab8caf1798c0ec1d27 import Dhcpv6PdClientRange
return Dhcpv6PdClientRange(self)
@property
def Dhcpv6ServerRange(self):
"""An instance of the Dhcpv6ServerRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6serverrange_a0ebd8c7a9fcbd4a9fa332027f092368.Dhcpv6ServerRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpv6serverrange_a0ebd8c7a9fcbd4a9fa332027f092368 import Dhcpv6ServerRange
return Dhcpv6ServerRange(self)
@property
def Dot1xRange(self):
"""An instance of the Dot1xRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dot1xrange_34518902fa4163e4ef2b334cba6bb765.Dot1xRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dot1xrange_34518902fa4163e4ef2b334cba6bb765 import Dot1xRange
return Dot1xRange(self)
@property
def EsmcRange(self):
"""An instance of the EsmcRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.esmcrange_82b49109fd8506c97f4801efbd754fcb.EsmcRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.esmcrange_82b49109fd8506c97f4801efbd754fcb import EsmcRange
return EsmcRange(self)
@property
def IgmpMldRange(self):
"""An instance of the IgmpMldRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.igmpmldrange_b922833659914296e3330f9ecd7fb136.IgmpMldRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.igmpmldrange_b922833659914296e3330f9ecd7fb136 import IgmpMldRange
return IgmpMldRange(self)
@property
def IgmpQuerierRange(self):
"""An instance of the IgmpQuerierRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.igmpquerierrange_d0501301b0dcd3ec2ca10a1e8080369a.IgmpQuerierRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.igmpquerierrange_d0501301b0dcd3ec2ca10a1e8080369a import IgmpQuerierRange
return IgmpQuerierRange(self)
@property
def IptvRange(self):
"""An instance of the IptvRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvrange_b754940c363e5e4d86292c0d1680f862.IptvRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvrange_b754940c363e5e4d86292c0d1680f862 import IptvRange
return IptvRange(self)
@property
def MacRange(self):
"""An instance of the MacRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.macrange_bf08933d8709d332aac5e00af7dbbf0b.MacRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.macrange_bf08933d8709d332aac5e00af7dbbf0b import MacRange
return MacRange(self)._select()
@property
def PppoxRange(self):
"""An instance of the PppoxRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.pppoxrange_219f521228db41aee7566fa1ea3e759e.PppoxRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.pppoxrange_219f521228db41aee7566fa1ea3e759e import PppoxRange
return PppoxRange(self)._select()
@property
def PtpRangeOverMac(self):
"""An instance of the PtpRangeOverMac class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ptprangeovermac_d7beece9aaa2cd207fe97d2e82bf468f.PtpRangeOverMac)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ptprangeovermac_d7beece9aaa2cd207fe97d2e82bf468f import PtpRangeOverMac
return PtpRangeOverMac(self)
@property
def StaticHostsRange(self):
"""An instance of the StaticHostsRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.statichostsrange_0b2a3893448d98f79f73a87e9082ada0.StaticHostsRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.statichostsrange_0b2a3893448d98f79f73a87e9082ada0 import StaticHostsRange
return StaticHostsRange(self)._select()
@property
def VicClientRange(self):
"""An instance of the VicClientRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vicclientrange_8770e6f5345e628c86b8dfb111fc902c.VicClientRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vicclientrange_8770e6f5345e628c86b8dfb111fc902c import VicClientRange
return VicClientRange(self)
@property
def VlanRange(self):
"""An instance of the VlanRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanrange_15568b5f3382e6953010f402330eba5a.VlanRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanrange_15568b5f3382e6953010f402330eba5a import VlanRange
return VlanRange(self)._select()
def add(self):
"""Adds a new range node on the server and retrieves it in this instance.
Returns:
self: This instance with all currently retrieved range data using find and the newly added range data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the range data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self):
"""Finds and retrieves range data from the server.
All named parameters support regex and can be used to selectively retrieve range data from the server.
By default the find method takes no parameters and will retrieve all range data from the server.
Returns:
self: This instance with matching range data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of range data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the range data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2:list, Arg3:enum)
Args:
args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to disable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to enable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
def PppoxCancel(self):
"""Executes the pppoxCancel operation on the server.
Cancel ending PPP operations
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('pppoxCancel', payload=payload, response_object=None)
def PppoxConfigure(self):
"""Executes the pppoxConfigure operation on the server.
Configure PPPoX protocol on selected ranges.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('pppoxConfigure', payload=payload, response_object=None)
def PppoxDeconfigure(self):
"""Executes the pppoxDeconfigure operation on the server.
Deconfigure PPPoX protocol on selected ranges.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('pppoxDeconfigure', payload=payload, response_object=None)
def PppoxPause(self, *args, **kwargs):
"""Executes the pppoxPause operation on the server.
Pause negotiation for PPP sessions in specified range
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxPause()
pppoxPause(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxPause', payload=payload, response_object=None)
def PppoxResume(self, *args, **kwargs):
"""Executes the pppoxResume operation on the server.
Resume previously paused negotiation for PPP sessions in specified range
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxResume()
pppoxResume(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxResume', payload=payload, response_object=None)
def PppoxRetry(self, *args, **kwargs):
"""Executes the pppoxRetry operation on the server.
Retry negotiating PPP sessions if specified range timed out
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxRetry()
pppoxRetry(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxRetry', payload=payload, response_object=None)
def PppoxSendNdpRs(self, *args, **kwargs):
"""Executes the pppoxSendNdpRs operation on the server.
Send RS on NDP for IPv6 ports
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxSendNdpRs(Arg2:number)
Args:
args[0] is Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
pppoxSendNdpRs(Arg2:number, Arg3:enum)
Args:
args[0] is Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
args[1] is Arg3 (str(async|sync)): IPv6 NDP rate for NS messages.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxSendNdpRs', payload=payload, response_object=None)
def PppoxStart(self, *args, **kwargs):
"""Executes the pppoxStart operation on the server.
Negotiate PPP sessions for selected ranges
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxStart()
pppoxStart(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxStart', payload=payload, response_object=None)
def PppoxStop(self, *args, **kwargs):
"""Executes the pppoxStop operation on the server.
Teardown PPP sessions for selected ranges
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
pppoxStop()
pppoxStop(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pppoxStop', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Negotiate sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
start()
start(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRoute
r/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpEnbEndpoint,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/an
cpRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeEndpoint,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampCli
ent,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet
/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twamp
ControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/eg
tpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vpor
t/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def StaticHostsStart(self, *args, **kwargs):
"""Executes the staticHostsStart operation on the server.
Negotiate StaticHosts for selected plugins and ranges
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
staticHostsStart()
staticHostsStart(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('staticHostsStart', payload=payload, response_object=None)
def StaticHostsStop(self, *args, **kwargs):
"""Executes the staticHostsStop operation on the server.
Release StaticHosts for selected plugins and ranges
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
staticHostsStop()
staticHostsStop(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('staticHostsStop', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Teardown sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
stop()
stop(Arg2:enum)
Args:
args[0] is Arg2 (str(async|sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRoute
r/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpEnbEndpoint,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/an
cpRange,/vport/protocolStack/atm/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeEndpoint,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampCli
ent,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet
/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twamp
ControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpEnbEndpoint/ueSecondaryRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpMmeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/eg
tpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vpor
t/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
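    # Illustrative usage sketch (not part of the generated API): once an
    # instance of this generated class has been obtained from an IxNetwork
    # REST session (e.g. via the ixnetwork_restpy client), the operations
    # above are invoked directly on it, for example:
    #   node.Start()             # negotiate sessions for the selected ranges
    #   node.StaticHostsStart()  # negotiate StaticHosts for selected plugins
    #   node.Stop()              # tear the sessions back down
    # Session/connection setup is omitted here and is deployment specific;
    # ``node`` is a placeholder name.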
| [
"[email protected]"
] | |
b2b0bcb274470509901337f8aafa2e13ec47fc33 | 278d7f4467a112416d1adfbcd3218033ff0fd9b3 | /configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py | ace084ddd452aab6f3f34cc7bc60057b5b0e2962 | [] | no_license | Young-1217/detection | e3d67938b454e955b5b7a82d5ae222e62f9545fb | 6760288dac92e00ddc3e813ed0e1363c1fa1ce2d | refs/heads/main | 2023-06-01T21:41:37.998947 | 2021-06-21T10:03:01 | 2021-06-21T10:03:01 | 371,868,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | _base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
total_epochs = 24
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
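# Illustrative note (not part of the original config): with a standard
# mmdetection checkout this config is typically consumed by the stock
# training entry point, e.g.
#   python tools/train.py configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py
# The command assumes the upstream repository layout.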
| [
"[email protected]"
] | |
c56df2a10e85a364cc8f8a5be1347de8cc461462 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_068/ch152_2020_06_21_21_06_14_910169.py | 9ecba31ee2d4056ecb74c2d89f3f12ef2d768425 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | def verifica_preco(nome, n_cor, c_preco):
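    """Map an item name to its colour via n_cor, then return that colour's price from c_preco."""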
cor = n_cor[nome]
preco = c_preco[cor]
return preco | [
"[email protected]"
] | |
90d7e06f3f8f8cb42fee6802b20f759e7005b963 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_scipy_numpy/source/sklearn/ensemble/weight_boosting.py | 9921afb4c5eddd2d815d5a2d70818839b8f59e9b | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 40,733 | py | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overriden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
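    # Illustrative monitoring sketch (assumes a fitted estimator ``model`` and
    # held-out data ``X_test``/``y_test``; the names are placeholders):
    #
    #     for i, score in enumerate(model.staged_score(X_test, y_test)):
    #         print("boost %d: score %.3f" % (i, score))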
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
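    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only)::
        from sklearn.datasets import make_classification
        from sklearn.ensemble import AdaBoostClassifier
        X, y = make_classification(n_samples=200, random_state=0)
        clf = AdaBoostClassifier(n_estimators=100, random_state=0)
        clf.fit(X, y)
        clf.score(X, y)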
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
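        # Inverse-CDF sampling: draw uniform variates and locate them in the
        # cumulative weight distribution, which resamples indices with
        # probability proportional to sample_weight (equivalent to
        # np.random.choice with p=sample_weight).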
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
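        # AdaBoost.R2 update: beta = err / (1 - err) measures the estimator's
        # lack of confidence. Its voting weight is log(1 / beta), and (except
        # on the last iteration) each sample weight is multiplied by
        # beta ** (1 - error), shrinking the weight of well-predicted samples.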
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
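
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes this file is runnable as-is with numpy installed, and
# uses the AdaBoostClassifier / AdaBoostRegressor classes defined above. The
# toy data and hyper-parameter values are illustrative choices only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as _np

    rng = _np.random.RandomState(0)

    # Classification: two noisy Gaussian blobs, one per class.
    X_clf = _np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 2.0])
    y_clf = _np.array([0] * 50 + [1] * 50)
    clf = AdaBoostClassifier(n_estimators=20, random_state=0)
    clf.fit(X_clf, y_clf)
    print("classifier train accuracy:", clf.score(X_clf, y_clf))
    print("first sample class probabilities:", clf.predict_proba(X_clf[:1]))

    # Regression: a noisy sine curve fitted with the AdaBoost.R2 scheme.
    X_reg = _np.linspace(0, 6, 100)[:, _np.newaxis]
    y_reg = _np.sin(X_reg).ravel() + rng.normal(0, 0.1, 100)
    reg = AdaBoostRegressor(n_estimators=20, random_state=0)
    reg.fit(X_reg, y_reg)
    print("regressor train R^2:", reg.score(X_reg, y_reg))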
| [
"[email protected]"
] | |
f4c5184a6cca72d60a9a2cf2d98f15f9d2314811 | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /around_square.py | b2b414eff97fad0c386ab0b4c9d138fc545e7555 | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import math
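# AtCoder-style task: read an integer N from standard input and print the
# largest perfect square that does not exceed N.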
N = int(input())
res = 0
for i in range(int(math.sqrt(N)) + 2):
    val = i ** 2
    # Only count squares that do not exceed N; the explicit bound also guards
    # against math.sqrt() rounding error for large N.
    if val <= N and val > res:
        res = val
print(res)
| [
"[email protected]"
] | |
02e4fbe4535d2b7c0983b305399c7b442082d716 | 251d56a94b0d879a07a3d47a41f21258fa452a1f | /soqt/lilac.py | 4127feb783bb69597b471987d0d06cc245cd1b83 | [] | no_license | paroque28/arch4edu | d9bb5f5af008989454fe71677621149ae45c58cc | 24df17749cf556ed668c3a886a698ecbdcca211c | refs/heads/master | 2020-12-26T13:33:43.987122 | 2020-01-27T16:41:41 | 2020-01-27T16:41:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | #!/usr/bin/env python3
from lilaclib import *
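# lilac build recipe for the arch4edu repository: rebuild the soqt package
# whenever its AUR entry updates. The coin package is listed as an in-repo
# dependency, and the build runs in the extra-x86_64 chroot using the
# standard AUR pre/post build hooks.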
maintainers = [{'github': 'petronny'}]
update_on = [{'aur': None}]
repo_depends = ['coin']
build_prefix = 'extra-x86_64'
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
] | |
e007362f985c8d2e6793d6f6d5e7ba3a6cdecbdd | d6254d3a0996d7977816c167bc2af76677a52b87 | /bigsi/cmds/search.py | cdbdd27ce5975e51c6880a3a79b9cbab407014af | [
"MIT"
] | permissive | rpetit3/BIGSI | f95c57a58e4ccfdd3d098737d76962a44565163e | d3e9a310e6c91c887d7917ced5609b6002a67623 | refs/heads/master | 2020-04-03T11:30:04.683289 | 2018-06-08T14:47:33 | 2018-06-08T14:47:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | #! /usr/bin/env python
from __future__ import print_function
# from bigsi.utils import min_lexo
from bigsi.utils import seq_to_kmers
from bigsi.graph import BIGSI as Graph
import argparse
import os.path
import time
from Bio import SeqIO
import json
import logging
import sys
logger = logging.getLogger(__name__)
from bigsi.utils import DEFAULT_LOGGING_LEVEL
logger.setLevel(DEFAULT_LOGGING_LEVEL)
import operator
from bigsi.utils import convert_query_kmer
def per(i):
return float(sum(i))/len(i)
def parse_input(infile):
    # Stream (record id, sequence string) pairs from a fasta file.
    with open(infile, 'r') as inf:
        for record in SeqIO.parse(inf, 'fasta'):
            yield (record.id, str(record.seq))
def _search(gene_name, seq, results, threshold, graph, output_format="json", pipe=False, score=False):
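    # Query the graph with a single sequence. In pipe mode the hits are
    # written straight to stdout in the requested format; otherwise they are
    # accumulated (together with the query time) in `results` and returned.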
if pipe:
if output_format == "tsv":
start = time.time()
result = graph.search(seq, threshold=threshold, score=score)
diff = time.time() - start
if result:
for sample_id, percent in result.items():
print(
"\t".join([gene_name, sample_id, str(round(percent["percent_kmers_found"], 2)), str(round(diff, 2))]))
else:
print("\t".join([gene_name, "NA", str(0), str(diff)]))
elif output_format == "fasta":
samples = graph.sample_to_colour_lookup.keys()
print(" ".join(['>', gene_name]))
print(seq)
result = graph.search(seq, threshold=threshold, score=score)
result = sorted(
result.items(), key=operator.itemgetter(1), reverse=True)
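            # NOTE: this branch treats the search values as plain fractions,
            # whereas the tsv branch above expects dicts keyed by
            # 'percent_kmers_found'; which form graph.search() returns may
            # depend on the BIGSI version in use.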
for sample, percent in result:
percent = round(percent * 100, 2)
colour = int(graph.sample_to_colour_lookup.get(sample))
print(
" ".join(['>', gene_name, sample, "kmer-%i coverage %f" % (graph.kmer_size, percent)]))
presence = []
for kmer in seq_to_kmers(seq, graph.kmer_size):
kmer_presence = graph.graph.lookup(
convert_query_kmer(kmer))[colour]
sys.stdout.write(str(int(kmer_presence)))
sys.stdout.write('\n')
else:
result = {}
start = time.time()
result['results'] = graph.search(
seq, threshold=threshold, score=score)
diff = time.time() - start
result['time'] = diff
print(json.dumps({gene_name: result}))
else:
results[gene_name] = {}
start = time.time()
results[gene_name]['results'] = graph.search(
seq, threshold=threshold, score=score)
diff = time.time() - start
results[gene_name]['time'] = diff
return results
def search(seq, fasta_file, threshold, graph, output_format="json", pipe=False, score=False):
if output_format == "tsv":
print("\t".join(
["gene_name", "sample_id", str("kmer_coverage_percent"), str("time")]))
results = {}
if fasta_file is not None:
for gene, seq in parse_input(fasta_file):
results = _search(
gene_name=gene, seq=seq, results=results, threshold=threshold,
graph=graph, output_format=output_format, pipe=pipe, score=score)
else:
results = _search(
gene_name=seq, seq=seq, results=results, threshold=threshold,
graph=graph, output_format=output_format, pipe=pipe, score=score)
return results
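
# Hedged usage sketch (added for illustration; not part of the original CLI).
# The Graph constructor argument below is hypothetical -- check the BIGSI
# documentation for the actual way to open an index.
#
#     graph = Graph("my-bigsi-index")  # hypothetical path/argument
#     hits = search(seq=None, fasta_file="genes.fasta", threshold=0.9,
#                   graph=graph, output_format="json", pipe=False)
#     print(hits)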
| [
"[email protected]"
] |