| Column | Dtype | Range / values |
| --- | --- | --- |
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
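A minimal sketch of streaming and filtering rows with this schema via the Hugging Face `datasets` library; the dataset identifier used below is a placeholder, not the actual name of this dataset.

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the real dataset identifier.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

for row in ds:
    # Keep permissively licensed, non-generated files and inspect their metadata.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break
```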
38c664a5f633c233fdbe24e57b1a41e9ed27b39a | 74b97e20b06a58ada94278f82ce511403fcddf21 | /test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/vendored_sdks/synapse/aio/operations/_sql_pool_vulnerability_assessment_operations.py | 4f82fc427e879b6c9c0ea7004f96261f2bbff7a8 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.az | b171eb495efdb815dc051dface3800c3e5e35b8e | 64f403a5fe74be28e46a90b6b77f8d2bc9a12baf | refs/heads/master | 2023-09-01T13:22:21.784354 | 2022-11-01T02:34:12 | 2022-11-01T02:34:12 | 226,059,721 | 24 | 17 | MIT | 2023-02-08T00:46:07 | 2019-12-05T09:04:00 | Python | UTF-8 | Python | false | false | 18,864 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SQLPoolVulnerabilityAssessmentOperations:
"""SQLPoolVulnerabilityAssessmentOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~synapse_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs
) -> AsyncIterable["models.SQLPoolVulnerabilityAssessmentListResult"]:
"""Lists the vulnerability assessment policies associated with a SQL pool.
Lists the vulnerability assessment policies associated with a SQL pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SQLPoolVulnerabilityAssessmentListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~synapse_management_client.models.SQLPoolVulnerabilityAssessmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SQLPoolVulnerabilityAssessmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SQLPoolVulnerabilityAssessmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments'} # type: ignore
async def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
vulnerability_assessment_name: Union[str, "models.VulnerabilityAssessmentName"],
**kwargs
) -> "models.SQLPoolVulnerabilityAssessment":
"""Gets the Sql pool's vulnerability assessment.
Gets the Sql pool's vulnerability assessment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment.
:type vulnerability_assessment_name: str or ~synapse_management_client.models.VulnerabilityAssessmentName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SQLPoolVulnerabilityAssessment, or the result of cls(response)
:rtype: ~synapse_management_client.models.SQLPoolVulnerabilityAssessment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SQLPoolVulnerabilityAssessment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("vulnerability_assessment_name", vulnerability_assessment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SQLPoolVulnerabilityAssessment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
vulnerability_assessment_name: Union[str, "models.VulnerabilityAssessmentName"],
storage_container_path: Optional[str] = None,
storage_container_sas_key: Optional[str] = None,
storage_account_access_key: Optional[str] = None,
recurring_scans: Optional["models.VulnerabilityAssessmentRecurringScansProperties"] = None,
**kwargs
) -> "models.SQLPoolVulnerabilityAssessment":
"""Creates or updates the Sql pool vulnerability assessment.
Creates or updates the Sql pool vulnerability assessment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment.
:type vulnerability_assessment_name: str or ~synapse_management_client.models.VulnerabilityAssessmentName
:param storage_container_path: A blob storage container path to hold the scan results (e.g.
https://myStorage.blob.core.windows.net/VaScans/). It is required if server level
vulnerability assessment policy doesn't set.
:type storage_container_path: str
:param storage_container_sas_key: A shared access signature (SAS Key) that has write access to
the blob container specified in 'storageContainerPath' parameter. If 'storageAccountAccessKey'
isn't specified, StorageContainerSasKey is required.
:type storage_container_sas_key: str
:param storage_account_access_key: Specifies the identifier key of the storage account for
vulnerability assessment scan results. If 'StorageContainerSasKey' isn't specified,
storageAccountAccessKey is required.
:type storage_account_access_key: str
:param recurring_scans: The recurring scans settings.
:type recurring_scans: ~synapse_management_client.models.VulnerabilityAssessmentRecurringScansProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SQLPoolVulnerabilityAssessment, or the result of cls(response)
:rtype: ~synapse_management_client.models.SQLPoolVulnerabilityAssessment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SQLPoolVulnerabilityAssessment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
parameters = models.SQLPoolVulnerabilityAssessment(storage_container_path=storage_container_path, storage_container_sas_key=storage_container_sas_key, storage_account_access_key=storage_account_access_key, recurring_scans=recurring_scans)
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("vulnerability_assessment_name", vulnerability_assessment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SQLPoolVulnerabilityAssessment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SQLPoolVulnerabilityAssessment', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SQLPoolVulnerabilityAssessment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
vulnerability_assessment_name: Union[str, "models.VulnerabilityAssessmentName"],
**kwargs
) -> None:
"""Removes the database's vulnerability assessment.
Removes the database's vulnerability assessment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment.
:type vulnerability_assessment_name: str or ~synapse_management_client.models.VulnerabilityAssessmentName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("vulnerability_assessment_name", vulnerability_assessment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'} # type: ignore
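# Illustrative usage sketch (not part of the generated module; the client attribute
# name below is assumed, since the owning client class is not defined in this file):
async def _example_list_assessments(client, resource_group_name, workspace_name, sql_pool_name):
    # `list` returns an AsyncItemPaged; iterate it asynchronously to walk every page.
    results = []
    async for assessment in client.sql_pool_vulnerability_assessment.list(
            resource_group_name, workspace_name, sql_pool_name):
        results.append(assessment)
    return results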
| [
"[email protected]"
] | |
57895863065f914aab45070167a5407a01d68969 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_hashish.py | 6f095d1a7cb0b473df03c662bf7368e60180a1e3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py |
# class header
class _HASHISH():
    def __init__(self,):
        self.name = "HASHISH"
        self.definitions = [u'a drug, illegal in many countries, made from the cannabis plant and usually smoked']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1 = [], obj2 = []):
        return self.jsondata
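# A minimal usage sketch (illustrative only; this block is not part of the original module):
if __name__ == "__main__":
    word = _HASHISH()
    print(word.name, word.specie)  # HASHISH nouns
    print(word.run())              # {} -- run() simply returns the empty jsondata dict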
| [
"[email protected]"
] | |
61f25521a8ac9bb244b7920b6aad006c7e980101 | 04fcaa42f982bc6c8de1d3a28e83007a5b8b000d | /tests/tests_indiv_jobs/test_ooziejob.py | 31743f67165d76004d2470b532d5a8319dbf825d | [
"BSD-3-Clause"
] | permissive | tadinve/naga | 26622416db7ff81a256a2f51daac0769763ed711 | 52a789ff79cc20aa999f7bb731a1c3cc3acc27fa | refs/heads/main | 2023-08-12T22:45:19.876256 | 2021-09-24T02:15:42 | 2021-09-24T02:15:42 | 389,231,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | from ctm_python_client.jobs.hadoop.oozie import OozieJob
import os
from ctm_python_client.core.bmc_control_m import CmJobFlow
from ctm_python_client.session.session import Session
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
with open(BASE_PATH + "/.secrets", "r") as fp:
ctm_uri = fp.readline().strip()
ctm_user = fp.readline().strip()
ctm_pwd = fp.readline().strip()
# Create CTM Session
session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd)
# CREATE JOB FLOW
t1_flow = CmJobFlow(
application="Naga0.3_Test", sub_application="TestAllJobs", session=session
)
t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20")
# Define the schedule
months = ["JAN", "OCT", "DEC"]
monthDays = ["ALL"]
weekDays = ["MON", "TUE", "WED", "THU", "FRI"]
fromTime = "0300"
toTime = "2100"
t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime)
# Create Folder
fn = os.path.split(__file__)[-1][:-3]
f1 = t1_flow.create_folder(name=fn)
j1 = OozieJob(
folder=f1,
job_name='oozie',
host="edgenode",
connection_profile="DEV_CLUSTER",
job_properties_file="/home/user/job.properties",
oozie_options=[{'inputDir': '/usr/tucu/inputdir'}, {'outputDir': '/usr/tucu/outputdir'}],
)
t1_flow.add_job(folder=f1, job=j1)
import json
x = t1_flow.deploy()
s = str(x[0])
s = s.replace("'", '"')
s = s.replace("None", '"None"')
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
s = s.replace("\n", "")
j = json.loads(s)
def test_output():
assert j["successful_smart_folders_count"] == 1
| [
"[email protected]"
] | |
99a71c7b9e27e3157cd4de85ab83cc05e523d4bc | d902ac93fbff644ca2868d5836a9f476f3cd91fd | /wq_csv/util.py | 6bbe34d6c95cb08f37c53383ded1fea23ec6d5ee | [] | no_license | NMWDI/WDIExtractors | 5b3c2ad3562449ba6c3c2467284c9cd3e046837f | 2da9aa7a1bd53d58ff7479f4507fffbc15b3bbb2 | refs/heads/master | 2022-12-24T17:19:35.998182 | 2020-10-02T19:22:48 | 2020-10-02T19:22:48 | 265,703,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | # ===============================================================================
# Copyright 2020 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import re
import tempfile
from datetime import datetime
import yaml
from itertools import groupby
from operator import attrgetter, itemgetter
NO_DESCRIPTION = 'No Description Available'
import logging
logger = logging.getLogger('Parser')
logger.setLevel(logging.DEBUG)
def rows_to_yaml(location_name, path, items, wq_tag):
longitude_key = 'Longitude'
latitude_key = 'Latitude'
items = list(items)
item = items[0]
logger.debug('items {}'.format(items))
logger.debug('item {}'.format(item))
try:
wd = float(item['WellDepth'])
except:
wd = ''
obj = {'location': {'name': location_name, 'description': NO_DESCRIPTION},
'sensor': {'name': 'Analytical Water Chemistry', 'description': NO_DESCRIPTION},
'thing': {'name': 'WaterQuality',
'properties': {'welldepth': wd,
'datasource': item['DataSource']},
'description': NO_DESCRIPTION},
'datastream': {'name': '{} Water Quality Datastream'.format(wq_tag), 'description': NO_DESCRIPTION},
'observed_property': {'name': wq_tag, 'description': NO_DESCRIPTION}}
loc = obj['location']
loc['geometry'] = {'type': 'Point', 'coordinates': [float(item[longitude_key]), float(item[latitude_key])]}
ds = obj['datastream']
ds['unitofMeasurement'] = 'ppm'
result = item[wq_tag]
ds['observationType'] = get_observation_type(result)
def obsfactory(i):
pt = i['CollectionDate']
pt = datetime.strptime(pt, '%Y-%m-%d %H:%M:%S.%f')
return '{}.000Z, {}'.format(pt.isoformat(), i[wq_tag])
obj['observations'] = [obsfactory(item) for item in items]
with open(path, 'w') as wf:
yaml.dump(obj, wf)
DOUBLE = re.compile(r'^-?\d+.\d+')
BOOL = re.compile(r'^true|false|t|f')
URI = re.compile(r'^http')
INT = re.compile(r'^-?\d+$')
def get_observation_type(value):
for res, oti in ((DOUBLE, 'double'),
(BOOL, 'bool'),
(URI, 'uri'),
(INT, 'integer')):
if res.match(value.strip().lower()):
ot = oti
break
else:
ot = 'any'
return ot
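# Examples of the detection above: "3.14" -> 'double', "true" -> 'bool',
# "http://example.org" -> 'uri', "42" -> 'integer', anything unmatched -> 'any'.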
class Parser:
def __enter__(self):
self._tempdir = tempfile.mkdtemp()
# self._tempdir = '/Users/ross/Sandbox/wdi/csvextractor'
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
# os.removedirs(self._tempdir)
def items(self, inputfile):
"""
read inputfile as csv, convert to a list of yamls
WQ_XXX.csv example headers
WQ_Arsenic
POINT_ID,CollectionDate,HistoricDate,Arsenic,Latitude,Longitude,WellDepth,DataSource,
DataSourceInfo,Arsenic_Symbol,GeoLocation
:param inputfile:
:return: list of paths
"""
delimiter = ','
with open(inputfile, 'r') as rf:
rows = []
header = None
for line in rf:
row = line.split(delimiter)
if header is None:
header = [r.strip() for r in row]
continue
row = dict(zip(header, [r.strip() for r in row]))
rows.append(row)
# determine wq_tag from header
wq_tag = next((n for n in ('Arsenic', 'HCO3',
'Ca', 'Cl', 'F', 'Mg', 'Na',
'SO4', 'TDS', 'U') if n in header), None)
for location_name, items in groupby(sorted(rows,
key=itemgetter('POINT_ID')),
key=itemgetter('POINT_ID')):
location_name = location_name.replace(' ', '_')
name = '{}.yaml'.format(location_name)
tmpfile = os.path.join(self._tempdir, name)
rows_to_yaml(location_name, tmpfile, items, wq_tag)
yield tmpfile
# if __name__ == '__main__':
# with Parser() as p:
# for i in p.items('/Users/ross/Programming/wdidata/wq_arsenic.csv'):
# print('isda', i)
# with open(i, 'r') as rfile:
# obj = yaml.load(rfile, Loader=yaml.SafeLoader)
# print('asd', obj)
# break
# ============= EOF =============================================
| [
"[email protected]"
] | |
196a7d3700530cf459ab672120d0ffb207717998 | c2d5055a7b292d18facce55d975ff8f9a19b5c39 | /examples/list_uarts.py | cbacc1f1a56b6a6c280b9dcd6a600aa50896ddb8 | [
"MIT"
] | permissive | playi/Adafruit_Python_BluefruitLE | 11f6c268436ebf1eb554fdfcf3a58eac0d01816e | 928669aff263b6602365ecfea2a1efe1950c111c | refs/heads/master | 2021-04-18T07:20:13.684162 | 2018-06-04T18:04:26 | 2018-06-04T18:04:26 | 126,250,975 | 2 | 3 | MIT | 2018-08-01T20:06:52 | 2018-03-21T23:04:06 | Python | UTF-8 | Python | false | false | 2,515 | py | # Search for BLE UART devices and list all that are found.
# Author: Tony DiCola
import atexit
import time
import Adafruit_BluefruitLE
from Adafruit_BluefruitLE.services import UART
# Get the BLE provider for the current platform.
ble = Adafruit_BluefruitLE.get_provider()
# Main function implements the program logic so it can run in a background
# thread. Most platforms require the main thread to handle GUI events and other
# asynchronous events like BLE actions. All of the threading logic is taken care
# of automatically though and you just need to provide a main function that uses
# the BLE provider.
def main():
# Clear any cached data because both bluez and CoreBluetooth have issues with
# caching data and it going stale.
ble.clear_cached_data()
# Get the first available BLE network adapter and make sure it's powered on.
adapter = ble.get_default_adapter()
adapter.power_on()
print('Using adapter: {0}'.format(adapter.name))
# Start scanning with the bluetooth adapter.
adapter.start_scan()
# Use atexit.register to call the adapter stop_scan function before quiting.
# This is good practice for calling cleanup code in this main function as
# a try/finally block might not be called since this is a background thread.
atexit.register(adapter.stop_scan)
print('Searching for UART devices...')
print('Press Ctrl-C to quit (will take ~30 seconds on OSX).')
# Enter a loop and print out whenever a new UART device is found.
known_uarts = set()
while True:
# Call UART.find_devices to get a list of any UART devices that
# have been found. This call will quickly return results and does
# not wait for devices to appear.
found = set(UART.find_devices())
# Check for new devices that haven't been seen yet and print out
# their name and ID (MAC address on Linux, GUID on OSX).
new = found - known_uarts
for device in new:
print('Found UART: {0} [{1}]'.format(device.name, device.id))
known_uarts.update(new)
# Sleep for a second and see if new devices have appeared.
time.sleep(1.0)
# Initialize the BLE system. MUST be called before other BLE calls!
ble.initialize()
# Start the mainloop to process BLE events, and run the provided function in
# a background thread. When the provided main function stops running, returns
# an integer status code, or throws an error the program will exit.
ble.run_mainloop_with(main)
| [
"[email protected]"
] | |
46691ee0ff111b983af457bc01fe09ae5a1b1391 | a5a6d753ebebec4ef31115424c932cbc167da83f | /models/resnext/paths.py | f382ef7737dfbb5bad6ea5da64078fa183756b11 | [
"MIT"
] | permissive | XixiJin/depth-distillation | 3bdf897eaa6b9dd725c6e1370e8d3d8572c23f23 | fd53bceda4f317a5ed3832a2932a1eeb7eeb456b | refs/heads/master | 2023-04-12T20:36:54.199498 | 2021-05-10T03:21:11 | 2021-05-10T03:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | resnext_101_32_path = 'resnext_101_32x4d.pth' | [
"[email protected]"
] | |
1340eb68406fbef4445d5ace94fbe286d31eeb2b | 06fb976887431bb1091080e29fcd96e3a03c234f | /scripts/train_vae.py | 238990d5876de0e5831fdd201f052fefafa9f450 | [] | no_license | fbkarsdorp/seqmod | ec07538dda41967cfcefcaafc8676bff6f301f89 | 41be103717635187d8bc7862f1b8b24e4b844321 | refs/heads/master | 2021-08-23T01:52:27.485158 | 2017-12-02T08:46:36 | 2017-12-02T08:46:36 | 112,826,621 | 0 | 0 | null | 2017-12-02T08:39:57 | 2017-12-02T08:39:57 | null | UTF-8 | Python | false | false | 8,411 | py |
import os
import argparse
import torch
from torch.autograd import Variable
from seqmod import utils as u
from seqmod.misc.loggers import StdLogger, VisdomLogger
from seqmod.misc.optimizer import Optimizer
from seqmod.misc.preprocess import text_processor
from seqmod.misc.dataset import PairedDataset, Dict
from seqmod.misc.trainer import Trainer
from seqmod.modules.vae import SequenceVAE
from seqmod.loaders import load_twisty, load_dataset
from w2v import load_embeddings
def kl_weight_hook(trainer, epoch, batch, checkpoints):
trainer.log("info", "kl weight: [%g]" % trainer.kl_weight)
def make_generate_hook(target="This is just a tweet and not much more", n=5):
def hook(trainer, epoch, batch, checkpoints):
d = trainer.datasets['train'].d['src']
inp = torch.LongTensor([d.index(i) for i in target.split()])
inp = Variable(inp, volatile=True).unsqueeze(1)
z_params = trainer.model.encode(inp)
for hyp_num in range(1, n + 1):
score, hyp = trainer.model.generate(z_params=z_params)
trainer.log("info", u.format_hyp(score[0], hyp[0], hyp_num, d))
return hook
def load_lines(path, processor=text_processor()):
lines = []
with open(os.path.expanduser(path)) as f:
for line in f:
line = line.strip()
if processor is not None:
line = processor(line)
if line:
lines.append(line)
return lines
def load_from_lines(path, batch_size, max_size=1000000, min_freq=5,
gpu=False, shuffle=True, **kwargs):
lines = load_lines(path)
ldict = Dict(pad_token=u.PAD, eos_token=u.EOS, bos_token=u.BOS,
max_size=max_size, min_freq=min_freq).fit(lines)
return PairedDataset(
lines, None, {'src': ldict}, batch_size, gpu=gpu
).splits(shuffle=shuffle, **kwargs)
def load_penn(path, batch_size, max_size=1000000, min_freq=1,
gpu=False, shuffle=True):
train_data = load_lines(os.path.join(path, 'train.txt'))
valid_data = load_lines(os.path.join(path, 'valid.txt'))
test_data = load_lines(os.path.join(path, 'test.txt'))
d = Dict(pad_token=u.PAD, eos_token=u.EOS, bos_token=u.BOS,
max_size=max_size, min_freq=min_freq)
d.fit(train_data, valid_data)
train = PairedDataset(
train_data, None, {'src': d}, batch_size, gpu=gpu)
valid = PairedDataset(
valid_data, None, {'src': d}, batch_size, gpu=gpu, evaluation=True)
test = PairedDataset(
test_data, None, {'src': d}, batch_size, gpu=gpu, evaluation=True)
return train.sort_(), valid.sort_(), test.sort_()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# model
parser.add_argument('--num_layers', default=1, type=int)
parser.add_argument('--emb_dim', default=50, type=int)
parser.add_argument('--hid_dim', default=50, type=int)
parser.add_argument('--z_dim', default=50, type=int)
parser.add_argument('--cell', default='LSTM')
parser.add_argument('--tie_weights', action='store_true')
parser.add_argument('--project_init', action='store_true')
parser.add_argument('--dropout', default=0.0, type=float)
parser.add_argument('--word_dropout', default=0.0, type=float)
parser.add_argument('--add_z', action='store_true')
parser.add_argument('--load_embeddings', action='store_true')
parser.add_argument('--flavor', default=None)
parser.add_argument('--suffix', default=None)
# training
parser.add_argument('--optim', default='RMSprop')
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--max_norm', default=5., type=float)
parser.add_argument('--weight_decay', default=0, type=float)
parser.add_argument('--lr_decay', default=0.85, type=float)
parser.add_argument('--start_decay_at', default=1, type=int)
parser.add_argument('--inflection_point', default=10000, type=int)
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--batch_size', type=int, default=564)
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--outputfile', default=None)
parser.add_argument('--checkpoints', default=100, type=int)
# dataset
parser.add_argument('--source', required=True)
parser.add_argument('--source_path')
parser.add_argument('--dev', default=0.1, type=float)
parser.add_argument('--test', default=0.2, type=float)
parser.add_argument('--min_len', default=0, type=int)
parser.add_argument('--min_freq', default=5, type=int)
parser.add_argument('--max_size', default=50000, type=int)
parser.add_argument('--level', default='token')
parser.add_argument('--concat', action='store_true')
parser.add_argument('--cache_data', action='store_true')
args = parser.parse_args()
prefix = '{source}.{level}.{min_len}.{min_freq}.{concat}.{max_size}' \
.format(**vars(args))
print("Loading data...")
# preprocess
if not args.cache_data or not os.path.isfile('data/%s_train.pt' % prefix):
if args.source == 'twisty':
src, trg = load_twisty(
min_len=args.min_len, level=args.level, concat=args.concat,
processor=text_processor(lower=False))
train, test, valid = load_dataset(
src, trg, args.batch_size,
min_freq=args.min_freq, max_size=args.max_size,
gpu=args.gpu, dev=args.dev, test=args.test)
elif args.source == 'penn':
train, test, valid = load_penn(
"~/corpora/penn", args.batch_size,
min_freq=args.min_freq, max_size=args.max_size, gpu=args.gpu)
else:
train, test, valid = load_from_lines(
args.source_path, args.batch_size,
min_freq=args.min_freq, max_size=args.max_size,
gpu=args.gpu, dev=args.dev, test=args.text)
# save
if args.cache_data:
train.to_disk('data/%s_train.pt' % prefix)
test.to_disk('data/%s_test.pt' % prefix)
valid.to_disk('data/%s_valid.pt' % prefix)
# load from file
else:
train = PairedDataset.from_disk('data/%s_train.pt' % prefix)
test = PairedDataset.from_disk('data/%s_test.pt' % prefix)
valid = PairedDataset.from_disk('data/%s_valid.pt' % prefix)
train.set_gpu(args.gpu)
test.set_gpu(args.gpu)
valid.set_gpu(args.gpu)
train.set_batch_size(args.batch_size)
test.set_batch_size(args.batch_size)
valid.set_batch_size(args.batch_size)
print("* Number of train batches %d" % len(train))
print("Building model...")
model = SequenceVAE(
args.emb_dim, args.hid_dim, args.z_dim, train.d['src'],
num_layers=args.num_layers, cell=args.cell, dropout=args.dropout,
add_z=args.add_z, word_dropout=args.word_dropout,
tie_weights=args.tie_weights, project_init=args.project_init,
inflection_point=args.inflection_point)
print(model)
u.initialize_model(model)
if args.load_embeddings:
weight = load_embeddings(
train.d['src'].vocab,
args.flavor,
args.suffix,
'~/data/word_embeddings')
model.init_embeddings(weight)
if args.gpu:
model.cuda()
def on_lr_update(old_lr, new_lr):
trainer.log("info", "Resetting lr [%g -> %g]" % (old_lr, new_lr))
optimizer = Optimizer(
model.parameters(), args.optim, lr=args.lr,
max_norm=args.max_norm, weight_decay=args.weight_decay,
# SGD-only
start_decay_at=args.start_decay_at, lr_decay=args.lr_decay,
on_lr_update=on_lr_update)
class VAETrainer(Trainer):
def on_batch_end(self, epoch, batch, loss):
# reset kl weight
total_batches = len(self.datasets['train'])
self.model.kl_weight = self.model.kl_schedule(
batch + total_batches * epoch)
losses = [{'loss': 'log-loss'},
{'loss': 'kl', 'format': lambda loss: loss}]
trainer = VAETrainer(
model, {'train': train, 'valid': valid, 'test': test}, optimizer,
losses=losses)
trainer.add_loggers(
StdLogger(), VisdomLogger(env='vae', losses=('rec', 'kl'), max_y=600))
trainer.train(args.epochs, args.checkpoints, shuffle=True)
| [
"[email protected]"
] | |
02fb7d52b9f2d4dd06d435b3711339f8d9111826 | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/4_cv/1_ml_mastery/1_cv/code/chapter_20/07_model_3vgg_data_aug.py | 02800ffcce5e2f90d1d314cb9dac9b719e6d5e41 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 3,497 | py | # baseline model with data augmentation on the cifar10 dataset
import sys
from matplotlib import pyplot
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = cifar10.load_data()
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.001, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# plot diagnostic learning curves
def summarize_diagnostics(history):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(history.history['loss'], color='blue', label='train')
pyplot.plot(history.history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(history.history['acc'], color='blue', label='train')
pyplot.plot(history.history['val_acc'], color='orange', label='test')
# save plot to file
filename = sys.argv[0].split('/')[-1]
pyplot.savefig(filename + '_plot.png')
pyplot.close()
# run the test harness for evaluating a model
def run_test_harness():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# prepare pixel data
trainX, testX = prep_pixels(trainX, testX)
# define model
model = define_model()
# create data generator
datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
# prepare iterator
it_train = datagen.flow(trainX, trainY, batch_size=64)
# fit model
steps = int(trainX.shape[0] / 64)
history = model.fit_generator(it_train, steps_per_epoch=steps, epochs=100, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# learning curves
summarize_diagnostics(history)
# entry point, run the test harness
run_test_harness() | [
"[email protected]"
] | |
6a6c28212d9e9aabe4376c6f3ca2a32bf4e73053 | f6f4c87a1f2e750530a7d691da43514d84f99f5c | /hw20/a/q3/q3.py | 6ee2af17e499595428e05652bd352713f2bf63d4 | [] | no_license | sarthak77/Basics-of-ML-AI | e941c6653bca95278cc62ee7ba229e8eaf4e309b | cb2ba9d271da919846211cf8496e29aff6beaa46 | refs/heads/master | 2020-07-25T10:33:54.420972 | 2020-01-09T19:25:57 | 2020-01-09T19:25:57 | 208,257,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | #Import modules
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVC
import keras
from keras import regularizers
import keras.optimizers
import keras.initializers
from keras.models import Sequential
from keras.models import Model
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Dense, Dropout
from keras.layers import Input, add
from keras.datasets import mnist
"""
Load the data
NOTrS:training samples
NOTeS:test samples
TRC:training class
TRL:training labels
TEC:test class
TEL:test labels
"""
NOTrS=10000
NOTeS=1000
(TRC,TRL),(TEC,TEL)=mnist.load_data()
TRC=TRC.reshape(len(TRC),28*28)
TRC=TRC[0:NOTrS]
TRL=TRL[0:NOTrS]
TRC=TRC/255
predtest=keras.utils.to_categorical(TEL,10)
TEC=TEC.reshape(len(TEC),28*28)
TEC=TEC[0:NOTeS]
TEL=TEL[0:NOTeS]
TEC=TEC/255
predtrain=keras.utils.to_categorical(TRL,10)
#Initialise parameters
NOE=20
B=128
temp=[10,50,100,300,400,500]
encoders=[]
# Apply NN: train one autoencoder per hidden-layer size in temp
for i in range(6):
x=Input(shape=(784,))
H1=Dense(temp[i],activation='relu')(x)
h=Dense(temp[i]//2,activation='relu')(H1)
H2=Dense(temp[i],activation='relu')(h)
r=Dense(784,activation='sigmoid')(H2)
autoencoder=Model(inputs=x,outputs=r)
autoencoder.compile(optimizer=keras.optimizers.Adam(),loss='mse')
history=autoencoder.fit(TRC,TRC, batch_size=B, epochs=NOE, verbose=0, validation_data=(TEC,TEC))
encoders.append(Model(autoencoder.input,autoencoder.layers[-3].output))
#Raw model
c=.1
MR=SVC(C=c,kernel='rbf')
MR.fit(TRC,TRL)
raw_pred=MR.predict(TEC)
#Find accuracy from raw model
ACCR=0
for i in range(len(TEL)):
if(raw_pred[i]==TEL[i]):
ACCR=ACCR+1
# Find accuracy using each autoencoder's encoded features
ACC=[]
model_encode=SVC(C=c,kernel='rbf')
for i in range(6):
E=encoders[i]
entr=E.predict(TRC)
ente=E.predict(TEC)
model_encode.fit(entr,TRL)
out=model_encode.predict(ente)
ACCEN=0
for i in range(len(TEL)):
if(out[i]==TEL[i]):
ACCEN=ACCEN+1
ACC.append(ACCEN/10)
#plotting
#calculate X and Y
Y=[temp[i]+temp[i]//2 for i in range(len(temp))]
Y.append("Raw")
X=np.arange(7)
ACC.append(ACCR/10)
plt.bar(X,ACC, align='center', alpha=0.5)
plt.ylabel('Accuracy')
plt.xticks(X, Y)
plt.title('SVM classifier with RBF kernel')
plt.tight_layout()
plt.show() | [
"[email protected]"
] | |
e8518de3fd37b928d126d13d14cc0fe03395fbf7 | b838c392fec9934d73b6b605d672667bf1d5e3fd | /backend/application.py | 88743c87cdba3245e549229a5b6501dfa63b214f | [] | no_license | hmisonne/Canoo_SWOPS_test | 25ddcdb624cfeb8542206e3c055a5b4fba95f328 | 01c1556c60680674d51f8047f9c3c5afe8b91a03 | refs/heads/main | 2023-04-10T14:23:43.977490 | 2021-04-23T20:15:16 | 2021-04-23T20:15:16 | 360,990,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,834 | py | from flask import Flask
from flask import render_template, abort, redirect, url_for, request, jsonify
from flask_cors import CORS
import json
import uuid
app = Flask(__name__)
CORS(app)
# Configure "database" as json file
database_path = "data.json"
app.config['JSON_DATA'] = database_path
def read_json():
path = app.config["JSON_DATA"]
with open(path) as jsonFile:
return json.load(jsonFile)
def write_json(data):
path = app.config["JSON_DATA"]
with open(path, "w") as jsonFile:
json.dump(data, jsonFile)
@app.route("/temperature", methods=['GET'])
def get_temperature():
try:
data = read_json()
result = data['temperature']
response = {
'success': True,
'data': result
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/temperature", methods=[ 'POST'])
def set_temperature():
body = request.get_json()
value = body.get('temperature', None)
if value is None or type(value) != int:
app.logger.error('%s %s %s',request.method, request.url_rule, "400: Bad request")
abort(400)
data = read_json()
data["temperature"] = value
write_json(data)
response = {
'success': True,
'data': value
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
@app.route("/lights", methods=["GET"])
def get_lights():
try:
data = read_json()
result = list(data['lights'].values())
response = {
'success': True,
'data': result
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/lights", methods=["POST"])
def add_light():
try:
data = read_json()
light_id = uuid.uuid1().hex
newLight = {
"id": light_id,
"turnedOn": False,
}
data["lights"][light_id] = newLight
write_json(data)
response = {
'success': True,
'data': newLight
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/lights/<light_id>", methods=["GET"])
def get_light(light_id):
data = read_json()
light = data["lights"].get(light_id, None)
if light is None:
app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
abort(404)
response = {
'success': True,
'data': light
}
app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
return jsonify(response)
@app.route("/lights/<light_id>", methods=["DELETE"])
def remove_light(light_id):
data = read_json()
light_toDelete = data["lights"].get(light_id, None)
if light_toDelete is None:
app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
abort(404)
del data["lights"][light_id]
write_json(data)
response = {
'success': True,
'light_deleted': light_id
}
app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
return jsonify(response)
@app.route("/lights/<light_id>", methods=["PUT"])
def toggle_light(light_id):
data = read_json()
light_toToggle = data["lights"].get(light_id, None)
if light_toToggle is None:
app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
abort(404)
light_toToggle['turnedOn'] = not light_toToggle['turnedOn']
write_json(data)
response = {
'success': True,
'data': light_toToggle
}
app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
return jsonify(response)
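# Illustrative sketch of exercising the routes above with Flask's built-in test client
# (not part of the original API; assumes a valid data.json exists at the configured path):
def _example_usage():
    with app.test_client() as client:
        client.post("/temperature", json={"temperature": 21})
        print(client.get("/temperature").get_json())
        new_light = client.post("/lights").get_json()["data"]
        client.put("/lights/{}".format(new_light["id"]))  # toggle the new light on
        print(client.get("/lights/{}".format(new_light["id"])).get_json())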
@app.errorhandler(404)
def resource_not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "Resource not found"
}), 404
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
@app.errorhandler(400)
def bad_request(error):
return jsonify({
"success": False,
"error": 400,
"message": "Bad request"
}), 400 | [
"[email protected]"
] | |
22c07331e1b110b09e94605b96187b0a4ec40108 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /Week 11/ArnieMartin_CircuitPlayground_Single_Mouse_Button_Advanced.py | 318bd949acb9fb6b6f28b56d917f96a7274d3b7d | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py |
# more advanced-ish use of single onboard button to control LMB and RMB
# works with CPX and CPB, copy adafruit_hid to /lib
import board
import time
import digitalio
import busio
import adafruit_lis3dh
import usb_hid
from adafruit_hid.mouse import Mouse
killswitch = digitalio.DigitalInOut(board.SLIDE_SWITCH)
killswitch.direction = digitalio.Direction.INPUT
killswitch.pull = digitalio.Pull.UP
smb = digitalio.DigitalInOut(board.BUTTON_A)
smb.direction = digitalio.Direction.INPUT
smb.pull = digitalio.Pull.DOWN
smb_pre = smb.value
mouse = Mouse(usb_hid.devices)
smb_time = 0
RMB_DELAY = 0.5
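# RMB_DELAY is the press-duration threshold in seconds: releases shorter than this
# send a left click, while holding the button past it sends a right click instead.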
while True:
if killswitch.value:
if smb.value is not smb_pre:
smb_pre = smb.value
if smb.value:
print("button clicked...")
smb_time = time.monotonic()
print("press time is", smb_time)
if not smb.value:
print("release time is", time.monotonic())
eltime = time.monotonic() - smb_time
print("elapsed time is", eltime)
if eltime < RMB_DELAY:
print("short press... LMB clicked!")
mouse.click(Mouse.LEFT_BUTTON)
smb_time = 0
else:
if smb_time != 0:
eltime = time.monotonic() - smb_time
print("elapsed time is", eltime)
time.sleep(0.01)
if eltime > RMB_DELAY:
print("long press... RMB clicked!")
mouse.click(Mouse.RIGHT_BUTTON)
smb_time = 0 | [
"[email protected]"
] | |
9d01e66f647965d123823224278de34331718f3a | 4e67c2edd71493a98a3f13e5b2073c1d05b1b656 | /Semestre 02/ProjetoIntegrador2/Aula 08.27.2020/Metodos_lista.py | 75d15a7c7e7bd102bbd084a1482db9446b15b851 | [] | no_license | felipellima83/UniCEUB | 05991d7a02b13cd4e236f3be3a34726af2dc1504 | dbc44866545b5247d1b5f76ec6e9b7778e54093e | refs/heads/master | 2023-07-08T19:04:19.830473 | 2021-08-12T12:33:49 | 2021-08-12T12:33:49 | 249,958,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | lista = [ ]
t_lista = int(input("Quantidade: "))
for i in range(t_lista):
    n = int(input("Digite o número: "))
    lista.append(n)
print(lista)
print(len(lista))
print(sum(lista))
print(max(lista))
print(min(lista))
pesquisa = int(input("Qual: "))
if pesquisa in lista:
    posicao = lista.index(pesquisa)
    print(posicao)
else:
    print("Não tem")
#lista.sort()
lista.reverse()
print(lista)
print("Média: ",sum(lista)/len(lista)) | [
"[email protected]"
] | |
0af3834982b72c9cdf345b6aff6ffd9e7ccea915 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02716/s014491177.py | f6f24c53a93fa9d93bb64a4642c8896800c4707e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import sys
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
from collections import defaultdict
def resolve():
N = ir()
A = lr()
dp = defaultdict(lambda: -float('inf'))
dp[0, 0, 0] = 0
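# dp[(i, j, used)] = best sum over the first i elements with exactly j of them chosen,
# where used is 1 if A[i-1] was chosen. A new pick may only follow a state with used == 0,
# so no two chosen elements are adjacent; the answer requires exactly N//2 chosen elements.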
for i in range(N):
for j in range(max(i//2-1, 0), i//2+2):
dp[i+1, j+1, 1] = dp[i, j, 0]+A[i]
dp[i+1, j, 0] = max(dp[i, j, 0], dp[i, j, 1])
# print(dp)
print(max(dp[N, N//2, 1], dp[N, N//2, 0]))
resolve() | [
"[email protected]"
] | |
342b2b11b0890dc05a1974e7b6bcc3a3d4d54c02 | 25bf0d5ed033006074eaec207358d11943ba9936 | /spider/eastnews/eastnews/middlewares.py | 9693b0fe2196f59059d1711169982f4cae36adcd | [] | no_license | wanglanfeng/Distributed-crawler | 0c7f90ea684da14ad7c33e8824f00b5df04e4745 | 36ce154562d022ebf1b06e507824786cb670d358 | refs/heads/master | 2020-03-16T23:41:00.866080 | 2018-02-24T11:37:20 | 2018-02-24T11:37:20 | 133,087,133 | 1 | 0 | null | 2018-05-11T20:46:47 | 2018-05-11T20:46:47 | null | UTF-8 | Python | false | false | 1,906 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class EastnewsSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
bfb26558c8d4426d10ad050db99ba001fa79afe6 | c727d96f9ee57a186ac2f6a069e7863017131d9b | /mypy_boto3_builder/structures/waiter.py | 573b3a99a0ef983c7359c5cc3713e46a8412d8b4 | [
"MIT"
] | permissive | ayobuba/mypy_boto3_builder | 66438be3c9fce1e6215a58692b69496250eda433 | 9315adca025a5831ab1c2c00d3ed8602a21e8c74 | refs/heads/master | 2022-11-08T12:38:00.596915 | 2020-06-09T23:10:30 | 2020-06-09T23:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | """
Boto3 client Waiter.
"""
from dataclasses import dataclass, field
from typing import List
from botocore.waiter import Waiter as Boto3Waiter
from mypy_boto3_builder.enums.service_module_name import ServiceModuleName
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.service_name import ServiceName, ServiceNameCatalog
from mypy_boto3_builder.structures.argument import Argument
from mypy_boto3_builder.structures.class_record import ClassRecord
from mypy_boto3_builder.structures.method import Method
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
from mypy_boto3_builder.type_annotations.type import Type
from mypy_boto3_builder.type_annotations.type_class import TypeClass
from mypy_boto3_builder.type_annotations.type_literal import TypeLiteral
@dataclass
class Waiter(ClassRecord):
"""
Boto3 client Waiter.
"""
waiter_name: str = "waiter_name"
service_name: ServiceName = ServiceNameCatalog.ec2
bases: List[FakeAnnotation] = field(
default_factory=lambda: [TypeClass(Boto3Waiter, alias="Boto3Waiter")]
)
def get_client_method(self) -> Method:
return Method(
name="get_waiter",
decorators=[Type.overload],
docstring=self.docstring,
arguments=[
Argument("self", None),
Argument("waiter_name", TypeLiteral(self.waiter_name)),
],
return_type=ExternalImport(
source=ImportString(self.service_name.module_name, ServiceModuleName.waiter.value),
name=self.name,
),
)
| [
"[email protected]"
] | |
33ae7c9f65c1462e20cf31b50507a4e2a51c791e | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Advanced Corporate Actions/FPythonCode/FCorpActionPayoutViewer.py | aba8a545dc56d44c822c7b513a9f647546083cb7 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | """ Compiled: 2020-09-18 10:38:49 """
#__src_file__ = "extensions/advanced_corporate_actions/./etc/FCorpActionPayoutViewer.py"
import acm
import FUxCore
def SelectFirstItem(objList, itemList):
if objList:
firstItem = objList[0]
itemList.SetData(firstItem)
def RemoveItem(objList, itemList, item):
index = objList.index(item)
objList.remove(item)
itemList.RemoveItem(item)
if objList:
if len(objList) <= index:
index -= 1
newItem = objList[index]
if newItem:
itemList.SetData(newItem)
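# Module-level callback handlers registered via AddCallback(); the dialog instance is passed in explicitly as the first argument (self).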
def OnDeleteClicked(self, cd):
val = self.m_values.GetData()
if val:
acm.FCorporateActionPayout[val].Delete()
RemoveItem(self.valList, self.m_values, val)
def OnValDoubleClicked(self, cd):
val = self.m_values.GetData()
if val:
acm.StartRunScript(acm.FCorporateActionPayout[val], 'Modify')
class PayoutsListCustomDialog(FUxCore.LayoutDialog):
LIST_VALUES = 'listValues'
BTN_DELETE = 'btnDelete'
def __init__(self, params):
self.choices = params['choices']
self.selected = params['selected']
self.caption = 'Payouts List'
self.valLabel = 'Payouts'
self.valList = []
self.selectList = []
def HandleApply(self):
resultDic = acm.FDictionary()
resultDic.AtPut('result', self.valList)
return resultDic
def SetControlData(self):
SelectFirstItem(self.valList, self.m_values)
def HandleCreate(self, dlg, layout):
self.m_fuxDlg = dlg
self.m_fuxDlg.Caption(self.caption)
self.m_values = layout.GetControl(self.LIST_VALUES)
self.m_values.AddCallback('DefaultAction', OnValDoubleClicked, self)
self.m_btnDelete = layout.GetControl(self.BTN_DELETE)
self.m_btnDelete.AddCallback('Activate', OnDeleteClicked, self)
self.PopulateControls()
self.SetControlData()
def CreateLayout(self):
b = acm.FUxLayoutBuilder()
b.BeginVertBox()
b. BeginHorzBox()
b. AddSpace(3)
b. BeginVertBox()
b. AddLabel("lblValues", self.valLabel)
b. AddList(self.LIST_VALUES, 10, -1, 15, -1)
b. EndBox()
b. AddSpace(3)
b. EndBox()
b. AddSpace(5)
b. BeginHorzBox()
b. AddFill()
b. AddButton(self.BTN_DELETE, "Delete")
b. AddButton('ok', 'Close')
b. AddSpace(3)
b. EndBox()
b.EndBox()
return b
def PopulateControls(self):
self.valList = [s for s in self.selected]
self.valList.sort()
self.m_values.Populate(self.valList)
if self.valList:
self.m_values.SetData(self.valList[0]) | [
"[email protected]"
] | |
0950fd4826dade797628563d1bc43cbc412d6018 | 7234e6c72eb3f09c4a66dbe91f00fdf7742f010f | /algo/dp/medium/longestArithmeticSequence2.py | 50004a01f60d588e9b4155d465a095777a44c4e1 | [] | no_license | srinathalla/python | 718ac603473e7bed060ba66aa3d39a90cf7ef69d | b6c546070b1738350303df3939888d1b0e90e89b | refs/heads/master | 2021-06-13T06:11:42.653311 | 2021-02-19T06:01:41 | 2021-02-19T06:01:41 | 150,374,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from typing import List
class Solution:
def longestArithSeqLength(self, A: List[int]) -> int:
dp = {}
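        # dp[(i, d)] = length of the longest arithmetic subsequence ending at index i with common difference d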
for i in range(1, len(A)):
for j in range(i):
dp[i, A[i] - A[j]] = dp.get((j, A[i] - A[j]), 1) + 1
print(dp)
return max(dp.values())
s = Solution()
print(s.longestArithSeqLength([3, 6, 9, 12]))
| [
"[email protected]"
] | |
c17b10b518d979feca78ccbe1e9788aeba9bd12a | 37a776779a43662ed9bb6523f1f42fd3dc215610 | /TTHAnalysis/python/tools/treeReaderArrayTools.py | f35c73164ab905d806528e773e44aa78dd865187 | [] | no_license | ashrafkasem/cmgtools-lite | 818b3bc42ee3b5944074b45bc74248b8c32fa514 | 14d0a465aa5cfaa2036a38fcc2589cda159c6f72 | refs/heads/80X | 2022-12-31T02:07:00.683384 | 2017-01-25T10:19:40 | 2017-01-25T10:19:40 | 109,703,088 | 0 | 0 | null | 2017-11-06T14:06:09 | 2017-11-06T14:06:09 | null | UTF-8 | Python | false | false | 4,008 | py | import types
import ROOT
def initTree(tree):
tree.entry = -1
tree._ttreereader = ROOT.TTreeReader(tree)
tree._ttreereader.SetEntry(0)
tree._ttrvs = {}
tree._ttras = {}
tree._leafTypes = {}
tree._ttreereaderversion = 1
tree.arrayReader = types.MethodType(getArrayReader, tree)
tree.valueReader = types.MethodType(getValueReader, tree)
tree.readBranch = types.MethodType(readBranch, tree)
def getArrayReader(tree, branchName, isClean=False):
"""Make a reader for branch branchName containing a variable-length value array.
If you are sure nobody has yet read from the tree, you can set isClean to True and save some overhead."""
if branchName not in tree._ttras:
if not tree.GetBranch(branchName): raise RuntimeError, "Can't find branch '%s'" % branchName
leaf = tree.GetBranch(branchName).GetLeaf(branchName)
if not leaf.GetLen() == 0: raise RuntimeError, "Branch %s is not a variable-length value array" % branchName
typ = _rootType2Python[leaf.GetTypeName()]
tree._ttras[branchName] = _makeArrayReader(tree, typ, branchName, remakeAllFirst=not(isClean))
return tree._ttras[branchName]
def getValueReader(tree, branchName, isClean=False):
"""Make a reader for branch branchName containing a single value.
If you are sure nobody has yet read from the tree, you can set isClean to True and save some overhead."""
if branchName not in tree._ttrvs:
if not tree.GetBranch(branchName): raise RuntimeError, "Can't find branch '%s'" % branchName
leaf = tree.GetBranch(branchName).GetLeaf(branchName)
if not leaf.GetLen() == 1: raise RuntimeError, "Branch %s is not a value" % branchName
typ = _rootType2Python[leaf.GetTypeName()]
tree._ttrvs[branchName] = _makeValueReader(tree, typ, branchName, remakeAllFirst=not(isClean))
return tree._ttrvs[branchName]
def readBranch(tree, branchName):
"""Return the branch value if the branch is a value, and a TreeReaderArray if the branch is an array"""
if branchName in tree._ttras:
return tree._ttras[branchName]
elif branchName in tree._ttrvs:
return tree._ttrvs[branchName].Get()[0]
else:
branch = tree.GetBranch(branchName)
if not branch: raise RuntimeError, "Unknown branch %s" % branchName
leaf = branch.GetLeaf(branchName)
if leaf.GetTypeName() not in _rootType2Python:
raise RuntimeError, "Branch %s has unsupported type %s" % (branchName, leaf.GetTypeName())
typ = _rootType2Python[leaf.GetTypeName()]
if leaf.GetLen() == 1:
return _makeValueReader(tree, typ, branchName).Get()[0]
else:
return _makeArrayReader(tree, typ, branchName)
####### PRIVATE IMPLEMENTATION PART #######
_rootType2Python = { 'Int_t':int, 'Long_t':long, 'UInt_t':int, 'ULong_t':long,
'Float_t':float, 'Double_t':float }
def _makeArrayReader(tree, typ, nam, remakeAllFirst=True):
if remakeAllFirst: _remakeAllReaders(tree)
ttra = ROOT.TTreeReaderArray(typ)(tree._ttreereader, nam)
tree._leafTypes[nam] = typ
tree._ttras[nam] = ttra;
tree._ttreereader.SetEntry(tree.entry)
return tree._ttras[nam]
def _makeValueReader(tree, typ, nam, remakeAllFirst=True):
if remakeAllFirst: _remakeAllReaders(tree)
ttrv = ROOT.TTreeReaderValue(typ)(tree._ttreereader, nam)
tree._leafTypes[nam] = typ
tree._ttrvs[nam] = ttrv
tree._ttreereader.SetEntry(tree.entry)
return tree._ttrvs[nam]
def _remakeAllReaders(tree):
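    # Recreate the TTreeReader and re-register every existing value/array reader on it;
    # this is done whenever a new branch reader is added after reading may have started.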
_ttreereader = ROOT.TTreeReader(tree)
_ttrvs = {}
for k in tree._ttrvs.iterkeys():
_ttrvs[k] = ROOT.TTreeReaderValue(tree._leafTypes[k])(_ttreereader,k)
_ttras = {}
for k in tree._ttras.iterkeys():
_ttras[k] = ROOT.TTreeReaderArray(tree._leafTypes[k])(_ttreereader,k)
tree._ttrvs = _ttrvs
tree._ttras = _ttras
tree._ttreereader = _ttreereader
tree._ttreereaderversion += 1
| [
"[email protected]"
] | |
d5b36533b86e2e386538d1e81c4dcd407fa84e4e | b0d0dbb0742e3925bc8adab9bb7b7ee458972ad6 | /analayze.py | 13faa7de5dd2d27767b804c06edf05fc13229fab | [
"Apache-2.0"
] | permissive | miyosuda/dendritic_bp | 0cd0e23da0db7ba7460f7b209a92362f9f0f28fe | fd831b6ad9ae1993a14ba970408b80abfd45f0b1 | refs/heads/master | 2020-04-05T12:47:53.112472 | 2019-04-12T00:46:59 | 2019-04-12T00:46:59 | 156,880,868 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model import Layer, LAYER_TYPE_BOTTOM, LAYER_TYPE_HIDDEN, LAYER_TYPE_TOP, LowPassFilter
from option import Option
# Code for analysis with only the Top and Bottom layers connected
class Network(object):
def __init__(self):
option = Option()
self.layers = [None] * 2
self.layers[0] = Layer(pd_unit_size=1, layer_type=LAYER_TYPE_BOTTOM, option=option)
self.layers[1] = Layer(pd_unit_size=1, layer_type=LAYER_TYPE_TOP, option=option)
self.layers[0].connect_to(self.layers[1])
self.set_target_prediction_mode()
def set_target_prediction_mode(self):
        # update the pyramidal weights
for layer in self.layers:
            # this is the part that was changed
layer.train_w_pp_bu = True
layer.train_w_pp_td = False
layer.train_w_ip = False
layer.train_w_pi = False
for i,layer in enumerate(self.layers):
option = Option.get_target_prediction_option(i)
layer.set_option(option)
def update(self, dt):
for layer in self.layers:
layer.update_potential(dt)
for layer in self.layers:
layer.update_weight(dt)
def set_input_firing_rate(self, values):
self.layers[0].set_input_firing_rate(values)
def set_target_firing_rate(self, values):
self.layers[1].set_target_firing_rate(values)
def train_target_prediction(network):
dt = 0.1
lp_filter = LowPassFilter(dt, 3)
target_values = np.array([0.8], dtype=np.float32)
values = np.array([0.5], dtype=np.float32)
network.set_target_firing_rate(target_values)
network.set_input_firing_rate(values)
iteration = 2000
for i in range(iteration):
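        # let the network dynamics settle for 1000 update steps before logging the prediction error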
for j in range(1000):
network.update(dt)
du = network.layers[1].u_target - network.layers[1].u_p
v_p_b = network.layers[1].v_p_b
u_p = network.layers[1].u_p
print("du={}, v_p_b={}, u_p={}".format(du, v_p_b, u_p))
"""
print("upper_r_p={}, upper_v_p_b_hat={}, upper_r_p_b={}, d_w_pp_bu={}".format(
network.layers[0].debug_upper_r_p,
network.layers[0].debug_upper_v_p_b_hat,
network.layers[0].debug_upper_r_p_b,
network.layers[0].debug_d_w_pp_bu))
"""
def main():
np.random.seed(seed=0)
network = Network()
train_target_prediction(network)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b794c2df5ea3e950d2a988e2baa61785911770f8 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/378. Kth Smallest Element in a Sorted Matrix/solution1.py | 85aed3e0043f6c962b512d91df8783c990105928 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
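        # binary search over the value range; the answer is the smallest value with at least k matrix entries <= it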
lo, hi = matrix[0][0], matrix[-1][-1]
while lo <= hi:
mid = (lo + hi) >> 1
loc = sum(bisect.bisect_right(m, mid) for m in matrix)
if loc >= k:
hi = mid - 1
else:
lo = mid + 1
return lo
| [
"[email protected]"
] | |
e8937677ed1449ab35310cecf64433fe455a1935 | 835abbf0309503caddba77f4ed94ea3209e8784f | /kurs_dla_sredniozaawansowanych/itertools_groupby.py | 7404c6c1405458b65739d460cc96b78a58e667c3 | [] | no_license | rafal-mizera/UDEMY | 46873a01223a31b36de84fcfd13a5b9b1cf262e8 | d463613ecd470dae4f17ce59d2f815f70942ea07 | refs/heads/master | 2023-07-19T12:14:44.818156 | 2021-09-01T16:14:37 | 2021-09-01T16:14:37 | 402,125,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import os, itertools
def scantree(path):
for el in os.scandir(path):
if el.is_dir():
yield el
yield from scantree(el.path)
        else:
            yield el
listing = scantree(r"C:\\Users\RMZ\PycharmProjects")
listing = sorted(listing, key=lambda x: x.is_dir())
for el in listing:
    if el.is_dir():
        print(f"{el} is a directory")
    else:
        print(f"{el} is a file")
# groupby() only groups consecutive equal keys, which is why the listing was sorted by is_dir() above
for is_dir, elements in itertools.groupby(listing, key=lambda e: e.is_dir()):
    print('DIR ' if is_dir else 'FILE', len(list(elements))) | [
"[email protected]"
] | |
15eb945f14b81379026156ad81640eb4e6951ec3 | 02b37f5f702ae164de31f6fc36f826607a5f713e | /manage.py | e68e1157d6f290fa3927fc5333e90167c76a47b3 | [] | no_license | Sovushka-sever/test_replica | e671aa4f20da6efd18ea947fa298e40b9d4ee457 | 8b15013b725e2f4772e904002dd359aa6a4a3716 | refs/heads/master | 2023-08-24T16:47:38.271279 | 2021-10-04T05:37:42 | 2021-10-04T05:37:42 | 412,221,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_library.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c04acce25630ba2428d0ebcd3095b7bceebd7a59 | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1909/py02/day02/jiecheng.py | 966d2d9c92ecc9d5f7c1468b18df28cbad74f5c0 | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 385 | py | def func(n):
if n == 1:
return 1
else:
return n * func(n - 1)
        # 5 * func(4)  # cannot be returned directly: the result of func(4) must first be multiplied by 5, and that product is what gets returned
# 5 * 4 * func(3)
# 5 * 4 * 3 * func(2)
# 5 * 4 * 3 * 2 * func(1)
# 5 * 4 * 3 * 2 * 1
if __name__ == '__main__':
print(func(5))
| [
"[email protected]"
] | |
43114d2f7132bdd755e261c108c200afc4f4c9a5 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/surface/compute/instances/set_iam_policy.py | b9eea24fbd21a269b6f8e9fd753b33d2e48e35d9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 717 | py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Command to set IAM policy for an instance resource."""
from googlecloudsdk.api_lib.compute import iam_base_classes
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetIamPolicy(iam_base_classes.ZonalSetIamPolicy):
"""Set the IAM Policy for a Google Compute Engine instance resource."""
@staticmethod
def Args(parser):
iam_base_classes.ZonalSetIamPolicy.Args(parser, 'compute.instances')
@property
def service(self):
return self.compute.instances
@property
def resource_type(self):
return 'instances'
SetIamPolicy.detailed_help = iam_base_classes.SetIamPolicyHelp('instance')
| [
"[email protected]"
] | |
19b8bad12ca06aa46842267e1530a1532d13ae81 | 1ef56dcfef70ee14df8956eedd171f74406594af | /erp/biz/contact_unit_biz.py | 44cf2026076d3860ea2709dc3884f82a98010696 | [] | no_license | journeyends/webtest | 6c54ff19e01cd0cd99a34bcae55dd5701abf132f | 2a24c6d7c52aa627edfbba3dd5eb9ccc16abe9fb | refs/heads/master | 2020-04-17T07:38:52.873722 | 2019-03-21T09:36:50 | 2019-03-21T09:36:50 | 166,378,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from erp.idal.i_contact_unit_dal import I_contact_unit_dal
class Contact_unit_biz:
dal = I_contact_unit_dal().instance()
def getList(self, condition={}, user_info={}):
dal = Contact_unit_biz.dal
return dal.getList(condition=condition, user_info=user_info)
| [
"[email protected]"
] | |
d91ca4161a07129e9b9b27f17cd0465ab467fa7e | 5ec7a72cab10dd39e0cc877caa1cb97c3cd9f3de | /garuda/models/dq/operation.py | ba1d5125ca3eb27edc83cce8bc6439db91f6ad76 | [] | no_license | raufer/spark-dsl | a1d311263fe48f64859c04cd63a79f48d8cd8fa4 | a0fbf9561ba4567bc5d40bf2c7d289e214712aa6 | refs/heads/main | 2023-04-11T19:29:11.661273 | 2021-01-26T18:34:23 | 2021-01-26T18:34:23 | 367,982,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | import logging
from pydantic import BaseModel
from pydantic import validator
from garuda.dsl.mappings.operations import DSL_OPERATIONS
from garuda.models.dq.argument import Argument
from typing import List
logger = logging.getLogger(__name__)
class Operation(BaseModel):
"""
An operation represents a a Boolean Column as the result of
a function application
They are the most granular unit of computation of the engine
Op :: (...) -> bool
* each function is applicable to one or more columns;
* the argument list can also contain other native types
"""
id: str
arguments: List[Argument]
def __eq__(self, other):
if isinstance(other, self.__class__):
return all([
self.id == other.id,
self.arguments == other.arguments,
])
else:
return False
@staticmethod
def from_data(data):
id = data['id']
arguments = data['arguments']
return Operation(id=id, arguments=arguments)
| [
"[email protected]"
] | |
71ba4f507651adad9a4a6bea16e4fb6403802870 | dbc4a65c17645fe2b77d4acd22d6c53ace261f89 | /Numpy_bs/boolSetopSorting.py | 3bc13b81a4446580efc48bf257a421f0260eef06 | [] | no_license | felipeguth/basics | 52fba5e94c4ebeda53a881a4f3f806e10a000c98 | 40669c03324e44a36466d760e0b6af923e0fafd0 | refs/heads/master | 2023-08-15T10:30:59.759551 | 2021-08-20T18:21:43 | 2021-08-20T18:21:43 | 203,874,748 | 0 | 0 | null | 2023-07-22T14:17:41 | 2019-08-22T21:16:55 | HTML | UTF-8 | Python | false | false | 2,257 | py | import numpy as np
# We create a 5 x 5 ndarray that contains integers from 0 to 24
X = np.arange(25).reshape(5, 5)
# We print X
print()
print('Original X = \n', X)
print()
# We use Boolean indexing to select elements in X:
print('The elements in X that are greater than 10:', X[X > 10])
print('The elements in X that less than or equal to 7:', X[X <= 7])
print('The elements in X that are between 10 and 17:', X[(X > 10) & (X < 17)])
# We use Boolean indexing to assign the elements that are between 10 and 17 the value of -1
X[(X > 10) & (X < 17)] = -1
# We print X
print()
print('X = \n', X)
print()
#SET OPS
# We create a rank 1 ndarray
x = np.array([1,2,3,4,5])
# We create a rank 1 ndarray
y = np.array([6,7,2,8,4])
# We print x
print()
print('x = ', x)
# We print y
print()
print('y = ', y)
# We use set operations to compare x and y:
print()
print('The elements that are both in x and y:', np.intersect1d(x,y))
print('The elements that are in x that are not in y:', np.setdiff1d(x,y))
print('All the elements of x and y:',np.union1d(x,y))
#SORT
# We create an unsorted rank 1 ndarray
x = np.random.randint(1,11,size=(10,))
# We print x
print()
print('Original x = ', x)
# We sort x and print the sorted array using sort as a function.
print()
print('Sorted x (out of place):', np.sort(x))
# When we sort out of place the original array remains intact. To see this we print x again
print()
print('x after sorting:', x)
# We sort x but only keep the unique elements in x
print(np.sort(np.unique(x)))
# We create an unsorted rank 1 ndarray
x = np.random.randint(1,11,size=(10,))
# We print x
print()
print('Original x = ', x)
# We sort x and print the sorted array using sort as a method.
x.sort()
# When we sort in place the original array is changed to the sorted array. To see this we print x again
print()
print('x after sorting:', x)
# We create an unsorted rank 2 ndarray
X = np.random.randint(1,11,size=(5,5))
# We print X
print()
print('Original X = \n', X)
print()
# We sort the columns of X and print the sorted array
print()
print('X with sorted columns :\n', np.sort(X, axis = 0))
# We sort the rows of X and print the sorted array
print()
print('X with sorted rows :\n', np.sort(X, axis = 1))
| [
"[email protected]"
] | |
c11c3116864d5ac9805e39e4c0379455ab221940 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_11_01/aio/operations/_private_endpoint_connections_operations.py | 083296317b9947180f059bbc703508241d77a482 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 21,670 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2020_11_01.aio.ContainerServiceClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace_async
async def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnectionListResult:
"""Gets a list of private endpoint connections in the specified managed cluster.
Gets a list of private endpoint connections in the specified managed cluster. The operation
returns properties of each private endpoint connection.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnectionListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-11-01"))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Gets the private endpoint connection.
Gets the details of the private endpoint connection by managed cluster and resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-11-01"))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
async def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to the Update a private endpoint connection operation.
Required.
:type parameters: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to the Update a private endpoint connection operation.
Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to the Update a private endpoint connection operation.
Is either a PrivateEndpointConnection type or a IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-11-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateEndpointConnection")
request = build_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-11-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a private endpoint connection.
Deletes the private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-11-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
| [
"[email protected]"
] | |
874eb69589c1c8e3384d0af1565f3fb7bbaa4eb5 | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_bitmap_font/bitmap_font.py | b958a6d5e9f99646e075e137ab049f2028ea3062 | [] | no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | # The MIT License (MIT)
#
# Copyright (c) 2019 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_bitmap_font.bitmap_font`
====================================================
Loads bitmap glyphs from a variety of font.
* Author(s): Scott Shawcroft
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
__version__ = "1.0.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Bitmap_Font.git"
def load_font(filename, bitmap=None):
"""Loads a font file. Returns None if unsupported."""
if not bitmap:
import displayio
bitmap = displayio.Bitmap
font_file = open(filename, "rb")
first_four = font_file.read(4)
#print(first_four)
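    # pick a parser from the file extension and the first four bytes (a BDF file begins with "STARTFONT", hence b"STAR")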
if filename.endswith("bdf") and first_four == b"STAR":
from . import bdf
return bdf.BDF(font_file, bitmap)
if filename.endswith("pcf") and first_four == b"\x01fcp":
import pcf
return pcf.PCF(font_file)
if filename.endswith("ttf") and first_four == b"\x00\x01\x00\x00":
import ttf
return ttf.TTF(font_file)
return None
| [
"[email protected]"
] | |
9c97389a9de5ea9c3f054a8cc24a3d65143ec055 | 3da991a057cd81de802c40da2edd640878685258 | /test/test_op_normalization.py | c706451847070bc4b6095de92fb86e51b8490ca8 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | sjx0451/pytorch | 9f5b1c0c7c874f9da72c0190dc131944ba828ab7 | 3544f60f7602081398ee62bc5d652a87f4743dab | refs/heads/master | 2022-12-01T22:30:29.888370 | 2020-08-13T23:45:58 | 2020-08-13T23:48:31 | 287,421,291 | 2 | 0 | NOASSERTION | 2020-08-14T02:06:11 | 2020-08-14T02:06:11 | null | UTF-8 | Python | false | false | 4,347 | py | import torch
from torch.testing import FileCheck
from torch.testing._internal.common_utils import \
(run_tests)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, skipCPUIfNoLapack, skipCUDAIfNoMagma)
# Information for generating an alias test
# NOTE: ending the alias_name with an underscore will interpret the test
# as the test for an inplace method of that name
class AliasInfo(object):
__slots__ = ['alias_name', 'alias_op', 'original_name', 'input', 'args', 'decorators']
def __init__(self,
alias_name, # the name of the alias
alias_op, # the aliased op
original_name, # the name of the original function
input, # the first tensor argument to the op
*,
args=(), # tuple of additional positional arguments
decorators=()): # decorators to apply to the test
self.alias_name = alias_name
self.alias_op = alias_op
self.original_name = original_name
self.input = input
self.args = args
self.decorators = decorators
alias_infos = (
AliasInfo('absolute', torch.absolute, 'abs',
torch.randn(20) - .5),
AliasInfo('absolute_', torch.Tensor.absolute_, 'abs_',
torch.randn(20) - .5),
AliasInfo('clip', torch.clip, 'clamp',
torch.randn(20), args=(.4, .6)),
AliasInfo('clip_', torch.Tensor.clip_, 'clamp_',
torch.randn(20), args=(.4, .6)),
AliasInfo('linalg.det', torch.linalg.det, 'det',
torch.randn(10, 10), decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma)),
AliasInfo('outer', torch.outer, 'ger',
torch.randn(20), args=(torch.randn(20),))
)
# Placeholder test class for validating that aliases are correctly
# translated when scripted and traced
class TestOpNormalization(JitTestCase):
pass
# Generates alias tests and adds them to the specified class (cls)
def create_alias_tests(cls):
for info in alias_infos:
@torch.no_grad()
def _test(self, device, info=info):
tensor = torch.tensor
op = info.alias_op
is_inplace = info.alias_name.endswith('_')
# Checks that scripting converts aliases
# NOTE: the code to test scripting must be generated since
# scripting does not support splatting args or directly
# calling torch.Tensor methods. The following
# splats args after the first tensor by inlining them as constants.
if is_inplace:
fn_template = '''
def _fn(t):
return t.{alias_name}({args})
'''
arg_string = ', '.join((str(arg) for arg in info.args))
script = fn_template.format(alias_name=info.alias_name, args=arg_string)
else:
fn_template = '''
def _fn(t):
return op(t{args})
'''
arg_string = ", " + ', '.join((str(arg) for arg in info.args))
script = fn_template.format(args=arg_string)
# Compiles script
scripted = torch.jit.CompilationUnit(script)._fn
# Acquires and checks the graph remaps the alias
scripted(info.input)
graph = scripted.graph_for(info.input)
FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)
# Checks that tracing converts aliases
# NOTE: tracing has no problem splatting args
def _fn(t):
return info.alias_op(t, *info.args)
traced = torch.jit.trace(_fn, (info.input,))
traced(info.input)
graph = traced.graph_for(info.input)
FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)
# Applies decorators
for decorator in info.decorators:
_test = decorator(_test)
test_name = "test_alias_" + info.alias_name
setattr(cls, test_name, _test)
create_alias_tests(TestOpNormalization)
instantiate_device_type_tests(TestOpNormalization, globals())
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
] | |
ca558233180fa9b596dcb99d1b085674e4516699 | e12edf6cde9670eb3d4f1596cd648fddefaf480f | /acm-practice/2017-10-26/D_suspension_binary_search.py | 1661f660b42de137344239ea94ac4938ef6967cb | [] | no_license | VitamintK/AlgorithmProblems | 7a7786a0377a236f5cc82ae3b623ecad7f0eb025 | 34da53e2e7d3b5964bafd1f3edb2e00dea9a729d | refs/heads/master | 2023-06-29T00:56:53.360829 | 2023-06-14T16:33:14 | 2023-06-14T16:33:14 | 25,510,479 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | d, s = map(int, input().split())
l = 0
r = 123456789
eps = 0.000001
import math
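# binary search on the catenary parameter a: sag(a) = a*cosh(d/(2a)) - a decreases as a grows,
# so shrink the upper bound when the sag is still below s and raise the lower bound otherwise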
for i in range(123456):
a = (r+l)/2
term = (d/(2*a))
sag = 0.5*a*(math.exp(term) + math.exp(-term)) - a
if sag < s:
r = a
else:
l = a
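# cable length of the catenary: 2*a*sinh(d/(2*a)) = a*(e**t - e**(-t)) with t = d/(2*a)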
term = d/(2*l)
print(l * (math.exp(term) - math.exp(-term)))
| [
"[email protected]"
] | |
99d79ab61d8a2d1d313f004be2ed3b8742bb4c89 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/postfix/for/topLevel.py | f1b3ddf673f84c9ab1c8ffb03a662bc644f7fe85 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 13 | py | [].for<caret> | [
"[email protected]"
] | |
96f75f958e7146a11ac99d2224a3a125a48406dc | 60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14 | /grand_contest/037/A.py | 96f513b224075418d215cfb1e943c483fd3a12f5 | [
"MIT"
] | permissive | FGtatsuro/myatcoder | 12a9daafc88efbb60fc0cd8840e594500fc3ee55 | 25a3123be6a6311e7d1c25394987de3e35575ff4 | refs/heads/master | 2021-06-13T15:24:07.906742 | 2021-05-16T11:47:09 | 2021-05-16T11:47:09 | 195,441,531 | 0 | 0 | MIT | 2021-05-16T11:47:10 | 2019-07-05T16:47:58 | Python | UTF-8 | Python | false | false | 1,445 | py | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
s = input().strip()
if len(s) == 1:
print(0)
sys.exit(0)
if len(s) == 2 and s[0] == s[1]:
print(0)
sys.exit(0)
# [i][0]: the piece ending at index i has length 1
# [i][1]: the piece ending at index i has length 2
dp = [[0] * 2 for _ in range(len(s))]
# for 3 or more characters the provisional 1-char / 2-char results below are reused
# 1 character
dp[0][0] = dp[0][1] = 1
# 2 characters
if s[0] == s[1]:
    # in practice this always ends up as a single 2-character piece
    dp[1][0] = dp[1][1] = 1
else:
    dp[1][0] = 2
    dp[1][1] = 1
# 3 or more characters
for i in range(2, len(s)):
    # [i][0]: the final piece has length 1
    # if the last two characters are equal
    # - the previous piece must have length 2
    if s[i] == s[i-1]:
        dp[i][0] = dp[i-1][1] + 1
    # if the last two characters differ
    # - the previous piece may have length 1
    # - or length 2
    else:
        dp[i][0] = max(dp[i-1][0] + 1, dp[i-1][1] + 1)
    # [i][1]: the final piece has length 2
    # if the two characters before the final piece equal the final two characters
    # - the preceding piece must have length 1
    if s[i-3:i-1] == s[i-1:i+1]:
        dp[i][1] = dp[i-2][0] + 1
    # if the two characters before the final piece differ from the final two characters
    # - the preceding piece may have length 1
    # - or length 2
    else:
        dp[i][1] = max(dp[i-2][0] + 1, dp[i-2][1] + 1)
print(max(dp[-1][0], dp[-1][1]))
| [
"[email protected]"
] | |
03ba7abe47bcab7d9872f3847417428cf3b2f187 | cc2bb9ccc66783ac7d37454e4784df5e4a2d80f4 | /server/mysqldb.py | f2feb13c8fbf3f3239e7424de6323d998304ed62 | [] | no_license | ronnyzh/Tornado_Server | f308b7e9c2112167b04cbe324e37b1f891999187 | 42112d39e4dea128d059dbfa53c410f3774dc4b1 | refs/heads/master | 2021-05-22T22:10:26.694262 | 2020-04-04T23:39:14 | 2020-04-04T23:39:14 | 253,118,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | # -*- coding:utf-8 -*-
# !/bin/python
"""
Author: ronnyzh
Date: 2019/10/15
Revision: 1.0.0
Description: Description
"""
from configs import CONFIGS
from model.model_mysql import MySQLdb
from model.model_asyn_mysql import Async_Mysql
import tornado.ioloop
from public.public_logger import *
mysql_logger = getHandlerLogger(fileLabel='mysql', loggerLabel='mysql', level=logging.DEBUG,
handler_types=[Handler_Class.RotatingFile])
mysqlDB = MySQLdb(CONFIGS['mysql'])
async_mysqlDb = Async_Mysql(CONFIGS['async_mysql'], logger=mysql_logger)
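# schedule async connection-pool creation to run once the IOLoop starts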
tornado.ioloop.IOLoop.current().spawn_callback(async_mysqlDb.createPool_async)
| [
"[email protected]"
] | |
a404e63cb25c5f20ac03b7c7d3a2529e11c00d6f | 1388bcd6de659ffefe97e7e6c2aee685b5e7c534 | /stubs/stubs/Utilityrate4.pyi | 0b9fd269d9178b5f58e6650042aed54b3107d49f | [
"BSD-3-Clause"
] | permissive | BRIK-Engenharia/pysam | a7b4b543131043510023a5c17b057ead0b39d440 | 2a4115f34419edf9776b0bbc7b3f453c958ce734 | refs/heads/master | 2022-12-06T05:15:35.364375 | 2020-09-03T22:59:17 | 2020-09-03T22:59:17 | 297,958,820 | 1 | 0 | BSD-3-Clause | 2020-09-23T12:13:32 | 2020-09-23T12:13:32 | null | UTF-8 | Python | false | false | 6,297 | pyi | class Common(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
analysis_period = float
load_escalation = tuple
rate_escalation = tuple
system_use_lifetime_output = float
ur_annual_min_charge = float
ur_dc_enable = float
ur_dc_flat_mat = tuple
ur_dc_sched_weekday = tuple
ur_dc_sched_weekend = tuple
ur_dc_tou_mat = tuple
ur_ec_sched_weekday = tuple
ur_ec_sched_weekend = tuple
ur_ec_tou_mat = tuple
ur_metering_option = float
ur_monthly_fixed_charge = float
ur_monthly_min_charge = float
ur_nm_yearend_sell_rate = float
ur_sell_eq_buy = float
class TimeSeries(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
gen = tuple
load = tuple
class Financials(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
inflation_rate = float
class AnnualOutput(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
degradation = tuple
class Outputs(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
annual_electric_load = tuple
annual_energy_value = tuple
charge_w_sys_dc_fixed = tuple
charge_w_sys_dc_fixed_ym = tuple
charge_w_sys_dc_tou = tuple
charge_w_sys_dc_tou_ym = tuple
charge_w_sys_ec = tuple
charge_w_sys_ec_apr_tp = tuple
charge_w_sys_ec_aug_tp = tuple
charge_w_sys_ec_dec_tp = tuple
charge_w_sys_ec_feb_tp = tuple
charge_w_sys_ec_jan_tp = tuple
charge_w_sys_ec_jul_tp = tuple
charge_w_sys_ec_jun_tp = tuple
charge_w_sys_ec_mar_tp = tuple
charge_w_sys_ec_may_tp = tuple
charge_w_sys_ec_nov_tp = tuple
charge_w_sys_ec_oct_tp = tuple
charge_w_sys_ec_sep_tp = tuple
charge_w_sys_ec_ym = tuple
charge_w_sys_fixed = tuple
charge_w_sys_fixed_ym = tuple
charge_w_sys_minimum = tuple
charge_w_sys_minimum_ym = tuple
charge_wo_sys_dc_fixed = tuple
charge_wo_sys_dc_fixed_ym = tuple
charge_wo_sys_dc_tou = tuple
charge_wo_sys_dc_tou_ym = tuple
charge_wo_sys_ec = tuple
charge_wo_sys_ec_apr_tp = tuple
charge_wo_sys_ec_aug_tp = tuple
charge_wo_sys_ec_dec_tp = tuple
charge_wo_sys_ec_feb_tp = tuple
charge_wo_sys_ec_jan_tp = tuple
charge_wo_sys_ec_jul_tp = tuple
charge_wo_sys_ec_jun_tp = tuple
charge_wo_sys_ec_mar_tp = tuple
charge_wo_sys_ec_may_tp = tuple
charge_wo_sys_ec_nov_tp = tuple
charge_wo_sys_ec_oct_tp = tuple
charge_wo_sys_ec_sep_tp = tuple
charge_wo_sys_ec_ym = tuple
charge_wo_sys_fixed = tuple
charge_wo_sys_fixed_ym = tuple
charge_wo_sys_minimum = tuple
charge_wo_sys_minimum_ym = tuple
elec_cost_with_system = tuple
elec_cost_with_system_year1 = float
elec_cost_without_system = tuple
elec_cost_without_system_year1 = float
energy_w_sys_ec_apr_tp = tuple
energy_w_sys_ec_aug_tp = tuple
energy_w_sys_ec_dec_tp = tuple
energy_w_sys_ec_feb_tp = tuple
energy_w_sys_ec_jan_tp = tuple
energy_w_sys_ec_jul_tp = tuple
energy_w_sys_ec_jun_tp = tuple
energy_w_sys_ec_mar_tp = tuple
energy_w_sys_ec_may_tp = tuple
energy_w_sys_ec_nov_tp = tuple
energy_w_sys_ec_oct_tp = tuple
energy_w_sys_ec_sep_tp = tuple
energy_wo_sys_ec_apr_tp = tuple
energy_wo_sys_ec_aug_tp = tuple
energy_wo_sys_ec_dec_tp = tuple
energy_wo_sys_ec_feb_tp = tuple
energy_wo_sys_ec_jan_tp = tuple
energy_wo_sys_ec_jul_tp = tuple
energy_wo_sys_ec_jun_tp = tuple
energy_wo_sys_ec_mar_tp = tuple
energy_wo_sys_ec_may_tp = tuple
energy_wo_sys_ec_nov_tp = tuple
energy_wo_sys_ec_oct_tp = tuple
energy_wo_sys_ec_sep_tp = tuple
lifetime_load = tuple
savings_year1 = float
surplus_w_sys_ec_apr_tp = tuple
surplus_w_sys_ec_aug_tp = tuple
surplus_w_sys_ec_dec_tp = tuple
surplus_w_sys_ec_feb_tp = tuple
surplus_w_sys_ec_jan_tp = tuple
surplus_w_sys_ec_jul_tp = tuple
surplus_w_sys_ec_jun_tp = tuple
surplus_w_sys_ec_mar_tp = tuple
surplus_w_sys_ec_may_tp = tuple
surplus_w_sys_ec_nov_tp = tuple
surplus_w_sys_ec_oct_tp = tuple
surplus_w_sys_ec_sep_tp = tuple
utility_bill_w_sys = tuple
utility_bill_w_sys_ym = tuple
utility_bill_wo_sys = tuple
utility_bill_wo_sys_ym = tuple
year1_electric_load = float
year1_hourly_dc_peak_per_period = tuple
year1_hourly_dc_tou_schedule = tuple
year1_hourly_dc_with_system = tuple
year1_hourly_dc_without_system = tuple
year1_hourly_e_fromgrid = tuple
year1_hourly_e_tofromgrid = tuple
year1_hourly_e_togrid = tuple
year1_hourly_ec_tou_schedule = tuple
year1_hourly_ec_with_system = tuple
year1_hourly_ec_without_system = tuple
year1_hourly_p_system_to_load = tuple
year1_hourly_p_tofromgrid = tuple
year1_hourly_salespurchases_with_system = tuple
year1_hourly_salespurchases_without_system = tuple
year1_hourly_system_to_load = tuple
year1_monthly_cumulative_excess_dollars = tuple
year1_monthly_cumulative_excess_generation = tuple
year1_monthly_dc_fixed_with_system = tuple
year1_monthly_dc_fixed_without_system = tuple
year1_monthly_dc_tou_with_system = tuple
year1_monthly_dc_tou_without_system = tuple
year1_monthly_ec_charge_with_system = tuple
year1_monthly_ec_charge_without_system = tuple
year1_monthly_electricity_to_grid = tuple
year1_monthly_fixed_with_system = tuple
year1_monthly_fixed_without_system = tuple
year1_monthly_load = tuple
year1_monthly_minimum_with_system = tuple
year1_monthly_minimum_without_system = tuple
year1_monthly_peak_w_system = tuple
year1_monthly_peak_wo_system = tuple
year1_monthly_use_w_system = tuple
year1_monthly_use_wo_system = tuple
year1_monthly_utility_bill_w_sys = tuple
year1_monthly_utility_bill_wo_sys = tuple
class Utilityrate4(object):
def assign(self, dict):
pass
def value(self, name, value=None):
pass
def execute(self, int_verbosity):
pass
def export(self):
pass
def __getattribute__(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
Common = Common
TimeSeries = TimeSeries
Financials = Financials
AnnualOutput = AnnualOutput
Outputs = Outputs
def default(config) -> Utilityrate4:
pass
def new() -> Utilityrate4:
pass
def wrap(ssc_data_t) -> Utilityrate4:
pass
def from_existing(model, config="") -> Utilityrate4:
pass
__loader__ = None
__spec__ = None
| [
"[email protected]"
] | |
f1c6b8b4f3add343bd97fe50d6c9ac34e1446bc6 | 099c5d0d21de342ad578be0fa06dde6be10b4e95 | /saltcloud/clouds/joyent.py | 5f6729d01b3fa922fc700a5e3345b90c34949cca | [
"Apache-2.0"
] | permissive | lexual/salt-cloud | b09835795a0221c3d283e7e17c60ac68f76ee226 | 063ac2050f27181ea6da8e3ece528974f8284b72 | refs/heads/master | 2021-01-17T12:21:01.688573 | 2012-08-13T19:46:19 | 2012-08-13T19:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | '''
Joyent Cloud Module
===================
The Joyent Cloud module is used to intereact with the Joyend cloud system
it requires that the username and password to the joyent accound be configured
.. code-block:: yaml
# The Joyent login user
JOYENT.user: fred
# The Joyent user's password
JOYENT.password: saltybacon
# The location of the ssh private key that can log into the new vm
JOYENT.private_key: /root/joyent.pem
'''
# The import section is mostly libcloud boilerplate
# Import python libs
import os
import subprocess
import types
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import generic libcloud functions
import saltcloud.utils
from saltcloud.libcloudfuncs import *
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
avail_images = types.FunctionType(avail_images.__code__, globals())
avail_sizes = types.FunctionType(avail_sizes.__code__, globals())
script = types.FunctionType(script.__code__, globals())
destroy = types.FunctionType(destroy.__code__, globals())
list_nodes = types.FunctionType(list_nodes.__code__, globals())
# Only load this module if the JOYENT configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for JOYENT configs
'''
if 'JOYENT.user' in __opts__ and 'JOYENT.password' in __opts__:
return 'joyent'
return False
def get_conn():
'''
Return a conn object for the passed vm data
'''
driver = get_driver(Provider.JOYENT)
return driver(
__opts__['JOYENT.user'],
__opts__['JOYENT.password'],
)
def create(vm_):
'''
Create a single vm from a data dict
'''
print('Creating Cloud VM {0}'.format(vm_['name']))
conn = get_conn()
deploy_script = script(vm_)
kwargs = {}
kwargs['name'] = vm_['name']
kwargs['image'] = get_image(conn, vm_)
kwargs['size'] = get_size(conn, vm_)
data = conn.create_node(**kwargs)
if saltcloud.utils.wait_for_ssh(data.public_ips[0]):
cmd = ('ssh -oStrictHostKeyChecking=no -t -i {0} {1}@{2} '
'"{3}"').format(
__opts__['JOYENT.private_key'],
'root',
data.public_ips[0],
deploy_script.script,
)
subprocess.call(cmd, shell=True)
else:
print('Failed to start Salt on Cloud VM {0}'.format(vm_['name']))
print('Created Cloud VM {0} with the following values:'.format(
vm_['name']
))
for key, val in data.__dict__.items():
print(' {0}: {1}'.format(key, val))
| [
"[email protected]"
] | |
d9b758cc0246650a55d4873c5080463895e12575 | 6e7b1305887d25ae60251ce54ed2b94dc37ea06c | /Ecommerce/shopping/migrations/0008_order_product.py | 310c90465104e681244c4f32141b478425d05d25 | [] | no_license | Aadeshkale/assignment | 5bedaa95c6d2457d87b239117259c2a17d765c0f | bac0fa5523e3e6179dfe907f493a677adda7993b | refs/heads/master | 2023-01-08T04:11:14.847962 | 2020-11-12T09:37:15 | 2020-11-12T09:37:15 | 304,293,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | # Generated by Django 3.1.1 on 2020-10-14 07:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shopping', '0007_auto_20201011_1315'),
]
operations = [
migrations.CreateModel(
name='order_product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(max_length=100, null=True)),
('house_no', models.CharField(blank=True, max_length=100, null=True)),
('area_name', models.CharField(max_length=100, null=True)),
('city', models.CharField(max_length=100, null=True)),
('state', models.CharField(max_length=100, null=True)),
('email', models.CharField(blank=True, max_length=100, null=True)),
('pincode', models.CharField(max_length=100, null=True)),
('mob1', models.CharField(max_length=100, null=True)),
('mob2', models.CharField(blank=True, max_length=100, null=True)),
('status', models.CharField(max_length=100, null=True)),
('landmark', models.CharField(max_length=100, null=True)),
('pro', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shopping.product')),
('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
d45f5f1e80eb49d2b796c96c0f6f74279719c04f | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Calorimeter/CaloCondPhysAlgs/share/CaloNoise2Ntuple_data.py | b272eccf19df896d9085998da90333c610274a83 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,388 | py | ###############################################################
#
# Job options file for CaloNoise2Ntuple
#
#==============================================================
# configuration for data, read noise from database through CaloNoiseToolDB
if 'RunNumber' not in dir():
RunNumber = 258914
if 'LumiBlock' not in dir():
LumiBlock = 1
if 'GlobalTag' not in dir():
GlobalTag = 'CONDBR2-ES1PA-2015-04'
if 'Geometry' not in dir():
Geometry = 'ATLAS-R2-2015-03-01-00'
if 'outputNtuple' not in dir():
outputNtuple="cellnoise_data.root"
from RecExConfig.RecFlags import rec
rec.RunNumber.set_Value_and_Lock(RunNumber)
from PerfMonComps.PerfMonFlags import jobproperties
jobproperties.PerfMonFlags.doMonitoring = True
from AthenaCommon.Resilience import treatException,protectedInclude
protectedInclude( "PerfMonComps/PerfMonSvc_jobOptions.py" )
from AthenaCommon.DetFlags import DetFlags
DetFlags.all_setOff()
DetFlags.LAr_setOn()
DetFlags.Tile_setOn()
DetFlags.digitize.all_setOff()
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('atlas')
globalflags.DataSource.set_Value_and_Lock('data')
from CaloTools.CaloNoiseFlags import jobproperties
jobproperties.CaloNoiseFlags.FixedLuminosity.set_Value_and_Lock(-1.)
import AthenaCommon.AtlasUnixGeneratorJob
# Get a handle to the default top-level algorithm sequence
from AthenaCommon.AppMgr import ToolSvc
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# Get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
# Setup Db stuff
import AthenaPoolCnvSvc.AthenaPool
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion=Geometry
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py")
include( "CaloIdCnv/CaloIdCnv_joboptions.py" )
include( "TileIdCnv/TileIdCnv_jobOptions.py" )
include( "LArDetDescr/LArDetDescr_joboptions.py" )
include("TileConditions/TileConditions_jobOptions.py" )
include("LArConditionsCommon/LArConditionsCommon_comm_jobOptions.py")
svcMgr.IOVDbSvc.GlobalTag = GlobalTag
from CaloTools.CaloNoiseToolDefault import CaloNoiseToolDefault
theCaloNoiseTool = CaloNoiseToolDefault()
theCaloNoiseTool.RescaleForHV=False
ToolSvc += theCaloNoiseTool
if "dbNoise" in dir():
conddb.addMarkup("/LAR/NoiseOfl/CellNoise","<db>"+dbNoise+"</db>")
if "folderTag" in dir():
conddb.addOverride("/LAR/NoiseOfl/CellNoise",folderTag)
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
from CaloCondPhysAlgs.CaloCondPhysAlgsConf import CaloNoise2Ntuple
theCaloNoise2Ntuple = CaloNoise2Ntuple("CaloNoise2Ntuple")
theCaloNoise2Ntuple.noiseTool = theCaloNoiseTool
topSequence += theCaloNoise2Ntuple
#--------------------------------------------------------------
#--- Dummy event loop parameters
#--------------------------------------------------------------
svcMgr.EventSelector.RunNumber = RunNumber
svcMgr.EventSelector.EventsPerRun = 1
svcMgr.EventSelector.FirstEvent = 0
svcMgr.EventSelector.EventsPerLB = 1
svcMgr.EventSelector.FirstLB = LumiBlock
svcMgr.EventSelector.InitialTimeStamp = 0
svcMgr.EventSelector.TimeStampInterval = 5
svcMgr.EventSelector.OverrideRunNumber=True
theApp.EvtMax = 1
# ------------------------------------------------------------------
# --- Ntuple
# ------------------------------------------------------------------
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
ServiceMgr.THistSvc.Output = ["file1 DATAFILE='"+outputNtuple+"' OPT='RECREATE'"];
#--------------------------------------------------------------
# Set output level threshold (1=VERBOSE, 2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.MessageSvc.infoLimit = 100000
svcMgr.MessageSvc.Format = "% F%30W%S%7W%R%T %0W%M"
svcMgr.IOVDbSvc.OutputLevel = INFO
| [
"[email protected]"
] | |
877ca248005210bb37f38a0f37109cfac25b2dbd | 61a21ed2dcdfe9a43588c5582eea38ce8fdfcbf2 | /akshare/charity/__init__.py | c5707908556063f7183a0abfe452b67f4dab4660 | [
"MIT"
] | permissive | huanghyw/akshare | 44187c6c56872d499651bb62c178ee837c776388 | ed84e937773c0420cc003793d74b73e64223e08b | refs/heads/master | 2023-04-22T07:06:08.929307 | 2021-05-02T16:05:59 | 2021-05-02T16:05:59 | 319,346,216 | 13 | 5 | MIT | 2021-05-02T16:05:59 | 2020-12-07T14:32:08 | null | UTF-8 | Python | false | false | 81 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/2/21 16:03
Desc:
"""
| [
"[email protected]"
] | |
916d8ba379bd9fe0dcfbd4758f028f88f55562fa | d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b | /application/budget/migrations/0005_auto_20210518_1730.py | 2650551c2c42b96e2b7524ac633bd6e6f8c70b2f | [] | no_license | Tiny-Hands/tinyhands | 337d5845ab99861ae189de2b97b8b36203c33eef | 77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584 | refs/heads/develop | 2023-09-06T04:23:06.330489 | 2023-08-31T11:31:17 | 2023-08-31T11:31:17 | 24,202,150 | 7 | 3 | null | 2023-08-31T11:31:18 | 2014-09-18T19:35:02 | PLpgSQL | UTF-8 | Python | false | false | 683 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-05-18 17:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20210120_1903'),
]
operations = [
migrations.AlterField(
model_name='otherbudgetitemcost',
name='form_section',
field=models.IntegerField(blank=True, null=True, verbose_name=[(1, 'Travel'), (2, 'Miscellaneous'), (3, 'Awareness'), (5, 'Potential Victim Care'), (7, 'Communication'), (8, 'Staff & Benefits'), (10, 'Administration'), (11, 'Past Month Sent Money')]),
),
]
| [
"[email protected]"
] | |
49f3bee05885e479558b18dcff7a038de7a5e3ba | 250db406ad4a62e3d576e55b979bcfdc3407f226 | /Leetcode分类/1. Array /Leetcode_27 Remove Element/my_solution.py | eb387a47da03f53268d0f9347d5e5e9c16df2127 | [] | no_license | chenshanghao/Interview_preparation | 0830f0e461a2fe287b8ec24ae761974f50268767 | 4e7701d32990604c16ba18a8083c2108c0232306 | refs/heads/master | 2020-04-25T02:36:19.499364 | 2019-06-10T04:51:00 | 2019-06-10T04:51:00 | 172,446,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
index = 0
for i in range(len(nums)):
if nums[i] != val:
nums[index] = nums[i]
index += 1
return index
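# Quick illustrative check (not part of the original solution): the two-pointer
# overwrite keeps every element != val in the first `index` slots of nums.
if __name__ == "__main__":
    nums = [3, 2, 2, 3]
    k = Solution().removeElement(nums, 3)
    print(k, nums[:k])  # expected output: 2 [2, 2]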
| [
"[email protected]"
] | |
f37cd5437e19fb4a0472381b67b5f554307c24ad | 1e263d605d4eaf0fd20f90dd2aa4174574e3ebce | /components/ally-http/__setup__/ally_http/__init__.py | b06c5790549446b5d6d3390a164ae4559aab9422 | [] | no_license | galiminus/my_liveblog | 698f67174753ff30f8c9590935d6562a79ad2cbf | 550aa1d0a58fc30aa9faccbfd24c79a0ceb83352 | refs/heads/master | 2021-05-26T20:03:13.506295 | 2013-04-23T09:57:53 | 2013-04-23T09:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | '''
Created on Jul 15, 2011
@package: ally http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Contains setup and configuration files for the HTTP REST server.
'''
from ally.container import ioc
# --------------------------------------------------------------------
NAME = 'ally HTTP'
GROUP = 'ally'
VERSION = '1.0'
DESCRIPTION = 'Provides the HTTP communication support'
# --------------------------------------------------------------------
# The default configurations
@ioc.config
def server_type() -> str:
'''
The type of the server to use, the options are:
"basic"- single threaded server, the safest but slowest server to use.
'''
return 'basic'
@ioc.config
def server_host() -> str:
'''The IP address to bind the server to, something like 127.0.0.1'''
return '0.0.0.0'
@ioc.config
def server_port() -> int:
'''The port on which the server will run'''
return 8080
@ioc.config
def server_version() -> str:
'''The server version name'''
return 'Ally/0.1'
| [
"[email protected]"
] | |
fdf0029733f0c29f70e2242919078fe8131e8b6b | f428482945cf11d0fa17aa1a0607f43ec8427614 | /run_profile.py | 3a8a3189a42f462a73a5eb880fb0a24e0f0f8f45 | [] | no_license | SomervilleJesusBall/KebleBall | 22e3367797a0a9f740271dff40d5359e69a80f9d | 09a7d9c6b86365c31827bfd44fa50d4527a646e4 | refs/heads/master | 2021-01-22T05:23:55.882724 | 2016-03-13T18:28:09 | 2016-04-14T12:17:45 | 47,200,067 | 0 | 0 | null | 2015-12-01T15:56:32 | 2015-12-01T15:56:31 | null | UTF-8 | Python | false | false | 469 | py | #! /usr/bin/env python2
# coding: utf-8
"""Executable to run Eisitirio with profiling."""
from __future__ import unicode_literals
from werkzeug.contrib import profiler
from eisitirio import app
from eisitirio import system # pylint: disable=unused-import
APP = app.APP
APP.config.from_pyfile('config/development.py')
APP.config['PROFILE'] = True
APP.wsgi_app = profiler.ProfilerMiddleware(APP.wsgi_app, restrictions=[30])
if __name__ == '__main__':
APP.run()
| [
"[email protected]"
] | |
0901587cb483f5f586d862ad701e89b6273493d3 | 78649dd3fdfafc3edb7ef5b0de52096846cd9c28 | /networking_mlnx_baremetal/ufmclient/session.py | 4e246c17d2045be65610b4e4a49a1dd690b63064 | [
"Apache-2.0"
] | permissive | IamFive/networking-mlnx-baremetal | 38c99b127a7b08850e9ce5c83f0a6539ec4fe9b6 | 8d65405a8546803f903cadd0cf0818007a1d0119 | refs/heads/master | 2023-02-02T05:47:02.166816 | 2020-12-02T08:01:40 | 2020-12-02T12:29:02 | 296,988,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,375 | py | # Copyright 2020 HuaWei Technologies. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
from requests.auth import HTTPBasicAuth
from networking_mlnx_baremetal.ufmclient import constants
from networking_mlnx_baremetal.ufmclient import exceptions
LOG = logging.getLogger(__name__)
HEAD = 'HEAD'
"""http method HEAD"""
GET = 'GET'
"""http method get"""
POST = 'POST'
"""http method POST"""
PATCH = 'PATCH'
"""http method PATCH"""
PUT = 'PUT'
"""http method PUT"""
DELETE = 'DELETE'
class UfmSession(object):
"""UFM REST API session"""
# Default timeout in seconds for requests connect and read
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
_DEFAULT_TIMEOUT = 60
def __init__(self, endpoint, username, password, verify_ca, timeout=None):
self.endpoint = endpoint
self.base_url = '%s/ufmRest' % endpoint
self._timeout = timeout if timeout else self._DEFAULT_TIMEOUT
# Initial request session
self._session = requests.Session()
self._session.verify = verify_ca
self._session.auth = HTTPBasicAuth(username, password)
from networking_mlnx_baremetal import __version__ as version
self._session.headers.update({
'User-Agent': 'python-ufmclient - v%s' % version
})
def get_url(self, path):
"""get absolute URL for UFM REST API resource
:param path: path of resource, can be relative path or absolute path
        :return: the absolute URL of the resource
"""
if path.startswith(self.base_url):
return path
elif path.startswith('/ufmRest'):
return '%s%s' % (self.endpoint, path)
else:
return '%s%s' % (self.base_url, path)
def get(self, url, headers=None):
return self.request(GET, url, headers=headers)
def post(self, url, payload, headers=None):
return self.request(POST, url, json=payload, headers=headers)
def put(self, url, payload, headers=None):
return self.request(PUT, url, json=payload, headers=headers)
def patch(self, url, payload, headers=None):
return self.request(PATCH, url, json=payload, headers=headers)
def delete(self, url, headers=None):
return self.request(DELETE, url, headers=headers)
def request(self, method, url, json=None, headers=None):
try:
url = self.get_url(url)
return self._request(method, url, json=json, headers=headers)
except requests.exceptions.RequestException as e:
response = e.response
if response is not None:
LOG.warning('UFM responses -> %(method)s %(url)s, '
'code: %(code)s, response: %(resp_txt)s',
{'method': method, 'url': url,
'code': response.status_code,
'resp_txt': response.content})
raise exceptions.raise_for_response(method, url, response)
else:
raise exceptions.UfmConnectionError(url=url, error=e)
def _request(self, method, url, json=None, headers=None):
if method.upper() in [constants.POST, constants.PATCH, constants.PUT]:
headers = headers or {}
headers.update({constants.HEADER_CONTENT_TYPE: 'application/json'})
req = requests.Request(method, url, json=json, headers=headers)
prepped_req = self._session.prepare_request(req)
res = self._session.send(prepped_req, timeout=self._timeout)
res.raise_for_status()
LOG.debug('UFM responses -> %(method)s %(url)s, code: %(code)s, '
                  'content: %(content)s',
{'method': method, 'url': url, 'code': res.status_code,
'content': res.text})
return res
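# Illustrative usage sketch (not part of the vendored client): the endpoint and
# credentials below are placeholders and assume a reachable UFM REST server.
#
#   session = UfmSession('https://ufm.example.com', 'admin', 'password',
#                        verify_ca=False)
#   resp = session.get('/resources/ports')  # relative path, resolved by get_url()
#   print(resp.json())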
| [
"[email protected]"
] | |
bf3d53245cc918b53d410fb4e30485d53f1b055f | 98d34935bfa9b709c07df539267daa6f3a6db880 | /kikar_hamedina/mks/factories/member_factory.py | b99cce744d9c25d90690e045e9aff5fd62a4e562 | [] | no_license | hasadna/kikar-hamedina | c4a0e939fdafb1f8d187db1be35aba5fde2350be | d08e9231fd4c91c4024ced26b760b87f93bb8607 | refs/heads/dev | 2020-12-25T18:04:22.817008 | 2019-03-30T15:27:13 | 2019-03-30T15:27:13 | 18,186,117 | 12 | 36 | null | 2019-03-30T15:28:06 | 2014-03-27T18:11:01 | Python | UTF-8 | Python | false | false | 373 | py | import factory
from mks import models
from party_factory import PartyFactory
class MemberFactory(factory.DjangoModelFactory):
class Meta:
model = models.Member
name = factory.sequence(lambda n: u"Name {}".format(n))
name_en = factory.sequence(lambda n: u"Name {}".format(n))
current_party = factory.SubFactory(PartyFactory)
is_current = True
| [
"[email protected]"
] | |
92faa3cc6c9b46d19d31350aa17bf68325786ac2 | d1c3a9a4289b1aa262285b5de5084f3074893703 | /games/forms.py | 2465c9d8ef21a1a536c51afe892d02c167bedc86 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | Code-Institute-Submissions/stream-three-project-1 | a9ce00a608b5d12d0c4ef48546c265f0110fb55e | 96a5718a22f57b908ea5eb76298ceffdb1f17c8b | refs/heads/master | 2020-03-21T11:13:19.873566 | 2018-06-24T14:57:31 | 2018-06-24T14:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from django import forms
from datetime import datetime
# Form to select a season when viewing the league standings. Just a simple select field to choose a year.
class SeasonSelectForm(forms.Form):
SEASON_OPTIONS = (
(year, year) for year in range(2000, datetime.now().year+1)
)
season = forms.ChoiceField(initial=datetime.now().year, choices=SEASON_OPTIONS)
| [
"[email protected]"
] | |
f43a8030903ababf84b372f2a71583c30458595b | 49273a7e6e0d4726f38fab1c430b86dbfc4b2345 | /leetcode/p49.py | ffbeedf32f08e603a01ecbb7f7ff820bb024d919 | [] | no_license | adqz/interview_practice | d16d8c56436dde1f7fa96dc0d8dcc827295e0ff0 | f55fb9c0a39c2482c98cc452c185a938a59ad57c | refs/heads/master | 2023-01-11T01:30:03.353498 | 2023-01-03T14:48:08 | 2023-01-03T14:48:08 | 207,520,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | '''
@author: adnan
Problem No. 98. Validate Binary Search Tree (Medium)
Runtime: 40 ms, faster than 96.02% of Python3 online submissions for Validate Binary Search Tree.
Memory Usage: 15.1 MB, less than 100.00% of Python3 online submissions for Validate Binary Search Tree.
'''
from typing import List
import tree_visualizer
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isValidBST(self, root: TreeNode, Min=None, Max=None) -> bool:
        # An empty subtree is a valid BST.
        if not root:
            return True
        if Min is not None and root.val <= Min:
            return False
        if Max is not None and root.val >= Max:
            return False
        if root.left and not self.isValidBST(root.left, Min, root.val):
            return False
        if root.right and not self.isValidBST(root.right, root.val, Max):
            return False
        return True
if __name__ == '__main__':
sol = Solution()
root = tree_visualizer.deserialize('[5,1,4,null,null,3,6]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
root = tree_visualizer.deserialize('[2,1,3]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
root = tree_visualizer.deserialize('[1,null,1]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
tree_visualizer.drawtree(root) | [
"[email protected]"
] | |
07a49fac0cb7ec2461404a59bf4502820bac0d55 | fcccdb133bf5611c69781080fdbcbb9f4b70c222 | /input/parameters/lithium-ion/experimental_functions/electrolyte_conductivity.py | 40a418d3a98ec2576698a3f0b4f8afde213c94fe | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | galvanic653960572/PyBaMM | d04036e9e0fec12ceb1d9b4b50cfb3bcfe25f3f1 | 4869d358b3452c7ca627d713823a67fdfdafa4bd | refs/heads/master | 2020-04-09T05:04:28.951580 | 2018-12-03T15:20:44 | 2018-12-03T15:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | def lfp_cond(c):
"""
Conductivity of LiPF6 in EC:DMC as in Newman's Dualfoil code. This
function is in dimensional form.
Parameters
----------
c: double
lithium-ion concentration
"""
c = c / 1000
sigma_e = 0.0911 + 1.9101 * c - 1.052 * c ** 2 + 0.1554 * c ** 3
return sigma_e
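# Illustrative check (not part of the original parameter set): with c = 1000,
# the scaled concentration is 1 and the cubic above evaluates to
# 0.0911 + 1.9101 - 1.052 + 0.1554 = 1.1046.
if __name__ == "__main__":
    print(lfp_cond(1000))  # approximately 1.1046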
| [
"[email protected]"
] | |
e08622954352b9e1b6540769c6217dd480ef770c | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/111.py | ec32bd42ef24b2c21f576226dc84e6a8613f5d9a | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
if root.left is None:
return 1 + self.minDepth(root.right)
if root.right is None:
return 1 + self.minDepth(root.left)
        return 1 + min(self.minDepth(root.left), self.minDepth(root.right))
| [
"[email protected]"
] | |
f58e05ef882af135fa092cb0991f14a2e01dd968 | d98883fe1007111b8795ac5661e56758eca3b62e | /google-cloud-sdk/lib/surface/lifesciences/workflows/run.py | 92eaf4895e9c83f71c66d593c7df7c99f4b867d9 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | pyDeb/KindnessCafe | 7303464e3c0693b0586a4a32740d8b9b19299caf | 6ff8dfe338aefd986edf67c382aff1a2920945d1 | refs/heads/master | 2022-12-29T16:16:35.796387 | 2021-04-19T00:03:14 | 2021-04-19T00:03:14 | 243,533,146 | 3 | 4 | null | 2022-12-08T09:48:09 | 2020-02-27T14:01:16 | Python | UTF-8 | Python | false | false | 18,047 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud lifesciences workflows run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
from googlecloudsdk.api_lib import lifesciences as lib
from googlecloudsdk.api_lib.lifesciences import exceptions
from googlecloudsdk.api_lib.lifesciences import lifesciences_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.apis import yaml_data
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
import six
CLOUD_SDK_IMAGE = 'google/cloud-sdk:slim'
SHARED_DISK = 'gcloud-shared'
class _SharedPathGenerator(object):
def __init__(self, root):
self.root = root
self.index = -1
def Generate(self):
self.index += 1
return '/%s/%s%d' % (SHARED_DISK, self.root, self.index)
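# Illustrative note (not part of the original command): successive Generate()
# calls yield consecutive paths on the shared disk, e.g. for
# _SharedPathGenerator('input') the first two calls return
# '/gcloud-shared/input0' and '/gcloud-shared/input1'.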
def _ValidateAndMergeArgInputs(args):
"""Turn args.inputs and args.inputs_from_file dicts into a single dict.
Args:
args: The parsed command-line arguments
Returns:
A dict that is the merge of args.inputs and args.inputs_from_file
Raises:
files.Error
"""
is_local_file = {}
# If no inputs from file, then no validation or merge needed
if not args.inputs_from_file:
return args.inputs, is_local_file
# Initialize the merged dictionary
arg_inputs = {}
if args.inputs:
# Validate args.inputs and args.inputs-from-file do not overlap
overlap = set(args.inputs.keys()).intersection(
set(args.inputs_from_file.keys()))
if overlap:
raise exceptions.LifeSciencesError(
'--{0} and --{1} may not specify overlapping values: {2}'
.format('inputs', 'inputs-from-file', ', '.join(overlap)))
# Add the args.inputs
arg_inputs.update(args.inputs)
# Read up the inputs-from-file and add the values from the file
for key, value in six.iteritems(args.inputs_from_file):
arg_inputs[key] = files.ReadFileContents(value)
is_local_file[key] = True
return arg_inputs, is_local_file
# TODO(b/137185310): Make this visible once the API is public.
@base.Hidden
class Run(base.SilentCommand):
r"""Defines and runs a pipeline.
A pipeline is a transformation of a set of inputs to a set of outputs.
Supports Docker-based commands.
## EXAMPLES
To run a pipeline described in the `pipeline.json` file, run:
$ {command} --pipeline-file=pipeline.json
"""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
location_spec = concepts.ResourceSpec.FromYaml(
yaml_data.ResourceYAMLData.FromPath('lifesciences.location')
.GetData())
concept_parsers.ConceptParser.ForResource(
'--location',
location_spec,
'The Google Cloud location to run the pipeline.',
required=True).AddToParser(parser)
pipeline = parser.add_mutually_exclusive_group(required=True)
pipeline.add_argument(
'--pipeline-file',
help='''A YAML or JSON file containing a Pipeline object. See
[](https://cloud.google.com/life-sciences/reference/rest/v2beta/workflows#Pipeline)
''')
pipeline.add_argument(
'--command-line',
category=base.COMMONLY_USED_FLAGS,
help='''Command line to run with /bin/sh in the specified
Docker image. Cannot be used with --pipeline-file.''')
parser.add_argument(
'--docker-image',
category=base.COMMONLY_USED_FLAGS,
default=CLOUD_SDK_IMAGE,
help='''A Docker image to run. Requires --command-line to
be specified and cannot be used with --pipeline-file.''')
parser.add_argument(
'--inputs',
category=base.COMMONLY_USED_FLAGS,
metavar='NAME=VALUE',
type=arg_parsers.ArgDict(),
action=arg_parsers.UpdateAction,
help='''Map of input PipelineParameter names to values.
Used to pass literal parameters to the pipeline, and to specify
input files in Google Cloud Storage that will have a localCopy
made. Specified as a comma-separated list: --inputs
file=gs://my-bucket/in.txt,name=hello''')
parser.add_argument(
'--inputs-from-file',
category=base.COMMONLY_USED_FLAGS,
metavar='NAME=FILE',
type=arg_parsers.ArgDict(),
action=arg_parsers.UpdateAction,
help='''Map of input PipelineParameter names to values.
Used to pass literal parameters to the pipeline where values come
from local files; this can be used to send large pipeline input
parameters, such as code, data, or configuration values.
Specified as a comma-separated list:
--inputs-from-file script=myshellscript.sh,pyfile=mypython.py''')
parser.add_argument(
'--outputs',
category=base.COMMONLY_USED_FLAGS,
metavar='NAME=VALUE',
type=arg_parsers.ArgDict(),
action=arg_parsers.UpdateAction,
help='''Map of output PipelineParameter names to values.
Used to specify output files in Google Cloud Storage that will be
made from a localCopy. Specified as a comma-separated list:
--outputs ref=gs://my-bucket/foo,ref2=gs://my-bucket/bar''')
parser.add_argument(
'--logging',
category=base.COMMONLY_USED_FLAGS,
help='''The location in Google Cloud Storage to which the pipeline logs
will be copied. Can be specified as a fully qualified directory
path, in which case logs will be output with a unique identifier
as the filename in that directory, or as a fully specified path,
which must end in `.log`, in which case that path will be
used. Stdout and stderr logs from the run are also generated and
output as `-stdout.log` and `-stderr.log`.''')
parser.add_argument(
'--env-vars',
category=base.COMMONLY_USED_FLAGS,
metavar='NAME=VALUE',
type=arg_parsers.ArgDict(),
help='''List of key-value pairs to set as environment variables.''')
labels_util.AddCreateLabelsFlags(parser)
parser.add_argument(
'--disk-size',
category=base.COMMONLY_USED_FLAGS,
default=None,
help='''The disk size(s) in GB, specified as a comma-separated list of
pairs of disk name and size. For example:
--disk-size "name:size,name2:size2".
Overrides any values specified in the pipeline-file.''')
parser.add_argument(
'--preemptible',
category=base.COMMONLY_USED_FLAGS,
action='store_true',
help='''Whether to use a preemptible VM for this pipeline. The
"resource" section of the pipeline-file must also set preemptible
to "true" for this flag to take effect.''')
parser.add_argument(
'--run-id',
hidden=True,
help='THIS ARGUMENT NEEDS HELP TEXT.')
parser.add_argument(
'--service-account-email',
default='default',
help='''The service account used to run the pipeline. If unspecified,
defaults to the Compute Engine service account for your project.''')
parser.add_argument(
'--service-account-scopes',
metavar='SCOPE',
type=arg_parsers.ArgList(),
default=[],
help='''List of additional scopes to be made available for this service
account. The following scopes are always requested:
https://www.googleapis.com/auth/cloud-platform''')
parser.add_argument(
'--machine-type',
default='n1-standard-1',
help='''The type of VirtualMachine to use. Defaults to n1-standard-1.''')
parser.add_argument(
'--zones',
metavar='ZONE',
type=arg_parsers.ArgList(),
help='''List of Compute Engine zones the pipeline can run in.
If no zones are specified with the zones flag, then zones in the
pipeline definition file will be used.
If no zones are specified in the pipeline definition, then the
default zone in your local client configuration is used (and must be specified).
For more information on default zones, see
https://cloud.google.com/compute/docs/gcloud-compute/#set_default_zone_and_region_in_your_local_client''')
parser.add_argument(
'--regions',
metavar='REGION',
type=arg_parsers.ArgList(),
help='''List of Compute Engine regions the pipeline can
run in.
If no regions are specified with the regions flag, then regions in the
pipeline definition file will be used.
If no regions are specified in the pipeline definition, then the
default region in your local client configuration is used.
At least one zone or region must be specified.
For more information on default regions, see
https://cloud.google.com/compute/docs/gcloud-compute/#set_default_zone_and_region_in_your_local_client''')
parser.add_argument(
'--network',
help='''The network name to attach the VM's network
interface to.
The value will be prefixed with global/networks/ unless it contains a /, in
which case it is assumed to be a fully specified network resource URL.
If unspecified, the global default network is used.''')
parser.add_argument(
'--subnetwork',
help='''The subnetwork to use on the provided network.
If the specified network is configured for custom subnet creation, the name of
the subnetwork to attach the instance to must be specified here.
The value is prefixed with regions/*/subnetworks/ unless it contains a /, in
which case it is assumed to be a fully specified subnetwork resource URL.
If the * character appears in the value, it is replaced with the region that
the virtual machine has been allocated in.''')
parser.add_argument(
'--boot-disk-size',
type=int,
help='''The size of the boot disk in GB.
The boot disk size must be large enough to accommodate all Docker images from
each action in the pipeline at the same time. If not specified, a small but
reasonable default value is used.''')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: argparse.Namespace, All the arguments that were provided to this
command invocation.
Raises:
files.Error: A file argument could not be read.
LifeSciencesError: User input was invalid.
HttpException: An http error response was received while executing api
request.
Returns:
Operation representing the running pipeline.
"""
pipeline = None
apitools_client = lifesciences_util.GetLifeSciencesClient('v2beta')
lifesciences_messages = lifesciences_util.GetLifeSciencesMessages('v2beta')
if args.pipeline_file:
pipeline = lifesciences_util.GetFileAsMessage(
args.pipeline_file,
lifesciences_messages.Pipeline,
self.context[lib.STORAGE_V1_CLIENT_KEY])
elif args.command_line:
pipeline = lifesciences_messages.Pipeline(
actions=[lifesciences_messages.Action(
imageUri=args.docker_image,
commands=['-c', args.command_line],
entrypoint='bash')])
arg_inputs, is_local_file = _ValidateAndMergeArgInputs(args)
request = None
# Create messages up front to avoid checking for None everywhere.
if not pipeline.resources:
pipeline.resources = lifesciences_messages.Resources()
resources = pipeline.resources
if not resources.virtualMachine:
resources.virtualMachine = lifesciences_messages.VirtualMachine(
machineType=args.machine_type)
virtual_machine = resources.virtualMachine
if not virtual_machine.serviceAccount:
virtual_machine.serviceAccount = lifesciences_messages.ServiceAccount()
if args.preemptible:
virtual_machine.preemptible = args.preemptible
if args.zones:
resources.zones = args.zones
elif not resources.zones and properties.VALUES.compute.zone.Get():
resources.zones = [properties.VALUES.compute.zone.Get()]
if args.regions:
resources.regions = args.regions
elif not resources.regions and properties.VALUES.compute.region.Get():
resources.regions = [properties.VALUES.compute.region.Get()]
if args.service_account_email != 'default':
virtual_machine.serviceAccount.email = args.service_account_email
if args.service_account_scopes:
virtual_machine.serviceAccount.scopes = args.service_account_scopes
# Always add the cloud-platform scope for user convenience.
virtual_machine.serviceAccount.scopes.append(
'https://www.googleapis.com/auth/cloud-platform')
# Attach custom network/subnetwork (if set).
if args.network or args.subnetwork:
if not virtual_machine.network:
virtual_machine.network = lifesciences_messages.Network()
if args.network:
virtual_machine.network.network = args.network
if args.subnetwork:
virtual_machine.network.subnetwork = args.subnetwork
if args.boot_disk_size is not None:
if args.boot_disk_size <= 0:
raise exceptions.LifeSciencesError(
'Boot disk size must be greater than zero.')
virtual_machine.bootDiskSizeGb = args.boot_disk_size
# Generate paths for inputs and outputs in a shared location and put them
# into the environment for actions based on their name.
env = {}
if arg_inputs:
input_generator = _SharedPathGenerator('input')
for name, value in arg_inputs.items():
if lifesciences_util.IsGcsPath(value):
env[name] = input_generator.Generate()
pipeline.actions.insert(0, lifesciences_messages.Action(
imageUri=CLOUD_SDK_IMAGE,
commands=['/bin/sh', '-c', 'gsutil -m -q cp %s ${%s}' %
(value, name)]))
elif name in is_local_file:
env[name] = input_generator.Generate()
pipeline.actions.insert(0, lifesciences_messages.Action(
imageUri=CLOUD_SDK_IMAGE,
commands=['/bin/sh', '-c', 'echo "%s" | base64 -d > ${%s}' %
(base64.b64encode(value), name)]))
else:
env[name] = value
if args.outputs:
output_generator = _SharedPathGenerator('output')
for name, value in args.outputs.items():
env[name] = output_generator.Generate()
pipeline.actions.append(lifesciences_messages.Action(
imageUri=CLOUD_SDK_IMAGE,
commands=['/bin/sh', '-c', 'gsutil -m -q cp ${%s} %s' % (name,
value)]))
if args.env_vars:
for name, value in args.env_vars.items():
env[name] = value
# Merge any existing pipeline arguments into the generated environment and
# update the pipeline.
if pipeline.environment:
for val in pipeline.environment.additionalProperties:
if val.key not in env:
env[val.key] = val.value
pipeline.environment = lifesciences_messages.Pipeline.EnvironmentValue(
additionalProperties=lifesciences_util.ArgDictToAdditionalPropertiesList(
env,
lifesciences_messages.Pipeline.EnvironmentValue.AdditionalProperty))
if arg_inputs or args.outputs:
virtual_machine.disks.append(lifesciences_messages.Disk(
name=SHARED_DISK))
for action in pipeline.actions:
action.mounts.append(lifesciences_messages.Mount(
disk=SHARED_DISK,
path='/' + SHARED_DISK))
if args.logging:
pipeline.actions.append(lifesciences_messages.Action(
imageUri=CLOUD_SDK_IMAGE,
commands=['/bin/sh', '-c',
'gsutil -m -q cp /google/logs/output ' + args.logging],
alwaysRun=True))
# Update disk sizes if specified, potentially including the shared disk.
if args.disk_size:
disk_sizes = {}
for disk_encoding in args.disk_size.split(','):
parts = disk_encoding.split(':', 1)
try:
disk_sizes[parts[0]] = int(parts[1])
except:
raise exceptions.LifeSciencesError('Invalid --disk-size.')
for disk in virtual_machine.disks:
if disk.name in disk_sizes:
disk.sizeGb = disk_sizes[disk.name]
request = lifesciences_messages.RunPipelineRequest(
pipeline=pipeline,
labels=labels_util.ParseCreateArgs(
args, lifesciences_messages.RunPipelineRequest.LabelsValue))
projectId = lifesciences_util.GetProjectId()
location_ref = args.CONCEPTS.location.Parse()
request_wrapper = lifesciences_messages.LifesciencesProjectsLocationsPipelinesRunRequest(
parent=location_ref.RelativeName(),
runPipelineRequest=request)
result = apitools_client.projects_locations_pipelines.Run(request_wrapper)
log.status.Print('Running [{0}].'.format(result.name))
return result
| [
"[email protected]"
] | |
740243c8c2e06db15307652ccdf16cb6d4e8ecf1 | 79e0f8d64cb928ccc6a026b32dcbb3c8fcefa2de | /script/compile-coffee.py | 179931a5db0a31bf159caccbe6107c6d847aaeef | [
"MIT"
] | permissive | mapbox/atom-shell | 939bca5dec3f6cf7460a3b34d0517e78ed0da928 | 2125a0be826170c3a84af1e75572b06402f3add9 | refs/heads/master | 2023-06-05T10:46:29.802386 | 2014-08-04T16:05:26 | 2014-08-04T16:05:26 | 22,614,165 | 1 | 6 | MIT | 2023-04-09T15:03:19 | 2014-08-04T17:58:00 | null | UTF-8 | Python | false | false | 953 | py | #!/usr/bin/env python
import os
import subprocess
import sys
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
WINDOWS_NODE_PATHs = [
'C:/Program Files/nodejs/node.exe',
'C:/Program Files (x86)/nodejs/node.exe',
]
def main():
input_file = sys.argv[1]
output_dir = os.path.dirname(sys.argv[2])
coffee = os.path.join(SOURCE_ROOT, 'node_modules', 'coffee-script', 'bin',
'coffee')
if sys.platform in ['win32', 'cygwin']:
node = find_node()
if not node:
print 'Node.js is required for building atom-shell'
return 1
subprocess.check_call(['node', coffee, '-c', '-o', output_dir, input_file],
executable=node)
else:
subprocess.check_call(['node', coffee, '-c', '-o', output_dir, input_file])
def find_node():
for path in WINDOWS_NODE_PATHs:
if os.path.exists(path):
return path
return None
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
f33c5c5eacd1cacf4b21708ba3c979e5958862da | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py | 1a90b9ca274de0d0f5104b2b0f1741b2778accc7 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,019 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| [
"[email protected]"
] | |
63de0a760734ad6f525a1164cff32a69663a6382 | 7d43ba52d958537905cfdde46cc194a97c45dc56 | /WEB/Networks/Organizations/Registrars/RIR.py | 332105180c0a42d4b6aa3d37dd9dc1d0d59402e4 | [] | no_license | Koshmatova/workbook | 3e4d1f698a01f2be65c1abc83ee251ebc8a6bbcd | 902695e8e660689a1730c23790dbdc51737085c9 | refs/heads/master | 2023-05-01T02:30:46.868027 | 2021-05-10T03:49:08 | 2021-05-10T03:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | RIR
#Regional Internet Registry
REGISTRARS
#receive address blocks and autonomous system numbers from IANA
#allocate blocks to LIRs
ARIN
#serves
North America
the Bahamas
Puerto Rico
Jamaica
APNIC
#serves
South Asia
East Asia
Southeast Asia
Australia
Oceania
AfriNIC
#serves
Africa
Indian Ocean countries
LACNIC
#serves
South America
Caribbean basin countries
RIPE NCC
#serves
Europe
Central Asia
the Middle East
| [
"[email protected]"
] | |
3d5ca3e81b309d799bafa57120c402ad3bbbaa20 | ad16b0c0178e4543d0c44ad3d90f90c6beeb4f5a | /di2.py | cb2a72a47cbef1e402302ce534f3005b82c52464 | [] | no_license | timmonspatrick/HemoDub | 09cb61e8e33ee8b64c9e6011d4ae8679d07950d9 | 4e6cceb44456c498cc1d6d55f8369099d0d5d947 | refs/heads/master | 2021-04-27T09:34:40.935684 | 2018-05-31T08:29:04 | 2018-05-31T08:29:04 | 122,491,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,278 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 15:02:19 2017
@author: Patrick
"""
from __future__ import print_function
import numpy as np
from residue_distribution import alphabets
conjoint_letters = ["A", "I", "Y", "H", "R", "D", "C"]
aa_letters = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
di_letters = ["%s%s" % (a, b) for a in aa_letters for b in aa_letters]
di_conjoint_letters = ["%s%s" % (a, b) for a in conjoint_letters for b in conjoint_letters]
di3_letters = ["%s%s%s" % (a, b, c) for a in conjoint_letters for b in conjoint_letters for c in conjoint_letters]
def counter(string_list):
'''
    A function for counting the occurrences of each two-letter group.
    Returns a dict mapping each two-letter group to its relative frequency.
    string_list e.g. ["HW", "WA", "AL", "LS", ...]
'''
l = max(1, len(string_list))
d = {i : 0 for i in di_letters}
for s in string_list:
try:
d[s] += 1.0
except KeyError:
d[s] = 1.0
d = {k : d[k]/(l) for k in d}
return d
def counter3(string):
'''
    A function for counting the occurrences of each three-letter group
    in a list of strings. Returns a dict mapping each group to its
    relative frequency.
'''
l = max(1, len(string))
d = {i : 0 for i in di3_letters}
for s in string:
try:
d[s] += 1.0
except KeyError:
d[s] = 1.0
d = {k : d[k]/(l) for k in d}
return d
def residue_distribution2(all_residues, alphabet):
'''
    Takes a list of two-letter groups and the alphabet to count over.
    Returns a (1, N) numpy array of relative frequencies, ordered alphabetically.
'''
d = counter(all_residues)
di2_counts = list(sorted([(i, d[i]) for i in alphabet ])) ##Removes ambiguous letters
r_c = [i[1] for i in di2_counts]
dis = np.array([r_c,])
return dis
def residue_distribution3(all_residues):
'''
    Takes a list of three-letter groups over the conjoint alphabet.
    Returns a (1, 343) numpy array of relative frequencies, ordered alphabetically.
'''
d = counter3(all_residues)
di3_counts = list(sorted([(i, d[i]) for i in di3_letters ])) ##Removes ambiguous letters
r_c = [i[1] for i in di3_counts]
dis = np.array([r_c,])
return dis
def di2(seq, alphabet="aa"):
'''
A function to return all the di2s for a sequence.
Eg. ABCDEF --> AD, BE, CF
'''
l = []
for a in range(len(seq)):
try:
x = "%s%s" % (seq[a], seq[a + 3 ])
l.append(x)
except IndexError:
pass
return residue_distribution2(l, alphabets[alphabet][2])
def di3(seq):
'''
A function to return all the di3s for a sequence.
Eg. ABCDEFGHI --> ADG, BEH, CFI
'''
l = []
for a in range(len(seq)):
try:
x = "%s%s%s" % (seq[a], seq[a + 3 ], seq[a + 6])
l.append(x)
except IndexError:
pass
return residue_distribution3(l)
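# Illustrative usage (not part of the original module): di3 samples residue
# triplets at a stride of 3 (e.g. "ABCDEFGHI" -> ADG, BEH, CFI) and returns a
# (1, 343) array of normalised frequencies over the conjoint triplet alphabet.
# The demo sequence below is a hypothetical fragment built from conjoint letters.
if __name__ == "__main__":
    demo_seq = "AIYHRDCAIYHRDC"
    print(di3(demo_seq).shape)  # (1, 343)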
| [
"[email protected]"
] | |
9e47a121f146030bb57a04733eda6fd89bd415c2 | 512f48fdcfa78e322526cf47163110009b84bf73 | /test/test_page_of_policy_asset.py | b94a86aae703c7f17bf5ff820a90730167232177 | [
"MIT"
] | permissive | confluentinc/vm-console-client-python | 9a0f540c0113acf68ee9dc914715bc255e4d99f4 | ccbd944a0e0333c73e098b769fe4c82755d29874 | refs/heads/master | 2023-07-18T10:33:58.909287 | 2021-09-02T20:52:20 | 2021-09-02T20:52:20 | 402,559,283 | 0 | 0 | MIT | 2021-09-02T20:49:56 | 2021-09-02T20:49:56 | null | UTF-8 | Python | false | false | 879 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.page_of_policy_asset import PageOfPolicyAsset # noqa: E501
from swagger_client.rest import ApiException
class TestPageOfPolicyAsset(unittest.TestCase):
"""PageOfPolicyAsset unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPageOfPolicyAsset(self):
"""Test PageOfPolicyAsset"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.page_of_policy_asset.PageOfPolicyAsset() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
93f00f64a3499f2739b654603d11c63bd28647f0 | c36bd73ddbf668b25908df4ed2d4729d3ea792a7 | /venv/lib/python3.5/site-packages/facebook_business/adobjects/favoriterequest.py | 29354ce89e9dd9e997aa5312a28ef6b0f0f1c467 | [] | no_license | Suraj-KD/AbsentiaVR_Task | 1e236f88063b97666c8e188af7fddc0fd7ea3792 | 1de364e0464ac79cefc26077318021570993d713 | refs/heads/master | 2022-12-10T20:40:31.396380 | 2019-01-31T19:19:36 | 2019-01-31T19:19:36 | 168,398,946 | 0 | 0 | null | 2022-12-08T01:34:30 | 2019-01-30T19:08:22 | Python | UTF-8 | Python | false | false | 5,371 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class FavoriteRequest(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isFavoriteRequest = True
super(FavoriteRequest, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
api_version = 'api_version'
description = 'description'
graph_path = 'graph_path'
hash = 'hash'
http_method = 'http_method'
id = 'id'
post_params = 'post_params'
query_params = 'query_params'
class HttpMethod:
get = 'GET'
post = 'POST'
delete = 'DELETE'
class ApiVersion:
unversioned = 'unversioned'
v1_0 = 'v1.0'
v2_0 = 'v2.0'
v2_1 = 'v2.1'
v2_2 = 'v2.2'
v2_3 = 'v2.3'
v2_4 = 'v2.4'
v2_5 = 'v2.5'
v2_6 = 'v2.6'
v2_7 = 'v2.7'
v2_8 = 'v2.8'
v2_9 = 'v2.9'
v2_10 = 'v2.10'
v2_11 = 'v2.11'
v2_12 = 'v2.12'
v3_0 = 'v3.0'
v3_1 = 'v3.1'
v3_2 = 'v3.2'
v4_0 = 'v4.0'
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=FavoriteRequest,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'api_version': 'string',
'description': 'string',
'graph_path': 'string',
'hash': 'string',
'http_method': 'HttpMethod',
'id': 'string',
'post_params': 'list<Object>',
'query_params': 'list<Object>',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['HttpMethod'] = FavoriteRequest.HttpMethod.__dict__.values()
field_enum_info['ApiVersion'] = FavoriteRequest.ApiVersion.__dict__.values()
return field_enum_info
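# Illustrative usage sketch (not part of the generated SDK module): assumes a
# previously initialized FacebookAdsApi session; the node id below is a
# placeholder.
#
#   request = FavoriteRequest(fbid='<FAVORITE_REQUEST_ID>')
#   info = request.api_get(fields=[FavoriteRequest.Field.graph_path,
#                                  FavoriteRequest.Field.http_method])
#   print(info[FavoriteRequest.Field.graph_path])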
| [
"[email protected]"
] | |
a5a67527ca645c09a5eda7d574e9c5751ba8d7c7 | bcc04939aa70675c9be19c0bf4a9642877db46b1 | /qa/admin.py | fb3490662c6887235e08bad516912b69be9dcd6b | [
"MIT"
] | permissive | zkeshtkar/gapbug | 164398e2ddd8f952d5851eab19e34f9f84a080e1 | eec5baf9b4346aef26bcb10e48ddcb358140d708 | refs/heads/main | 2023-06-20T07:39:50.084126 | 2021-07-16T13:31:10 | 2021-07-16T13:31:10 | 387,550,452 | 0 | 0 | MIT | 2021-07-19T17:53:20 | 2021-07-19T17:53:19 | null | UTF-8 | Python | false | false | 187 | py | from django.contrib import admin
from .models import Question
class QuestionAdmin(admin.ModelAdmin):
list_display = ("title", "user")
admin.site.register(Question, QuestionAdmin)
| [
"[email protected]"
] | |
1b961f70e65bde6e2cf6a2d5479f8a1db3b842ef | 7ecc7092c70d28cfbc4229aca95267673f3b2b57 | /quru/server/mq_client.py | cab3ad58929c75dac7fb68c377ee1f2f1f96f8ad | [
"MIT"
] | permissive | ShawnHan1993/quru | 403bd6499a18901a02378eae82da73f828090107 | 6b103a54d8228e4e2d44b06cc068c60a44b02d67 | refs/heads/main | 2023-07-17T19:38:43.855085 | 2021-09-03T01:40:25 | 2021-09-03T14:07:11 | 400,223,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,594 | py | import asyncio
import time
import typing
import aio_pika
import pika
import aiormq
from ..quru_logger import logger
from ..env import (BROADCAST_EXCHANGE_NAME, MAIN_EXCHANGE_NAME, MQ_HOST,
MQ_PORT, MQ_RETRY, RABBITMQ_PASSWORD, RABBITMQ_USERNAME,
RPC_EXCHANGE_NAME)
class BaseMqClient:
def __init__(self,
mq_host=MQ_HOST,
mq_port=MQ_PORT,
mq_username=RABBITMQ_USERNAME,
mq_password=RABBITMQ_PASSWORD,
retry=MQ_RETRY):
self._mq_host = mq_host
self._mq_port = mq_port
self._mq_username = mq_username
self._mq_password = mq_password
self._URL = 'amqp://{}:{}@{}:{}'.format(
self._mq_username,
self._mq_password,
self._mq_host,
self._mq_port)
self._param = pika.URLParameters(self._URL)
self._retry = retry
def connect(self) -> pika.BlockingConnection:
if self._retry == 0:
upper_bound = float('inf')
else:
upper_bound = self._retry
counter = 0
while counter < upper_bound:
try:
connection = pika.BlockingConnection(self._param)
break
except Exception:
time.sleep(10)
counter += 1
else:
raise TimeoutError('connect failed.')
if connection is None:
raise ConnectionError
logger.info("Succeded_in_connecting_MQ.")
return connection
class AsyncMqClient(BaseMqClient):
'''Async MQ logic wrapper.
'''
EXCHANGE_PROPERTY = {
MAIN_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.DIRECT
},
RPC_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.DIRECT
},
BROADCAST_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.TOPIC
},
}
def __init__(self,
loop,
mq_host=MQ_HOST,
mq_port=MQ_PORT,
mq_username=RABBITMQ_USERNAME,
mq_password=RABBITMQ_PASSWORD,
retry=MQ_RETRY):
        # Forward the caller-supplied connection settings to the base client.
        super().__init__(mq_host=mq_host,
                         mq_port=mq_port,
                         mq_username=mq_username,
                         mq_password=mq_password,
                         retry=retry)
self._connection = None
self._loop = loop
self._q_pool = {}
async def setup(self):
await self._async_connect()
self._pub_channel = \
await self._connection.channel(publisher_confirms=False)
async def _async_connect(self) -> aio_pika.RobustConnection:
if self._retry == 0:
upper_bound = float('inf')
else:
upper_bound = self._retry
counter = 0
while counter < upper_bound:
try:
self._connection = aio_pika.RobustConnection(
self._URL,
loop=self._loop
)
await self._connection.connect()
break
except Exception:
await asyncio.sleep(10)
counter += 1
else:
raise TimeoutError('connect failed.')
if self._connection is None:
raise ConnectionError
logger.info("Succeded_in_connecting_MQ.")
return self._connection
async def publish(self, **kwargs):
err = None
for i in range(3):
try:
await self._pub_channel.channel.basic_publish(**kwargs)
break
except Exception as e:
err = e
await self.setup()
else:
raise err
async def declare_exchange(self,
name,
type,
arguments=None,
bind_exchange=None,
routing_key=None,
channel=None):
        '''A broker function to declare an exchange. This function abstracts
        away many of the details of communicating with the MQ server.
'''
if channel is None:
channel: aio_pika.Channel = await self._connection.channel(
publisher_confirms=False)
exchange = await channel.declare_exchange(
name=name, type=type,
arguments=arguments)
if bind_exchange is not None:
assert routing_key is not None
await channel.declare_exchange(
name=bind_exchange,
**self.EXCHANGE_PROPERTY[bind_exchange])
await exchange.bind(bind_exchange, routing_key=routing_key)
return exchange, channel
async def declare_queue(
self,
name,
bind_exchange,
routing_key,
callback,
prefetch_count,
arguments=None,
auto_delete=False,
exclusive=False,
no_ack=True,
channel=None,
consumer_tag=None
) -> typing.Tuple[aio_pika.Queue, aio_pika.Channel, str]:
        '''A broker function to declare a queue. It abstracts away many of
        the details of communicating with the MQ server.
'''
if name in self._q_pool:
queue, channel, consumer_tag = self._q_pool[name]
else:
if channel is None:
channel: aio_pika.Channel = await self._connection.channel(
publisher_confirms=False)
if arguments is None:
arguments = {}
arguments["x-max-length"] = 30000
queue: aio_pika.Queue = await channel.declare_queue(
name=name,
arguments=arguments,
auto_delete=auto_delete,
exclusive=exclusive)
self._q_pool[queue.name] = consumer_tag
try:
await channel.set_qos(prefetch_count=prefetch_count)
consumer_tag = await queue.consume(
callback, no_ack=no_ack,
consumer_tag=consumer_tag)
except aiormq.exceptions.DuplicateConsumerTag:
pass
self._q_pool[queue.name] = (queue, channel, consumer_tag)
await queue.bind(bind_exchange, routing_key=routing_key)
return queue, channel, consumer_tag
async def close(self):
if self._connection is None:
return
await self._connection.close()
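# A minimal usage sketch of the client above. The queue name, routing key and
# message handler are illustrative assumptions (nothing in this module defines
# them), and it assumes a RabbitMQ broker reachable at MQ_HOST:MQ_PORT.
async def _example_consume(loop):
    client = AsyncMqClient(loop)
    await client.setup()
    async def on_message(message):
        # aio_pika delivers an IncomingMessage; we only log its body here
        logger.info("received: {}".format(message.body))
    # make sure the main exchange exists, then bind a queue to it and consume
    await client.declare_exchange(
        name=MAIN_EXCHANGE_NAME,
        **AsyncMqClient.EXCHANGE_PROPERTY[MAIN_EXCHANGE_NAME])
    await client.declare_queue(
        name="example.queue",
        bind_exchange=MAIN_EXCHANGE_NAME,
        routing_key="example",
        callback=on_message,
        prefetch_count=1)
    # publish() forwards its keyword arguments to aiormq's basic_publish
    await client.publish(
        body=b"hello",
        exchange=MAIN_EXCHANGE_NAME,
        routing_key="example")
    await client.close()
if __name__ == "__main__":
    _loop = asyncio.get_event_loop()
    _loop.run_until_complete(_example_consume(_loop))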
| [
"[email protected]"
] | |
2d7c95b9adc03370debcc3242e5f0acf53ab7a6f | 020fbf1db497520abcb30cd3889cfe61c601723f | /practice/readmodclass.py | 491a456b4862d0be7e5870f5afe2748e2fdea45c | [] | no_license | bluecrt/first-project | bc53a49ae23c3cc6beb0ede4d00d94e4ad568771 | 7f5f376adcaa0d48caf4540db6613f843d40e173 | refs/heads/master | 2023-06-21T19:16:46.644489 | 2021-07-22T13:13:15 | 2021-07-22T13:13:15 | 386,676,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import classcars
car = classcars.Cars('red', 'big')
print('your car\'s color is:{},shape is:{},now you can drive it。'.format(car.color, car.shape)) | [
"email"
] | email |
a6fb58c89014ee062dd4f0919c71670b7ccf61ec | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/audio/FastSpeech/src/dataset.py | e72e66213e867feab300003b59edf5352fb51bbb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 5,247 | py | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data preprocessing."""
import os
from pathlib import Path
import numpy as np
from mindspore import Tensor
from mindspore import dtype as mstype
from src.cfg.config import config as hp
from src.text import text_to_sequence
from src.utils import pad_1d_tensor
from src.utils import pad_2d_tensor
from src.utils import process_text
def get_data_to_buffer():
"""
Put data to memory, for faster training.
"""
with Path(hp.dataset_path, 'train_indices.txt').open('r') as file:
train_part = np.array([i[:-1] for i in file.readlines()], np.int32)
train_part.sort()
buffer = list()
raw_text = process_text(os.path.join(hp.dataset_path, "metadata.txt"))
for i in train_part:
mel_gt_name = os.path.join(hp.dataset_path, 'mels', "ljspeech-mel-%05d.npy" % (i+1))
mel_gt_target = np.load(mel_gt_name)
duration = np.load(os.path.join(hp.dataset_path, 'alignments', str(i)+".npy"))
character = raw_text[i][: len(raw_text[i])-1]
character = np.array(text_to_sequence(character, hp.text_cleaners))
buffer.append(
{
"text": character,
"duration": duration,
"mel_target": mel_gt_target
}
)
return buffer
def reprocess_tensor(data_dict):
"""
Prepare data for training.
Apply padding for all samples, in reason of static graph.
Args:
data_dict (dict): Dictionary of np.array type data.
Returns:
out (dict): Dictionary with prepared data for training, np.array type.
"""
text = data_dict["text"]
mel_target = data_dict["mel_target"]
duration = data_dict["duration"]
max_len = hp.character_max_length
length_text = text.shape[0]
src_pos = np.pad([i+1 for i in range(int(length_text))], (0, max_len-int(length_text)), 'constant')
max_mel_len = hp.mel_max_length
length_mel = mel_target.shape[0]
mel_pos = np.pad([i+1 for i in range(int(length_mel))], (0, max_mel_len-int(length_mel)), 'constant')
text = pad_1d_tensor(text)
duration = pad_1d_tensor(duration)
mel_target = pad_2d_tensor(mel_target)
out = {
"text": text, # shape (hp.character_max_length)
"src_pos": src_pos, # shape (hp.character_max_length)
"mel_pos": mel_pos, # shape (hp.mel_max_length)
"duration": duration, # shape (hp.character_max_length)
"mel_target": mel_target, # shape (hp.mel_max_length, hp.num_mels)
"mel_max_len": max_mel_len,
}
return out
def preprocess_data(buffer):
"""
Prepare data for training.
Args:
buffer (list): Raw data inputs.
Returns:
preprocessed_data (list): Padded and converted data, ready for training.
"""
preprocessed_data = []
for squeeze_data in buffer:
db = reprocess_tensor(squeeze_data)
preprocessed_data.append(
(
db["text"].astype(np.float32),
db["src_pos"].astype(np.float32),
db["mel_pos"].astype(np.float32),
db["duration"].astype(np.int32),
db["mel_target"].astype(np.float32),
db["mel_max_len"],
)
)
return preprocessed_data
class BufferDataset:
"""
Dataloader.
"""
def __init__(self, buffer):
self.length_dataset = len(buffer)
self.preprocessed_data = preprocess_data(buffer)
def __len__(self):
return self.length_dataset
def __getitem__(self, idx):
return self.preprocessed_data[idx]
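def make_train_dataset(batch_size=16):
    """A sketch (not used elsewhere in this file) of wrapping the buffer in a
    MindSpore GeneratorDataset. The column names simply mirror the tuple
    returned by BufferDataset.__getitem__; batch_size=16 is an illustrative
    assumption rather than the project's real training configuration.
    """
    import mindspore.dataset as ds
    buffer = get_data_to_buffer()
    dataset = ds.GeneratorDataset(
        BufferDataset(buffer),
        column_names=["text", "src_pos", "mel_pos", "duration", "mel_target", "mel_max_len"],
        shuffle=True)
    return dataset.batch(batch_size, drop_remainder=True)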
def get_val_data(data_url):
"""Get validation data."""
data_list = list()
with Path(data_url, 'validation.txt').open('r') as file:
data_paths = file.readlines()
root_wav_path = os.path.join(data_url, 'wavs')
wav_paths = [root_wav_path + '/' + raw_path.split('|')[0] + '.wav' for raw_path in data_paths]
val_txts = [raw_path.split('|')[1][:-1] for raw_path in data_paths]
for orig_text, wav_path in zip(val_txts, wav_paths):
sequence = text_to_sequence(orig_text, hp.text_cleaners)
sequence = np.expand_dims(sequence, 0)
src_pos = np.array([i + 1 for i in range(sequence.shape[1])])
src_pos = np.expand_dims(src_pos, 0)
sequence = Tensor([np.pad(sequence[0], (0, hp.character_max_length - sequence.shape[1]))], mstype.float32)
src_pos = Tensor([np.pad(src_pos[0], (0, hp.character_max_length - src_pos.shape[1]))], mstype.float32)
data_list.append([sequence, src_pos, wav_path])
return data_list
| [
"[email protected]"
] | |
01936368e6eccfa81284c68fe14ec28c64e46899 | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/INCOMEDC/income.py | da1cc79d3b9404d80e97bace43769dfaef5fd48e | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 18,623 | py | # -*- coding: cp1251 -*-
from systems.KURSSKLAD.KURSTERM.common import TCommonTerm
#Import Templates
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.index import index
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.task import task
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskBL import taskBL
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskSite import taskSite
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskWares import taskWares
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskWaresAdd import taskWaresAdd
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskWaresLot import taskWaresLot
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.printer import printer
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.pallet import pallet
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.rangeWares import rangeWares
from kinterbasdb import DatabaseError as FBExc
from cherrypy import HTTPRedirect
from systems.KURSSKLAD.cheetahutils import TimeStampToDate
class TIncome(TCommonTerm):
helpSystem = False
tmplIndex = index
tmplTask = task
tmplRangeWares = rangeWares
tmplTaskBL = taskBL
tmplTaskSite = taskSite
tmplTaskWares = taskWares
tmplTaskWaresAdd = taskWaresAdd
tmplTaskWaresLot = taskWaresLot
tmplPrinter = printer
tmplPallet = pallet
def qTaskJoin(self, tid):
try:
self.dbExec(sql="execute procedure K_SESSION_JOIN_TASK(?,?)", params=[tid, self.getIfaceVar('wmsid')],
fetch='none')
except FBExc, exc:
raise HTTPRedirect('main?mes=%s' % (self.fbExcText(exc[1])))
def qTaskWaresAddExtDatalist(self, tid, wid):
pass
def index(self, id_system=None):
TCommonTerm.index(self, id_system)
self.setIfaceVar('wmsid', self.GetKSessionID())
return self.main()
index.exposed = True
def chgZone(self, id):
try:
self.dbExec(sql="execute procedure WH_SESSION_SETZONE(?,?)", params=[self.getIfaceVar('wmsid'), id],
fetch='none')
except FBExc, exc:
raise HTTPRedirect('main?mes=%s' % (self.fbExcText(exc[1])))
else:
raise HTTPRedirect('main')
chgZone.exposed = True
def main(self, barcode=None, mes=None):
if barcode:
mes = _('Invalid barcode')
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result'] == 0:
if bcInfo['usercode'] == 'DOCUMENT':
t = self.dbExec(sql="select * from K_WH_INCOME_LISTDOCS(?) where docid=?",
params=[self.getIfaceVar('wmsid'), self.kId(bcInfo['recordid'])], fetch='one')
if t and t['taskid']: raise HTTPRedirect('task?tid=%s' % (t['taskid']))
elif bcInfo['usercode'] == 'PRINTER':
raise HTTPRedirect('printer?id=%s' % (self.kId(bcInfo['recordid'])))
docs = self.dbExec(sql="select * from K_WH_INCOME_LISTDOCS(?)", params=[self.getIfaceVar('wmsid')], fetch='all')
zonedocs = self.dbExec(sql="select * from K_WH_INCOME_LISTOBJDOCS(?)", params=[self.getIfaceVar('wmsid')],
fetch='all')
zonedocs['zd'] = zonedocs['datalist']
del zonedocs['datalist']
return self.drawTemplate(templ=self.tmplIndex, data=[docs, zonedocs, {'mes': mes, 'reloadurl': 'main'}])
main.exposed = True
def task(self, tid, showList=None, mes=None):
self.qTaskJoin(tid)
t = self.taskInfo(tid)
if not t['SITEID']: raise HTTPRedirect('taskSite?tid=%s' % (tid))
if showList is None:
showList = self.getIfaceVar('taskShowList')
if showList is None: showList = '0'
self.setIfaceVar('taskShowList', showList)
if showList != '0':
tw = self.dbExec(sql="select * from K_WH_INCOME_LISTWARES(?)", params=[tid], fetch='all')
else:
tw = None
return self.drawTemplate(templ=self.tmplTask, data=[t, tw, {'mes': mes, 'showList': showList, 'backurl': 'main',
'treeName': '№%s' % (tid)}])
task.exposed = True
def taskBL(self, **args):
tid = args['tid']
pw = ''
pq = ''
mes = None
for i in args:
if i[0] == 'w':
wid = i[1:]
if args[i]:
pw = pw + wid + ';'
pq = pq + args[i] + ';'
if pw != '' and pq != '':
try:
self.dbExec(sql="execute procedure K_WH_INCOME_BL_SET(?,?,?)", fetch="none", params=[tid, pw, pq])
except FBExc, exc:
mes = exc[1]
else:
raise HTTPRedirect('task?tid=%s' % (tid))
t = self.taskInfo(tid)
tw = self.dbExec(sql="select * from K_WH_INCOME_BL_LISTWARES(?)", params=[tid], fetch='all')
return self.drawTemplate(templ=self.tmplTaskBL, data=[t, tw, {'mes': mes, 'backurl': 'task?tid=%s' % (tid),
'treeName': '№%s' % (tid)}])
taskBL.exposed = True
def taskSite(self, tid, barcode=None):
if barcode:
mes = _('Invalid barcode')
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result'] == 0 and bcInfo['usercode'] == 'SITE':
try:
self.dbExec(sql="execute procedure K_WH_INCOMEDC_SET_SITE(?,?)",
params=[tid, self.kId(bcInfo['recordid'])], fetch='none')
except FBExc, exc:
mes = self.fbExcText(exc[1])
else:
raise HTTPRedirect('task?tid=%s' % (tid))
else:
mes = None
return self.drawTemplate(templ=self.tmplTaskSite,
data=[self.taskInfo(tid), {'mes': mes, 'backurl': 'main', 'treeName': '№%s' % (tid)}])
taskSite.exposed = True
def rangeWares(self, tid, barcode):
t = self.taskInfo(tid)
w = self.dbExec(sql='select * from WH_INCOME_LISTWARES_BY_BARCODE(?,?)', params=[tid, barcode], fetch='all')
return self.drawTemplate(templ=self.tmplRangeWares, data=[t, w,
{'barcode': barcode, 'backurl': 'task?tid=%s' % (tid),
'treeName': '№%s' % (tid)}])
rangeWares.exposed = True
def taskWares(self, tid, wid=None, wuid=None, mes=None):
if wid is None and wuid:
wu = self.waresUnitInfo(waresunitid=wuid)
wid = wu['waresid']
wid = self.kId(wid)
params = {'mes': mes, 'backurl': 'task?tid=%s' % (tid), 'treeName': '№%s' % (tid)}
tl = self.dbExec(sql="select * from K_WH_INCOME_LISTWARESLOT(?,?)", params=[tid, wid], fetch='all')
if not mes and len(tl['datalist']) == 0:
if wuid:
raise HTTPRedirect('taskWaresAdd?tid=%s&wid=%s&wuid=%s' % (tid, wid, wuid))
else:
raise HTTPRedirect('taskWaresAdd?tid=%s&wid=%s' % (tid, wid))
t = self.taskInfo(tid)
wz = self.objWaresIncomeZone(objid=t['TOID'], waresid=wid)
w = self.waresInfo(wid)
return self.drawTemplate(templ=self.tmplTaskWares, data=[t, w, tl, wz, params])
taskWares.exposed = True
def taskWaresScan(self, tid, barcode, wid=None):
mes = _('ШК не обрабатывается')
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result'] == 0:
mes = _('Invalid barcode')
if bcInfo['usercode'] == 'WARESUNIT':
tw = self.dbExec(sql="select * from WH_INCOME_LISTWARES_BY_BARCODE(?,?)", params=[tid, barcode],
fetch='all')
if len(tw['datalist']) == 1:
tw0 = tw['datalist'][0]
raise HTTPRedirect('taskWares?tid=%s&wid=%s&wuid=%s' % (tid, tw0['WID'], tw0['WUID']))
elif len(tw['datalist']) > 1:
raise HTTPRedirect('rangeWares?tid=%s&barcode=%s' % (tid, barcode))
else:
mes = _('Товары с этим ШК не найдены в задании на приемку!')
elif bcInfo['usercode'] == 'WARES':
tw = self.dbExec(sql="select * from K_WH_INCOME_LISTWARES(?) where wid=?",
params=[tid, self.kId(bcInfo['recordid'])], fetch='one')
if tw and tw['WID']:
raise HTTPRedirect('taskWares?tid=%s&wid=%s' % (tid, bcInfo['recordid']))
else:
mes = _('Товар не найден в задании!')
                    raise HTTPRedirect('taskWares?tid=%s&wid=%s&mes=%s' % (tid, bcInfo['recordid'], mes))
elif bcInfo['usercode'] == 'WARESWEIGHT':
ww = self.dbExec(sql=bcInfo['SELECTSQL'], params=[], fetch='one')
if ww: raise HTTPRedirect('taskWaresAdd?tid=%s&wid=%s&amount=%s' % (tid, ww['WID'], ww['WWEIGHT']))
elif bcInfo['usercode'] == 'PALLET':
url = 'pallet?id=%s&tid=%s' % (bcInfo['recordid'], tid)
if wid: url += '&wid=%s' % (wid)
raise HTTPRedirect(url)
elif bcInfo['usercode'] == 'PRINTER' and self.urlTaskPrinter:
raise HTTPRedirect(self.urlTaskPrinter + '?tid=%s&prid=%s' % (tid, bcInfo['recordid']))
if wid:
raise HTTPRedirect('taskWares?tid=%s&wid=%s&mes=%s' % (tid, wid, mes))
else:
raise HTTPRedirect('task?tid=%s&mes=%s' % (tid, mes))
taskWaresScan.exposed = True
def taskWaresAdd(self, tid, wid, wuid=None, prdate=None, amount=None, barcode=None):
wid = self.kId(wid)
params = {'backurl': 'task?tid=%s' % (tid), 'treeName': '№%s' % (tid)}
if barcode:
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result'] == 0 and bcInfo['usercode'] == 'WARESWEIGHT':
if self.kId(wid) == self.kId(bcInfo['recordid']):
ww = self.dbExec(sql=bcInfo['SELECTSQL'], params=[], fetch='one')
if amount:
amount = float(amount) + float(ww['WWEIGHT'])
else:
amount = ww['WWEIGHT']
else:
params['mes'] = _('Отсканирован весовой стикер другого товара!')
elif amount and prdate and prdate != self.dateMask:
try:
self.dbExec(sql="execute procedure K_WH_INCOMEDC_DO(?,?,?,?,?,?)", fetch='none',
params=[tid, wuid, prdate, amount, barcode, self.getIfaceVar('wmsid')])
except FBExc, exc:
params['mes'] = self.fbExcText(exc[1])
else:
raise HTTPRedirect('taskWares?tid=%s&wid=%s' % (tid, wid))
t = self.taskInfo(tid)
wz = self.objWaresIncomeZone(objid=t['TOID'], waresid=wid)
if not wz or not wz['ZID']:
raise HTTPRedirect('taskWares?tid=%s&wid=%s&mes=%s' % (tid, wid, _('Не установлена зона хранения товара!')))
        # set the begin time
self.dbExec(
sql='update wm_task_wares tw set tw.begintime = current_timestamp where tw.taskid = ? and tw.waresid = ? and tw.begintime is NULL',
params=[tid, wid], fetch='none')
if prdate:
params['prdate'] = prdate
else:
ld = self.dbExec(sql='select * from K_WH_INCOME_WARESLASTDATA(?,?)', params=[tid, wid], fetch='one')
if ld and ld['PRODUCTDATE']:
params['prdate'] = TimeStampToDate(ld['PRODUCTDATE'])
else:
params['prdate'] = self.dateMask
w = self.waresInfo(wid)
wt = self.waresType(wid)
if amount:
params['amount'] = amount
else:
params['amount'] = ''
if wuid:
wu = self.waresUnitInfo(waresunitid=wuid)
else:
wu = None
params['wuid'] = wuid
extDL = self.qTaskWaresAddExtDatalist(tid, wid)
if extDL:
params['extDL'] = extDL['datalist']
return self.drawTemplate(templ=self.tmplTaskWaresAdd, data=[t, w, wt, wu, params])
taskWaresAdd.exposed = True
def taskWaresLot(self, tid, wid, wlotid, palletid, wuid=None, amount=None, barcode=None):
wid = self.kId(wid)
params = {'backurl': 'taskWares?tid=%s&wid=%s' % (tid, wid), 'treeName': '№%s' % (tid)}
if barcode:
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result'] == 0 and bcInfo['usercode'] == 'WARESWEIGHT':
if self.kId(wid) == self.kId(bcInfo['recordid']):
ww = self.dbExec(sql=bcInfo['SELECTSQL'], params=[], fetch='one')
if amount:
amount = float(amount) + float(ww['WWEIGHT'])
else:
amount = ww['WWEIGHT']
else:
params['mes'] = _('Отсканирован весовой стикер другого товара!')
else:
try:
self.dbExec(sql="execute procedure K_WH_INCOMEDC_DO_UPD(?,?,?,?,?,?,?)", fetch='none',
params=[tid, wlotid, palletid, wuid, amount, barcode, self.getIfaceVar('wmsid')])
except FBExc, exc:
params['mes'] = self.fbExcText(exc[1])
else:
raise HTTPRedirect('taskWares?tid=%s&wid=%s' % (tid, wid))
t = self.taskInfo(tid)
w = self.waresInfo(wid)
wu = self.waresUnitInfo(wuid)
p = self.palletInfo(palletid)
wl = self.dbExec(sql="select wlotid,productdate from wareslot wl where wlotid=?", fetch='one', params=[wlotid])
wli = self.dbExec(sql="select amount as wlamount from WH_INCOMEDC_WARESLOTITEM(?,?,?)", fetch='one',
params=[t['DOCID'], wlotid, palletid])
if amount:
params['amount'] = amount
else:
params['amount'] = '0'
if wuid: params['wuid'] = wuid
wt = self.waresType(wid)
return self.drawTemplate(templ=self.tmplTaskWaresLot, data=[t, p, w, wu, wl, wli, wt, params])
taskWaresLot.exposed = True
def printer(self, id, cnt=None):
params = {'printerid': id, 'cnt': cnt, 'backurl': 'main', 'mes': None}
if cnt:
try:
self.dbExec(sql="execute procedure WH_INCOME_PRINTPALLET('INCOME',?,NULL,?,?)",
params=[self.getIfaceVar('wmsid'), id, cnt], fetch='none')
except FBExc, exc:
params['mes'] = self.fbExcText(exc[1])
else:
raise HTTPRedirect('main')
p = self.dbExec(sql="select * from WM_PRINTERS where PRINTERID = ?", params=[id], fetch='one')
return self.drawTemplate(templ=self.tmplPrinter, data=[p, params])
printer.exposed = True
def pallet(self, id, tid, wid=None):
id = self.kId(id)
p = self.palletInfo(id)
if p['OBJID'] != self.wmSesZoneObj(wmsesid=self.getIfaceVar('wmsid'))['OBJID']:
return self.wpMain(mes=_('Поддон принадлежит другому объекту!'))
w = self.dbExec(sql="select * from K_WORKPALLET_LISTWARES(?)", params=[id], fetch='all')
if wid:
backurl = 'taskWares?tid=%s&wid=%s' % (tid, wid)
else:
backurl = 'task?tid=%s' % (tid)
return self.drawTemplate(templ=self.tmplPallet, data=[p, w, {'backurl': backurl}])
pallet.exposed = True
def taskEnd(self, tid):
try:
self.dbExec(sql="execute procedure K_WH_INCOMEDC_TASKEND(?,?)", params=[tid, self.getIfaceVar('wmsid')],
fetch='none')
except FBExc, exc:
raise HTTPRedirect('task?tid=%s&mes=%s' % (tid, self.fbExcText(exc[1])))
else:
raise HTTPRedirect('main')
taskEnd.exposed = True
####################################################################################################################
    # Deviations from the schema
####################################################################################################################
from systems.KURSSKLAD.KURSTERM.INCOMEDC.templates.taskPrinter import taskPrinter
class TIncomeTransit(TIncome):
urlTaskPrinter = 'taskPrinter'
tmplTaskPrinter = taskPrinter
def qTaskWaresAddExtDatalist(self, tid, wid):
return self.dbExec(sql="select * from WH_INCOMEDC_TRN_WARESCLIENTS(?,?)", params=[tid, wid], fetch='all')
def taskPrinter(self, **args):
tid = self.kId(args['tid'])
prid = self.kId(args['prid'])
mes = None
clients = ''
amounts = ''
dates = ''
for key in args:
if key == 'tid':
tid = self.kId(args[key])
elif key == 'prid':
prid = self.kId(args[key])
elif key == 'mes':
mes = args[key]
elif key[:2] == 'q_' and args[key]:
if clients == '':
clients = key.split('_')[1]
dates = key.split('_')[2]
amounts = args[key]
else:
clients += ',' + key.split('_')[1]
dates += ',' + key.split('_')[2]
amounts += ',' + args[key]
if clients:
try:
self.dbExec(sql="execute procedure WH_INCOMEDC_PRINTPALLETCLIENTS('INCOME',?,?,?,?,?)",
params=[self.getIfaceVar('wmsid'), prid, clients, dates, amounts], fetch='none')
except FBExc, exc:
mes = self.fbExcText(exc[1])
else:
raise HTTPRedirect('task?tid=%s' % (tid))
lc = self.dbExec(sql="select * from WH_INCOMEDC_TRN_LISTCLIENTS(?)", params=[tid], fetch='all')
p = self.dbExec(sql="select * from WM_PRINTERS where PRINTERID = ?", params=[prid], fetch='one')
t = self.taskInfo(tid)
return self.drawTemplate(templ=self.tmplTaskPrinter,
data=[lc, p, t, {'mes': mes, 'backurl': 'task?tid=%s' % (tid)}])
taskPrinter.exposed = True
| [
"[email protected]"
] | |
0cdecd0af3dafb9d82746ec845df802d9645f53a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/u0j.py | f3000c2754bb1143fdbc84d3add04b9d26cc4120 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'u0J':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
891767424910c6312d21ec2883cabb1ee174ed30 | 094a82883b0f4490dbca6c042e129faf0593d7bc | /thingflow/filters/timeout.py | 714092327ac119541bf3e1f1efb166cdb5bf6395 | [
"Apache-2.0"
] | permissive | kesking82/thingflow-python | 904495aa370fb0fdef5e1eb162f0553a37bd7271 | 4c00deafd1bf425ec90ef2159fc5f3ea2553ade8 | refs/heads/master | 2020-04-21T13:05:57.615247 | 2019-02-28T09:59:13 | 2019-02-28T09:59:13 | 169,587,091 | 0 | 0 | Apache-2.0 | 2019-02-07T14:44:45 | 2019-02-07T14:44:44 | null | UTF-8 | Python | false | false | 4,506 | py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Timeout-related output things and filters.
"""
from thingflow.base import OutputThing, DirectOutputThingMixin, FunctionFilter,\
FatalError, filtermethod
class Timeout(OutputThing, DirectOutputThingMixin):
"""An output thing that can shedule timeouts for itself. When a
timeout occurs, an event is sent on the default port.
The timeout_thunk is called to get the actual event.
"""
def __init__(self, scheduler, timeout_thunk):
super().__init__()
self.scheduler = scheduler
self.timeout_thunk = timeout_thunk
self.cancel = None
def start(self, interval):
if self.cancel:
self.cancel()
self.cancel = self.scheduler.schedule_later_one_time(self, interval)
def clear(self):
if self.cancel:
self.cancel()
self.cancel = None
def _observe(self):
"""If this gets called, we hit the timeout
"""
self.cancel = None
self._dispatch_next(self.timeout_thunk())
class EventWatcher:
"""Watch the event stream and then produce an event for a timeout
when asked. This can be subclassed to implement different
policies.
"""
def on_next(self, x):
pass # we get a regular event
def produce_event_for_timeout(self):
return None # return the timeout event
def close(self): # called for on_completed or on_error
pass
class SupplyEventWhenTimeout(FunctionFilter):
"""This filter sits in a chain and passes incoming events through to
its output. It also passes all events to the on_next() method of the
event watcher. If no event arrives on the input after the interval has
passed since the last event, event_watcher.produce_event_for_timeout()
is called to get a dummy event, which is passed upstream.
"""
def __init__(self, previous_in_chain, event_watcher, scheduler, interval):
self.event_watcher = event_watcher
self.timeout_thing = \
Timeout(scheduler, self.event_watcher.produce_event_for_timeout)
self.interval = interval
def on_next(self, x):
self.event_watcher.on_next(x)
# reset the timer
self.timeout_thing.start(self.interval)
self._dispatch_next(x)
def on_completed(self):
self.event_watcher.close()
self.timeout_thing.clear()
self._dispatch_completed()
def on_error(self, e):
self.event_watcher.close()
self.timeout_thing.clear()
self._dispatch_error(e)
super().__init__(previous_in_chain, on_next=on_next,
on_completed=on_completed, on_error=on_error,
name='supply_event_when_timeout')
# pass the timeout_thing's timeout events to my on_timeout_next()
        # method
self.timeout_thing.connect(self,
port_mapping=('default','timeout'))
# We start the timeout now - if we don't get a first event from the
# input within the timeout, we should supply a timeout event. This
# timeout won't start counting down until we start the scheduler.
self.timeout_thing.start(interval)
def on_timeout_next(self, x):
"""This method is connected to the Timeout thing's output. If it
gets called, the timeout has fired. We need to reschedule the timeout
as well, so that we continue to produce events in the case of multiple
consecutive timeouts.
"""
self.timeout_thing.start(self.interval)
self._dispatch_next(x)
def on_timeout_error(self, e):
"""This won't get called, as the Timeout thing does not republish any
errors it receives.
"""
raise FatalError("%s.on_timeout_error should not be called" % self)
def on_timeout_completed(self):
"""This won't get called, as the timeout thing does not propate
any completions. We just use the primary event stream to figure out when
things are done and clear any pending timeouts at that time.
"""
raise FatalError("%s.on_timeout_completed should not be called" % self)
@filtermethod(OutputThing)
def supply_event_when_timeout(this, event_watcher, scheduler, interval):
return SupplyEventWhenTimeout(this, event_watcher, scheduler, interval)
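# A sketch of how the pieces above might be combined. HeartbeatWatcher and the
# sensor/scheduler wiring in the trailing comment are illustrative assumptions;
# nothing in this module defines them.
class HeartbeatWatcher(EventWatcher):
    """Remember the most recent event and replay it, tagged, when a timeout fires."""
    def __init__(self):
        self.last_event = None
    def on_next(self, x):
        # called for every real event that passes through the filter
        self.last_event = x
    def produce_event_for_timeout(self):
        # the dummy event dispatched upstream when no event arrived in time
        return ('timeout', self.last_event)
# Given an OutputThing `sensor` and a thingflow scheduler `sched`, something like
#     sensor.supply_event_when_timeout(HeartbeatWatcher(), sched, 30)
# passes sensor events through unchanged and emits ('timeout', ...) whenever no
# event arrives within 30 seconds.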
| [
"[email protected]"
] | |
958691096444cbd3d3c96fb700112e2199f368ca | e86851297175203451374021595659adbd516b59 | /tools/convert2txt.py | eecab18170bcdafe344373ef87bedb7667c58484 | [
"MIT"
] | permissive | stcolumbas/free-church-psalms | f0417d07af449300a5ada758dc95e153712b0e9e | 0eee5faa19306a79d77a55019ff82fcba72fc9b4 | refs/heads/master | 2022-12-16T15:31:44.907547 | 2017-12-08T22:53:40 | 2017-12-08T22:53:40 | 28,723,518 | 2 | 0 | null | 2022-12-07T23:51:49 | 2015-01-02T19:23:24 | Elm | UTF-8 | Python | false | false | 1,290 | py | import os
from utils import (load_scottish_psalter, load_sing_psalms, make_output_folder,
remove_folder, remove_markup, zip_folder)
def write_text_file(psalm, output_folder, fname):
fname += ".txt"
with open(os.path.join(output_folder, fname), 'w') as f:
text = psalm['name'] + "\r\n" # use windows compat. line breaks
text += psalm['metre'] + "\r\n\r\n"
text += "\r\n\r\n".join(psalm['stanzas'])
if psalm['copyright'] is not None:
text += "\r\n\r\n© " + psalm['copyright']
        text = remove_markup(text)
f.write(text)
def convert2txt():
"""Convert both sets of Psalms to text files and
save in output/plain_text
"""
# sing psalms
output_folder = make_output_folder(["PlainText", "Sing Psalms"])
psalms = load_sing_psalms()
for psalm in psalms:
write_text_file(psalm, output_folder, psalm['file_name'])
# trad psalms
output_folder = make_output_folder(["PlainText", "Scottish Psalter"])
psalms = load_scottish_psalter()
for psalm in psalms:
write_text_file(psalm, output_folder, psalm['file_name'])
zip_folder(os.path.dirname(output_folder))
remove_folder(os.path.dirname(output_folder))
if __name__ == '__main__':
convert2txt()
| [
"[email protected]"
] | |
60995d970bc68dc1ec94fb35ac1deb625a4a25b0 | 648f742d6db2ea4e97b83c99b6fc49abd59e9667 | /common/vault/oas/models/v1_release.py | 390e9e08ca27c8e6365b47a2aae883c73a864c3e | [] | no_license | jmiller-tm/replit | c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86 | c8e6af3268c4ef8da66516154850919ea79055dc | refs/heads/main | 2023-08-30T00:49:35.738089 | 2021-11-16T23:09:08 | 2021-11-16T23:09:08 | 428,809,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,968 | py | # coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1Release(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'amount': 'str',
'denomination': 'str',
'target_account_id': 'str',
'internal_account_id': 'str'
}
attribute_map = {
'amount': 'amount',
'denomination': 'denomination',
'target_account_id': 'target_account_id',
'internal_account_id': 'internal_account_id'
}
def __init__(self, amount=None, denomination=None, target_account_id=None, internal_account_id=None): # noqa: E501
"""V1Release - a model defined in Swagger""" # noqa: E501
self._amount = None
self._denomination = None
self._target_account_id = None
self._internal_account_id = None
self.discriminator = None
if amount is not None:
self.amount = amount
if denomination is not None:
self.denomination = denomination
if target_account_id is not None:
self.target_account_id = target_account_id
if internal_account_id is not None:
self.internal_account_id = internal_account_id
@property
def amount(self):
"""Gets the amount of this V1Release. # noqa: E501
The amount released. # noqa: E501
:return: The amount of this V1Release. # noqa: E501
:rtype: str
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this V1Release.
The amount released. # noqa: E501
:param amount: The amount of this V1Release. # noqa: E501
:type: str
"""
self._amount = amount
@property
def denomination(self):
"""Gets the denomination of this V1Release. # noqa: E501
The instruction release denomination. # noqa: E501
:return: The denomination of this V1Release. # noqa: E501
:rtype: str
"""
return self._denomination
@denomination.setter
def denomination(self, denomination):
"""Sets the denomination of this V1Release.
The instruction release denomination. # noqa: E501
:param denomination: The denomination of this V1Release. # noqa: E501
:type: str
"""
self._denomination = denomination
@property
def target_account_id(self):
"""Gets the target_account_id of this V1Release. # noqa: E501
The instruction `target_account_id`. # noqa: E501
:return: The target_account_id of this V1Release. # noqa: E501
:rtype: str
"""
return self._target_account_id
@target_account_id.setter
def target_account_id(self, target_account_id):
"""Sets the target_account_id of this V1Release.
The instruction `target_account_id`. # noqa: E501
:param target_account_id: The target_account_id of this V1Release. # noqa: E501
:type: str
"""
self._target_account_id = target_account_id
@property
def internal_account_id(self):
"""Gets the internal_account_id of this V1Release. # noqa: E501
The instruction `internal_account_id`. # noqa: E501
:return: The internal_account_id of this V1Release. # noqa: E501
:rtype: str
"""
return self._internal_account_id
@internal_account_id.setter
def internal_account_id(self, internal_account_id):
"""Sets the internal_account_id of this V1Release.
The instruction `internal_account_id`. # noqa: E501
:param internal_account_id: The internal_account_id of this V1Release. # noqa: E501
:type: str
"""
self._internal_account_id = internal_account_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1Release, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Release):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
24eec6af70636c91b4a73525876f8dd6d1baaa4a | d932716790743d0e2ae7db7218fa6d24f9bc85dc | /net/data/verify_certificate_chain_unittest/expired-root/generate-chains.py | 93d5bb72522e7d46dc7a0d48bd93b24afe02747a | [
"BSD-3-Clause"
] | permissive | vade/chromium | c43f0c92fdede38e8a9b858abd4fd7c2bb679d9c | 35c8a0b1c1a76210ae000a946a17d8979b7d81eb | refs/heads/Syphon | 2023-02-28T00:10:11.977720 | 2017-05-24T16:38:21 | 2017-05-24T16:38:21 | 80,049,719 | 19 | 3 | null | 2017-05-24T19:05:34 | 2017-01-25T19:31:53 | null | UTF-8 | Python | false | false | 1,185 | py | #!/usr/bin/python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with a root, intermediate and target. The root has a
smaller validity range than the other certificates, making it easy to violate
just its validity.
Root: 2015/03/01 -> 2015/09/01
Intermediate: 2015/01/01 -> 2016/01/01
Target: 2015/01/01 -> 2016/01/01
"""
import sys
sys.path += ['..']
import common
# Self-signed root certificate.
root = common.create_self_signed_root_certificate('Root')
root.set_validity_range(common.MARCH_1_2015_UTC, common.SEPTEMBER_1_2015_UTC)
# Intermediate certificate.
intermediate = common.create_intermediate_certificate('Intermediate', root)
intermediate.set_validity_range(common.JANUARY_1_2015_UTC,
common.JANUARY_1_2016_UTC)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediate)
target.set_validity_range(common.JANUARY_1_2015_UTC, common.JANUARY_1_2016_UTC)
chain = [target, intermediate, root]
common.write_chain(__doc__, chain, 'chain.pem')
| [
"[email protected]"
] | |
0a846cfd6088e8cd659faee4934e1bd8e097a172 | 3d8ee0556e21cda020272ca958c1ea3c40bf648f | /130401_MultiLanguageChanging/PosMulti04_showSelectList_referToCase10.py | bce89870480eaf77d71614ddbaaf80c00480d02a | [] | no_license | nikoladang/POS_Indonesia | b60bf5c32d07c6aff065090cf5c8b5fede329d30 | e06d5433af3480a5e215f980bbf263cbb374010d | refs/heads/master | 2020-04-12T03:24:07.059745 | 2014-02-24T10:07:53 | 2014-02-24T10:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,782 | py | #-*- coding: utf-8 -*-
##http://www.saltycrane.com/blog/2007/10/python-finditer-regular-expression/
"""
Python version: 2.7
Purpose: posui:showSelectList
130506: apply (not k.endswith('0'))
ignore blank line inside
apply suffix 'MultiLang'
        Note: if a staticValue is found but no appropriate key can be found for that value, the prefix isMultiLang is still set to "true"
        apply inplaceFlag 1 (i.e. can write to file)
create function findCorrespondentKey()
130507: if can not find key, remove 'space', <br> or ' ' --> find again ; checkFlag 2 3
case-insensitive for searching value
130513: + label key, staticValues keys are set to _0000 in order to show the original value
"""
import sys, re, os, glob, shutil, fileinput
from datetime import datetime
from xlrd import open_workbook
import teoconstants
root_dir='C:\\130521_Temp\\Temp\\poscoMES_M80\\Pgm_Dev\\2nd_iteration\\'
##root_dir='C:\\jdev904_MULTI\\j2ee\\home\\applications\\M84010APP\\'
##root_dir='C:\\130417_dest\\'
constantFolders=[
'public_html',
## 'M84010WEB',
]
labelDict = {}
keyNotFoundList = []
showSelectListCount = 0
alreadyChanged = 0
showSelectListChanged = 0
labelMainList=[]
inplaceFlag = 1
def run(target_dir, inplaceFlag=0):
global showSelectListCount, alreadyChanged, showSelectListChanged, labelList
retrieveFlag = 0 # 1: start retrieve; 2: end retrieve
showTextFields = ''
previousLineIndent = ''
for root, dirs, files in os.walk(target_dir):
for file in files:
if file.endswith('.jsp') and (file.lower() in teoconstants.uipgms):
print('Processing ' + file)
if inplaceFlag == 0: #improve performance
f = fileinput.input(root+"\\"+ file, inplace=inplaceFlag, openhook=fileinput.hook_encoded('utf-8'))
elif inplaceFlag == 1:
f = fileinput.input(root+"\\"+ file, inplace=inplaceFlag)
label = ''
staticValues = ''
totalValue = ''
for i, line in enumerate(f):
if(re.search('posui:showSelectList', line, re.IGNORECASE)):
showSelectListCount += 1
retrieveFlag = 1
showTextFields += line
if retrieveFlag == 1:
if line in ('\n', '\r\n'): # ignore blank line
continue
if (not re.search('/>', line)) and (not re.search('posui:showSelectList', line)):
previousLineIndent = (re.search('^(?P<indent>[ \t]*)[a-zA-Z\</\n]?',line,re.IGNORECASE)).group('indent')
if re.search('label.*\=',line): ##1111111111111111111111111111111
if(re.search("\<.*\>",line)):
if inplaceFlag == 1:
sys.stdout.write(line)
continue
else:
m = re.search('(?P<before>^.*)label.*=.*"(?P<label>.*)"',line)
label = m.group('label')
keyfoundFlag = 0
keyFound = ''
subList=[]
subList.append(file)
subList.append(i+1)
subList.append(label)
if inplaceFlag == 1:
label = label.decode('utf-8')
keyFound = findCorrespondentKey(label)
if keyFound != label:
keyfoundFlag = 1
subList.append(1)
keyFound = re.sub('_000\d','_0000',keyFound)
line = m.group('before')+'label="'+keyFound+'"\n'
## if inplaceFlag == 0:
## print('FOUNDDDDDDD:'+ keyFound)
if inplaceFlag == 1:
line = line.encode('utf-8')
else:
subList.append(0)
labelMainList.append(subList)
elif(re.search('staticValues.*\=', line)): ##222222222222222222222222222
if inplaceFlag == 0:
## print('staticValues FOUND ==>'+line)
pass
if(re.search("\<.*\>",line)): ## E.g: totalValue="<%=PosM800500099ConstantsIF.C_LOV_ALL_VALUE%>"
if inplaceFlag == 1:
sys.stdout.write(line)
continue
else:
m = re.search('(?P<before>^.*)staticValues.*=.*"(?P<staticValues>.*)"',line)
staticValues = m.group('staticValues')
staticValues = re.sub('|$','',staticValues)
newString = ''
for value in re.split('\|',staticValues):
subList=[]
subList.append(file)
subList.append(i+1)
subList.append(value)
if inplaceFlag == 1:
value = value.decode('utf-8')
keyFound = findCorrespondentKey(value)
if keyFound != value:
subList.append(1)
keyFound = re.sub('_000\d','_0000',keyFound)
newString += keyFound + '|'
if inplaceFlag == 0:
print('FOUNDDDDDDD:'+ keyFound)
else:
subList.append(0)
newString += value + '|'
if inplaceFlag == 0:
print('CAN NOT FIND APPROPRIATE KEY for staticValues')
labelMainList.append(subList)
newString = re.sub('\|$','',newString)
line = m.group('before')+'staticValues="'+newString+'"\n'
if inplaceFlag == 1:
line = line.encode('utf-8')
elif(re.search('totalValue.*\=', line)): ##3333333333333333333333
## if inplaceFlag == 0:
## print('totalValue FOUND ==>'+line)
if(re.search("\<.*\>",line) or re.search('"-+"',line)): ## E.g: totalValue="<%=PosM800500099ConstantsIF.C_LOV_ALL_VALUE%>" ; totalValue="------------"
if inplaceFlag == 1:
sys.stdout.write(line)
continue
else:
m = re.search('(?P<before>^.*)totalValue.*=.*"(?P<totalValue>.*)"',line)
totalValue = m.group('totalValue')
keyfoundFlag = 0
subList=[]
subList.append(file)
subList.append(i+1)
subList.append(totalValue)
if inplaceFlag == 1:
totalValue = totalValue.decode('utf-8')
keyFound = findCorrespondentKey(totalValue)
if keyFound != totalValue:
keyfoundFlag = 1
subList.append(1)
line = m.group('before')+'totalValue="'+keyFound+'"\n'
if inplaceFlag == 0:
print('FOUNDDDDDDD:'+ keyFound)
elif inplaceFlag == 1:
line = line.encode('utf-8')
else:
subList.append(0)
labelMainList.append(subList)
showTextFields += line
if (re.search('/>', line)) and (retrieveFlag == 1):
retrieveFlag = 2
if re.search('isMultiLang', showTextFields) \
or re.search('isLabelMultiLang', showTextFields) \
or re.search('isTotalValueMultiLang', showTextFields) :
alreadyChanged += 1
if staticValues and not re.search('isMultiLang', showTextFields):
line = re.sub('^',previousLineIndent+'isMultiLang="true"\n', line)
else:
if label and totalValue=='' and not re.search('isLabelMultiLang', showTextFields): # only label
line = re.sub('^',previousLineIndent+'isLabelMultiLang="true"\n', line)
elif totalValue and label=='' and not re.search('isTotalValueMultiLang', showTextFields): # only totalValue
line = re.sub('^',previousLineIndent+'isTotalValueMultiLang="true"\n', line)
elif totalValue and label and not re.search('isLabelMultiLang', showTextFields) and not re.search('isTotalValueMultiLang', showTextFields):
line = re.sub('^',previousLineIndent+'isLabelMultiLang="true"\n', line)
line = re.sub('^',previousLineIndent+'isTotalValueMultiLang="true"\n', line)
showTextFields += line
if inplaceFlag == 0:
sys.stdout.write(showTextFields)
if retrieveFlag == 2:
retrieveFlag = 0
keyFound = ''
showTextFields = ''
label = ''
staticValues = ''
totalValue = ''
if inplaceFlag == 1:
sys.stdout.write(line)
f.close()
def findCorrespondentKey(value):
keyFound = ''
checkFlag = 0
originalValue = value
if value == '': # keep the last '|' if have --> staticNames="90|180|270|360|" staticValues="M00L12078_0001|M00L12093_0001|M00L12097_0001|M00L12064_0001|"
return ''
elif re.search('_000\d$',value): # performance tuning
return value
    if hasattr(value, 'lower'): # used for case-insensitive comparison
value = value.lower()
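    # checkFlag acts as a small state machine while searching labelDict:
    # 0 = raw value, 1 = spaces stripped, 2 = '<br>' stripped,
    # 3 = '&nbsp;' stripped, 9 = stop searching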
while not checkFlag == 9:
for (k,v) in labelDict.items():
if((value == v) and not k.endswith('0')):
keyFound = k
checkFlag = 9
break
if checkFlag == 0:
if re.search(' ', value):
value = value.replace(' ','')
checkFlag = 1
elif re.search('<br>', value):
value = value.replace('<br>','')
checkFlag = 2
elif re.search('\ ',value):
value = value.replace(r' ','')
checkFlag = 3
else: checkFlag = 9
elif checkFlag == 1:
if re.search('<br>', value):
value = value.replace('<br>','')
checkFlag = 2
elif re.search('\ ',value):
value = value.replace(r' ','')
checkFlag = 3
else: checkFlag = 9
elif checkFlag == 2:
if re.search('\ ',value):
value = value.replace(r' ','')
checkFlag = 3
else: checkFlag = 9
elif checkFlag == 3: checkFlag = 9
elif checkFlag == 9:
break
if keyFound == '': # in case of can not find the not key.endswith('0')
for (k,v) in labelDict.items():
if value == v:
keyFound = k
if keyFound != '':
return keyFound
else:
if(originalValue not in keyNotFoundList):
keyNotFoundList.append(originalValue)
if inplaceFlag == 0:
pass
return originalValue
def addDataToDictLoweredValue():
rb = open_workbook('Label_KO.xls')
sheet = rb.sheet_by_name('Sheet1')
for i in range(1, sheet.nrows):
row = sheet.row(i)
if hasattr(row[2].value, 'lower'):
labelDict[row[1].value] = (row[2].value).lower() # lower for case-insensitive
else: labelDict[row[1].value] = row[2].value
#==main==================
print ("Start time: " + str(datetime.now()))
print ("Root Dir = " + root_dir)
print ("inplaceFlag = " + str(inplaceFlag) + "\n")
addDataToDictLoweredValue()
for folder in constantFolders:
run(root_dir + "\\" + folder, inplaceFlag)
print('\nshowTextFieldsCount = ' + str(showSelectListCount))
print('alreadyChanged = ' + str(alreadyChanged))
print('showSelectListChanged = ' + str(showSelectListChanged)+'\n')
print('labelMainList Details')
for i,subList in enumerate(labelMainList):
if subList[3] == 0: #Label found but can not find in label list
print('KEYNOTFOUND '+str(i+1)+'\t'+subList[0]+' at line '+str(subList[1])+'\t@-@>'+subList[2])
elif subList[3] == 1:
## print(str(i+1)+'\t'+subList[0]+' at line '+str(subList[1])+'\t'+subList[2])
pass
if keyNotFoundList:
print('keyNotFoundList contains:')
for i, key in enumerate(keyNotFoundList):
print(' '+str(i+1)+'\t'+key)
print("\nEnd time: " + str(datetime.now()))
| [
"[email protected]"
] | |
897de58d13bf05f061a944969efdd949a05fd250 | 70d3a0b9592d67627613806361996848cbdf6e81 | /tribune/urls.py | 743c5c2d51d89d1e34935298e5819d997537bf86 | [] | no_license | markmurimi/moringa-tribune | 38c85bfbde40b0f6540f9c0c33150f8d8cb2672e | 51ced550760bfcf05c97889cbef5c891ed33c8e1 | refs/heads/master | 2020-03-15T11:07:25.408649 | 2018-05-18T05:25:23 | 2018-05-18T05:25:23 | 132,112,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^api-token-auth/', obtain_auth_token),
url(r'^admin/', admin.site.urls),
url(r'',include('news.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
] | [
"[email protected]"
] | |
a8f12a818bd905bf27433ceb1d88cd1adcb6fd03 | bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd | /neekanee/job_scrapers/plugins/org/link/ecri.py | 454de8cc4ab4aed3dae40d464a9b0baad832bcf6 | [] | no_license | thayton/neekanee | 0890dd5e5cf5bf855d4867ae02de6554291dc349 | f2b2a13e584469d982f7cc20b49a9b19fed8942d | refs/heads/master | 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | import re, urlparse, urllib
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
COMPANY = {
'name': 'Economic Cycle Research Institute - ECRI',
'hq': 'Plymouth Meeting, PA',
'home_page_url': 'http://www.ecri.org',
'jobs_page_url': 'https://careers.ecri.org/',
'empcnt': [201,500]
}
class EcriJobScraper(JobScraper):
def __init__(self):
super(EcriJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
s = soupify(self.br.response().read())
x = {'class': 'JobLink'}
for a in s.findAll('a', attrs=x):
job = Job(company=self.company)
job.title = a.text
job.url = urlparse.urljoin(self.br.geturl(), a['href'])
job.location = self.company.location
jobs.append(job)
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
t = s.find('table', id='CRCareers1_tblJobDescrDetail')
job.desc = get_all_text(t)
job.save()
def get_scraper():
return EcriJobScraper()
if __name__ == '__main__':
job_scraper = get_scraper()
job_scraper.scrape_jobs()
| [
"[email protected]"
] | |
76cdb6dfccc2ed384bfad32b928caa9558191f83 | 9eee1566e436a883fa3bd0266c6a7375e34ab4d7 | /notes/cli/commands/document/modify.py | 14ee5e002dbbd79ebb3e9e1fe2bda613817eda8d | [] | no_license | gropax/qiq-django | aa87fa070bf2083aba9043ebc96c2287be2cf7e5 | 31f8c6ad717994bd9b37fcdde3fec8549be5aec1 | refs/heads/master | 2020-07-09T05:19:13.117991 | 2017-01-10T16:54:52 | 2017-01-10T16:54:52 | 65,904,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | import os
from core.cli.command import Command, command
from notes.cli.utils import Utils
from notes.cli.commands.document import DocumentCommand
@command('modify', DocumentCommand)
class ModifyCommand(Command, Utils):
aliases = ('mod',)
def add_arguments(self, parser):
parser.add_argument('name_or_id', type=str, help='the name or the id of the document')
parser.add_argument('-d', '--description', type=str,
help='the description of the document')
parser.add_argument('-n', '--new-name', type=str,
help='the new name of the document')
parser.add_argument('-f', '--file', type=str,
help='synchronize document with file')
def action(self, args):
name_or_id = args.name_or_id
doc = self.find_document_by_name_or_id_or_error(name_or_id)
old_name, desc_mod, file_mod = None, None, None
new_name = args.new_name
if new_name and new_name != doc.name:
self.check_document_name_is_valid(new_name)
old_name = doc.name
doc.name = new_name
desc = args.description
if desc and desc != doc.description:
desc_mod = True
doc.description = desc
f = None
if args.file:
f = self.absolute_path(args.file)
if f != doc.file:
if os.path.isfile(f):
if not self.ask('File `%s` already exists. Synchronize it anyway ?' % f, default='no'):
self.warning_operation_aborted()
file_mod = True
doc.file = f
self.synchronize_document(doc)
if old_name or desc_mod or file_mod:
doc.save()
self.success_document_modified(doc, old_name, desc_mod or file_mod)
else:
self.warning_nothing_to_do()
| [
"[email protected]"
] | |
308c563f76e19aceed010d3587c7917ad4876a05 | ccfe4eb1a0df72da1ffb35d082ce4eedcf3a55e0 | /grade/views.py | 63fda731f48957eefbc2fed69edd739dd1f5d9ec | [] | no_license | chydream/djangoDemo | a807a8f93f59dee4ecde031388a2cdb5172b3e84 | 94df813dcf3877a46dad572d5421e33862100a0d | refs/heads/master | 2022-07-17T16:34:04.343259 | 2020-05-20T03:29:10 | 2020-05-20T03:29:10 | 258,780,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | from django.db.models import Sum, Max, Count
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from grade.models import Grade, Student
def page_count(request):
# num = Grade.objects.filter(student_name= '张三').aggregate(total=Sum('score'))
# print(num)
# max_num = Grade.objects.filter(subject_name='语文').aggregate(high_score=Max('score'))
# print(max_num)
# sum_num = Grade.objects.values_list('student_name').annotate(Sum('score'))
# print(sum_num)
sum_num = Student.objects.all().annotate(Sum('stu_grade__score'))
print(sum_num)
for item in sum_num:
print(item.student_name, item.stu_grade__score__sum)
zs = Student.objects.get(pk=1)
list = zs.stu_grade.all()
print(list)
return HttpResponse('ok') | [
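# A sketch of the models this view appears to assume; the field names are
# inferred from the queries above and the real definitions live in grade/models.py:
#
#     class Student(models.Model):
#         student_name = models.CharField(max_length=50)
#
#     class Grade(models.Model):
#         student = models.ForeignKey(Student, on_delete=models.CASCADE,
#                                     related_name='stu_grade')
#         student_name = models.CharField(max_length=50)
#         subject_name = models.CharField(max_length=50)
#         score = models.IntegerField()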
"[email protected]"
] | |
ee5485a406a84015222484f56a780b6e480a68cd | 37ba62db61fc4ec62634638763a984cbfbe40fe3 | /day40/聊天/client1.py | 774d118ed33968502dfa2d9a623bffb169cd56ad | [] | no_license | lt910702lt/python | ca2768aee91882c893a9bc6c1bdd1b455ebd511f | c6f13a1a9461b18df17205fccdc28f89854f316c | refs/heads/master | 2020-05-09T22:54:22.587206 | 2019-09-17T09:02:53 | 2019-09-17T09:02:53 | 181,485,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import socket
import time
sk = socket.socket()
sk.connect(('127.0.0.1', 8080))
while True:
sk.send(b'hi')
ret = sk.recv(1024).decode('utf-8')
print(ret)
time.sleep(1)
sk.close() | [
"[email protected]"
] | |
f2921abb6806d16d0f7ddc14a97f9baeff9ea3f2 | f0856e60a095ce99ec3497b3f27567803056ac60 | /Dacon/01newstopic_4_StratifiedKFold.py | 4133bad05dab0a9658e9397d51e417687e2a2d98 | [] | no_license | hjuju/TF_Study-HAN | dcbac17ce8b8885f5fb7d7f554230c2948fda9ac | c0faf98380e7f220868ddf83a9aaacaa4ebd2c2a | refs/heads/main | 2023-09-04T09:13:33.212258 | 2021-10-27T08:00:49 | 2021-10-27T08:00:49 | 384,371,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,418 | py | import numpy as np
import re
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from icecream import ic
from sklearn.metrics import accuracy_score,log_loss
from sklearn.model_selection import StratifiedKFold
import datetime
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, Dropout, Bidirectional
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.utils import plot_model, to_categorical
from tensorflow.keras.optimizers import Adam
path = './Dacon/_data/newstopic/'
train = pd.read_csv(path + 'train_data.csv',header=0, encoding='UTF8')
test = pd.read_csv(path + 'test_data.csv',header=0, encoding='UTF8')
submission = pd.read_csv(path + 'sample_submission.csv')
topic_dict = pd.read_csv(path + 'topic_dict.csv')
# remove null values
# datasets_train = datasets_train.dropna(axis=0)
# datasets_test = datasets_test.dropna(axis=0)
# x = datasets_train.iloc[:, -2]
# y = datasets_train.iloc[:, -1]
# x_pred = datasets_test.iloc[:, -1]
train['doc_len'] = train.title.apply(lambda words: len(words.split()))
x_train = np.array([x for x in train['title']])
x_predict = np.array([x for x in test['title']])
y_train = np.array([x for x in train['topic_idx']])
def text_cleaning(docs):
    # keep only Korean characters and spaces; build and return the cleaned copies
    return np.array([re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", doc) for doc in docs])
x = text_cleaning(x_train)
x_predict = text_cleaning(x_predict)
# ic(x.shape) ic| x.shape: (45654,)
# remove stopwords and special characters
# import string
# def define_stopwords(path):
# sw = set()
# for i in string.punctuation:
# sw.add(i)
# with open(path, encoding='utf-8') as f:
# for word in f:
# sw.add(word)
# return sw
# x = define_stopwords(x)
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(x)
sequences_train = tokenizer.texts_to_sequences(x)
sequences_test = tokenizer.texts_to_sequences(x_predict)
# remove empty values from the lists --> causes an error with the bidirectional layers..
# sequences_train = list(filter(None, sequences_train))
# sequences_test = list(filter(None, sequences_test))
# check sequence lengths
# x1_len = max(len(i) for i in sequences_train)
# ic(x1_len) # ic| x1_len: 11
# x_pred = max(len(i) for i in sequences_test)
# ic(x_pred) # ic| x_pred: 9
xx = pad_sequences(sequences_train, padding='pre', maxlen = 14)
# ic(xx.shape) ic| xx.shape: (42477, 11)
yy = pad_sequences(sequences_test, padding='pre', maxlen=14)
y = to_categorical(y_train)
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, GRU, Bidirectional
# model = Sequential()
# model.add(Embedding(input_dim=101082, output_dim=77, input_length=11))
# model.add(LSTM(128, activation='relu'))
# model.add(Dense(64, activation= 'relu'))
# model.add(Dropout(0.2))
# model.add(Dense(32, activation= 'relu'))
# model.add(Dense(7, activation='softmax'))
model = Sequential([Embedding(101082, 200, input_length =14),
tf.keras.layers.Bidirectional(LSTM(units = 32, return_sequences = True, activation='relu')),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Bidirectional(LSTM(units = 16, return_sequences = True, activation='relu')),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Bidirectional(LSTM(units = 8, activation='relu')),
Dense(7, activation='softmax')])
import datetime
import time
optimizer = Adam(learning_rate=0.0001)
model.compile(loss= 'categorical_crossentropy', optimizer= optimizer, metrics = ['acc'])
date = datetime.datetime.now()
date_time = date.strftime('%m%d_%H%M')
cvpath = './Dacon/_save/skfoldmcp/'
info = '{epoch:02d}_{val_loss:.4f}'
filepath = ''.join([cvpath, 'test', '_', date_time, '_', info, '.hdf5'])
history = model.fit(xx, y, epochs=10, batch_size=512, validation_split= 0.2)
n_fold = 5
cv = StratifiedKFold(n_splits = n_fold, shuffle=True, random_state=66)
# create an array to hold the predictions for the test data
test_y = np.zeros((yy.shape[0], 7))
# add early-stopping options
es = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='min', baseline=None, restore_best_weights=True)
cp = ModelCheckpoint(monitor='val_loss', save_best_only=True, mode='auto', verbose=1, filepath=filepath)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', verbose=1, patience=10, mode='auto', factor=0.1 )
start_time = time.time()
for i, (i_trn, i_val) in enumerate(cv.split(xx, y_train), 1):
print(f'training model for CV #{i}')
model.fit(xx[i_trn],
to_categorical(y_train[i_trn]),
validation_data=(xx[i_val], to_categorical(y_train[i_val])),
epochs=5,
batch_size=512,
              callbacks=[es, cp])  # early-stopping callbacks
test_y += model.predict(yy) / n_fold
topic = []
for i in range(len(test_y)):
topic.append(np.argmax(test_y[i]))
end_time = time.time() - start_time
submission['topic_idx'] = topic
ic(submission.shape)
date_time = datetime.datetime.now().strftime("%y%m%d_%H%M")
submission.to_csv('./Dacon/_save/csv/predict' + date_time + '.csv', index=False)
| [
"[email protected]"
] | |
6d28554b2234358e85f3e19d6f78774acae86379 | 312d40d6023858891dd32bda67579f7284a54c15 | /02/01/isalpha.py | c0ee947563b96940abf5d767079a9e3478a11056 | [
"CC0-1.0"
] | permissive | pylangstudy/201708 | b67a49f091f5f949954e7b9a910a07761fe9a7d1 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | refs/heads/master | 2021-01-01T20:49:15.973299 | 2017-08-31T00:18:55 | 2017-08-31T00:18:55 | 98,936,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | print('aAzZ'.isalpha())
print('1'.isalpha())
| [
"[email protected]"
] | |
26c8a0b64e7d7cd923089885b32824ea3c70e05b | 3d2f5c005bbf4b4194fc105b9c2492d2bd09109c | /dynamic_scraper/utils/processors.py | bd0c67b62fb18e5f462ab21b7c780165b1adcc79 | [] | no_license | mtaziz/django-dynamic-scraper | e9a51a3b95a84767412df5edb9806dae5bdb69e1 | 87ae65ec97a405e03e1c2493637581bfe2545410 | refs/heads/master | 2021-01-22T01:43:55.587001 | 2016-01-07T09:43:51 | 2016-01-07T09:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,393 | py | #Stage 2 Update (Python 3)
from __future__ import unicode_literals
from builtins import str
import datetime
from scrapy import log
def string_strip(text, loader_context):
if not isinstance(text, str):
text = str(text)
chars = loader_context.get('string_strip', ' \n\t\r')
return text.strip(chars)
def pre_string(text, loader_context):
pre_str = loader_context.get('pre_string', '')
return pre_str + text
def post_string(text, loader_context):
post_str = loader_context.get('post_string', '')
return text + post_str
def pre_url(text, loader_context):
pre_url = loader_context.get('pre_url', '')
if(pre_url[0:7] == 'http://' and text[0:7] == 'http://'):
return text
if(pre_url[-1:] == '/' and text[0:1] == '/'):
pre_url = pre_url[:-1]
return pre_url + text
def replace(text, loader_context):
replace = loader_context.get('replace', '')
return replace
def static(text, loader_context):
static = loader_context.get('static', '')
return static
def date(text, loader_context):
cformat = loader_context.get('date')
try:
if text.lower() in ['gestern', 'yesterday',]:
date = datetime.date.today() - datetime.timedelta(1)
elif text.lower() in ['heute', 'today',]:
date = datetime.date.today()
elif text.lower() in ['morgen', 'tomorrow',]:
date = datetime.date.today() + datetime.timedelta(1)
else:
date = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Date could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return date.strftime('%Y-%m-%d')
def time(text, loader_context):
cformat = loader_context.get('time')
try:
time = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Time could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return time.strftime('%H:%M:%S')
def ts_to_date(ts_str, loader_context):
try:
ts_int = int(ts_str)
return datetime.datetime.fromtimestamp(ts_int).strftime('%Y-%m-%d')
except ValueError:
loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
return None
def ts_to_time(ts_str, loader_context):
try:
ts_int = int(ts_str)
return datetime.datetime.fromtimestamp(ts_int).strftime('%H:%M:%S')
except ValueError:
loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
return None
def _breakdown_time_unit_overlap(time_str, limit):
    # Carry an overflowing leading unit into the next larger unit, e.g. '75'
    # with limit 60 becomes '1:15'; values below the limit are zero-padded
    # and prefixed with '00' so the result always gains one extra field.
time_list = time_str.split(':')
first = int(time_list[0])
if first >= limit:
time_list[0] = str(first % limit)
time_list.insert(0, str(first // limit))
else:
if(len(time_list[0]) == 1):
time_list[0] = '0' + time_list[0]
time_list.insert(0, '00')
time_str = ':'.join(time_list)
return time_str
def duration(text, loader_context):
cformat = loader_context.get('duration')
#Value completion in special cases
text_int = None
try:
text_int = int(text)
except ValueError:
pass
if(cformat == '%H:%M'):
if text_int:
text += ':00'
if(cformat == '%M'):
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%H:%M'
if(cformat == '%M:%S'):
if text_int:
text += ':00'
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%H:%M:%S'
if(cformat == '%S'):
if text_int:
if text_int >= 3600:
hours_str = str(text_int // 3600) + ':'
secs_under_hour_str = str(text_int % 3600)
text = hours_str + _breakdown_time_unit_overlap(secs_under_hour_str, 60)
cformat = '%H:%M:%S'
else:
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%M:%S'
try:
duration = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Duration could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return duration.strftime('%H:%M:%S')
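# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes the processors are invoked the way Scrapy item loaders
# invoke them: with the raw value plus a loader_context mapping.
if __name__ == '__main__':
    print(string_strip('  hello\n', {'string_strip': ' \n'}))        # hello
    print(pre_url('/page/1', {'pre_url': 'http://example.com/'}))    # http://example.com/page/1
    print(date('today', {'date': '%Y-%m-%d'}))                       # today's date as YYYY-MM-DD
    print(duration('75', {'duration': '%M'}))                        # 01:15:00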
| [
"[email protected]"
] | |
19ef8cdbc0dc5200b8b935d3ddd5ac3d2fe2bd1d | c7bb79543b679061376eb10df110041b0556d54f | /ilastik/applets/thresholdTwoLevels/thresholdTwoLevelsGui.py | a73dad2e8b9c30fd44c512ec28348c480e45f383 | [] | no_license | thorbenk/ilastik | 26eaf01d571415b195a945f169bd3acd47a79613 | fc009544f4c3119f082f0623ed82303527c6fd88 | refs/heads/master | 2021-01-18T21:55:37.803554 | 2013-04-08T16:32:19 | 2013-04-08T16:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,726 | py | import os
import logging
from functools import partial
from PyQt4 import uic
from PyQt4.QtCore import Qt, QEvent
from PyQt4.QtGui import QColor
from volumina.api import LazyflowSource, AlphaModulatedLayer
from ilastik.applets.layerViewer import LayerViewerGui
from ilastik.utility.gui import threadRouted
logger = logging.getLogger(__name__)
traceLogger = logging.getLogger("TRACE." + __name__)
class ThresholdTwoLevelsGui( LayerViewerGui ):
def __init__(self, *args, **kwargs):
super( self.__class__, self ).__init__(*args, **kwargs)
self._channelColors = self._createDefault16ColorColorTable()
def initAppletDrawerUi(self):
"""
Reimplemented from LayerViewerGui base class.
"""
# Load the ui file (find it in our own directory)
localDir = os.path.split(__file__)[0]
self._drawer = uic.loadUi(localDir+"/drawer.ui")
self._drawer.applyButton.clicked.connect( self._onApplyButtonClicked )
self._sigmaSpinBoxes = { 'x' : self._drawer.sigmaSpinBox_X,
'y' : self._drawer.sigmaSpinBox_Y,
'z' : self._drawer.sigmaSpinBox_Z }
self._allWatchedWidgets = self._sigmaSpinBoxes.values() + \
[
self._drawer.inputChannelSpinBox,
self._drawer.lowThresholdSpinBox,
self._drawer.highThresholdSpinBox,
self._drawer.minSizeSpinBox,
self._drawer.maxSizeSpinBox
]
for widget in self._allWatchedWidgets:
# If the user pressed enter inside a spinbox, auto-click "Apply"
widget.installEventFilter( self )
self._updateGuiFromOperator()
@threadRouted
def _updateGuiFromOperator(self):
op = self.topLevelOperatorView
# Channel
channelIndex = op.InputImage.meta.axistags.index('c')
numChannels = op.InputImage.meta.shape[channelIndex]
self._drawer.inputChannelSpinBox.setRange( 0, numChannels-1 )
self._drawer.inputChannelSpinBox.setValue( op.Channel.value )
# Sigmas
sigmaDict = self.topLevelOperatorView.SmootherSigma.value
for axiskey, spinBox in self._sigmaSpinBoxes.items():
spinBox.setValue( sigmaDict[axiskey] )
# Thresholds
self._drawer.lowThresholdSpinBox.setValue( op.LowThreshold.value )
self._drawer.highThresholdSpinBox.setValue( op.HighThreshold.value )
# Size filters
self._drawer.minSizeSpinBox.setValue( op.MinSize.value )
self._drawer.maxSizeSpinBox.setValue( op.MaxSize.value )
def _updateOperatorFromGui(self):
op = self.topLevelOperatorView
# Read all gui settings before updating the operator
# (The gui is still responding to operator changes,
# and we don't want it to update until we've read all gui values.)
# Read Channel
channel = self._drawer.inputChannelSpinBox.value()
# Read Sigmas
sigmaSlot = self.topLevelOperatorView.SmootherSigma
block_shape_dict = dict( sigmaSlot.value )
block_shape_dict['x'] = self._sigmaSpinBoxes['x'].value()
block_shape_dict['y'] = self._sigmaSpinBoxes['y'].value()
block_shape_dict['z'] = self._sigmaSpinBoxes['z'].value()
# Read Thresholds
lowThreshold = self._drawer.lowThresholdSpinBox.value()
highThreshold = self._drawer.highThresholdSpinBox.value()
# Read Size filters
minSize = self._drawer.minSizeSpinBox.value()
maxSize = self._drawer.maxSizeSpinBox.value()
# Apply new settings to the operator
op.Channel.setValue( channel )
sigmaSlot.setValue( block_shape_dict )
op.LowThreshold.setValue( lowThreshold )
op.HighThreshold.setValue( highThreshold )
op.MinSize.setValue( minSize )
op.MaxSize.setValue( maxSize )
def _onApplyButtonClicked(self):
self._updateOperatorFromGui()
def eventFilter(self, watched, event):
"""
If the user pressed 'enter' within a spinbox, auto-click the "apply" button.
"""
if watched in self._allWatchedWidgets:
if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Enter:
self._drawer.applyButton.click()
return True
return False
def setupLayers(self):
layers = []
op = self.topLevelOperatorView
# Show the cached output, since it goes through a blocked cache
if op.CachedOutput.ready():
outputLayer = self.createStandardLayerFromSlot( op.CachedOutput )
outputLayer.name = "Output (Cached)"
outputLayer.visible = False
outputLayer.opacity = 1.0
layers.append(outputLayer)
if op.BigRegions.ready():
lowThresholdLayer = self.createStandardLayerFromSlot( op.BigRegions )
lowThresholdLayer.name = "Big Regions"
lowThresholdLayer.visible = False
lowThresholdLayer.opacity = 1.0
layers.append(lowThresholdLayer)
if op.FilteredSmallLabels.ready():
filteredSmallLabelsLayer = self.createStandardLayerFromSlot( op.FilteredSmallLabels, lastChannelIsAlpha=True )
filteredSmallLabelsLayer.name = "Filtered Small Labels"
filteredSmallLabelsLayer.visible = False
filteredSmallLabelsLayer.opacity = 1.0
layers.append(filteredSmallLabelsLayer)
if op.SmallRegions.ready():
lowThresholdLayer = self.createStandardLayerFromSlot( op.SmallRegions )
lowThresholdLayer.name = "Small Regions"
lowThresholdLayer.visible = False
lowThresholdLayer.opacity = 1.0
layers.append(lowThresholdLayer)
# Selected input channel, smoothed.
if op.Smoothed.ready():
smoothedLayer = self.createStandardLayerFromSlot( op.Smoothed )
smoothedLayer.name = "Smoothed Input"
smoothedLayer.visible = True
smoothedLayer.opacity = 1.0
layers.append(smoothedLayer)
# Show each input channel as a separate layer
for channelIndex, channelSlot in enumerate(op.InputChannels):
if op.InputChannels.ready():
drange = channelSlot.meta.drange
if drange is None:
drange = (0.0, 1.0)
channelSrc = LazyflowSource(channelSlot)
channelLayer = AlphaModulatedLayer( channelSrc,
tintColor=QColor(self._channelColors[channelIndex]),
range=drange,
normalize=drange )
channelLayer.name = "Input Ch{}".format(channelIndex)
channelLayer.opacity = 1.0
channelLayer.visible = channelIndex == op.Channel.value # By default, only the selected input channel is visible.
layers.append(channelLayer)
# Show the raw input data
rawSlot = self.topLevelOperatorView.RawInput
if rawSlot.ready():
rawLayer = self.createStandardLayerFromSlot( rawSlot )
rawLayer.name = "Raw Data"
rawLayer.visible = True
rawLayer.opacity = 1.0
layers.append(rawLayer)
return layers
def _createDefault16ColorColorTable(self):
colors = []
# SKIP: Transparent for the zero label
#colors.append(QColor(0,0,0,0))
# ilastik v0.5 colors
colors.append( QColor( Qt.red ) )
colors.append( QColor( Qt.green ) )
colors.append( QColor( Qt.yellow ) )
colors.append( QColor( Qt.blue ) )
colors.append( QColor( Qt.magenta ) )
colors.append( QColor( Qt.darkYellow ) )
colors.append( QColor( Qt.lightGray ) )
# Additional colors
colors.append( QColor(255, 105, 180) ) #hot pink
colors.append( QColor(102, 205, 170) ) #dark aquamarine
colors.append( QColor(165, 42, 42) ) #brown
colors.append( QColor(0, 0, 128) ) #navy
colors.append( QColor(255, 165, 0) ) #orange
colors.append( QColor(173, 255, 47) ) #green-yellow
colors.append( QColor(128,0, 128) ) #purple
colors.append( QColor(240, 230, 140) ) #khaki
colors.append( QColor(192, 192, 192) ) #silver
# colors.append( QColor(69, 69, 69) ) # dark grey
# colors.append( QColor( Qt.cyan ) )
assert len(colors) == 16
return [c.rgba() for c in colors]
| [
"[email protected]"
] | |
20a6e72b54eb96bd50108fafb8ea71a7affed7ec | 1be65c272e2788f647534db9b3b003fc98228a71 | /mi-api/miapi/controllers/about.py | dc84e905469c3868744ffa99e5665e04dad872b1 | [] | no_license | camwes/thisis.me | e93e530cdddaab08d8796985ef88c0882ae142d0 | e1c32d237a7410298fdb034cab8378d5ecc14973 | refs/heads/master | 2021-01-20T21:45:09.389189 | 2012-09-12T00:45:19 | 2012-09-12T00:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py |
def about(request):
return {'project': 'mi-api'}
| [
"[email protected]"
] | |
8e45a5d0f33b6ef3c3ec1f9a9cbe1fafc2635b40 | aef4faef04d851fe24f2b3f28ae98daa2152e543 | /spikestim/negbin_bayes.py | c4b32098dcf07116803003630d8ad78e1e479204 | [] | no_license | dattalab/spikestim | b1580eec250e0a6b03796b200dfe22b15112228b | 631152b3f173dc6c8fa2601ad917f899dc1210b9 | refs/heads/master | 2021-01-21T08:44:00.379078 | 2015-12-04T13:53:51 | 2015-12-04T13:53:51 | 45,796,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | from __future__ import division
import numpy as np
from pybasicbayes.distributions import NegativeBinomial
'''
The code in this file provides utilities for Bayesian estimation of negative
binomial parameters through MCMC methods provided by pybasicbayes.
The main function to use is get_posterior_samples(data, num_samples).
The NB class sets the prior to use sensible defaults, namely
p ~ Beta(alpha=1., beta=1.)
r ~ Gamma(k=1., theta=1)
That is, the prior on p is uniform on [0,1] and the prior on r is exponential
with rate 1.
'''
class NB(NegativeBinomial):
def __init__(self, r=None, p=None, alpha=1., beta=1., k=1., theta=1.):
super(NB, self).__init__(
r=r, p=p, alpha_0=alpha, beta_0=beta, k_0=k, theta_0=theta)
def get_posterior_samples(data, num_samples):
distn = NB()
data = np.require(data, requirements='C')
samples = []
for _ in xrange(num_samples):
distn.resample(data)
samples.append((distn.r, distn.p))
return samples
# these next two functions are redundant with negbin_maxlike.py, but use
# pybasicbayes implementations instead
def negbin_loglike(r, p, x):
return NB(r=r, p=p).log_likelihood(x)
def negbin_sample(r, p, size):
return NB(r=r, p=p).rvs(size)
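# --- Minimal usage sketch (illustrative only; the synthetic parameters below
# are assumptions, not taken from the repository) ---
if __name__ == '__main__':
    fake_counts = negbin_sample(r=3., p=0.4, size=200)
    samples = get_posterior_samples(fake_counts, num_samples=100)
    rs, ps = zip(*samples)
    print('posterior mean r=%.2f, p=%.2f' % (np.mean(rs), np.mean(ps)))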
| [
"[email protected]"
] | |
e1f6613cc9681c42b7d3e9225e62499c04ee0236 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/VelvetTie/pancakes.py | c27cdec091f729b20528e32b3b22c5d62d391cef | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,853 | py | import sys
def flip(to_flip):
"""
>>> flip('-')
'+'
>>> flip('+')
'-'
>>> flip('+++')
'---'
>>> flip('-+')
'-+'
>>> flip('--+')
'-++'
:param to_flip:
:return:
"""
num_pancakes = len(to_flip)
flipped = [0 for i in range(num_pancakes)]
for i in range(num_pancakes):
if to_flip[i] == '-':
flipped[num_pancakes - 1 - i] = '+'
else:
flipped[num_pancakes - 1 - i] = '-'
return ''.join(flipped)
def count_flips(pancakes):
"""
>>> count_flips('-')
1
>>> count_flips('+')
0
>>> count_flips('-+')
1
>>> count_flips('+-')
2
>>> count_flips('+++')
0
>>> count_flips('--+-')
3
:param pancakes - string
"""
num_flips = 0
if '-' not in pancakes:
#print('No flips needed')
return num_flips
elif '+' not in pancakes:
#print('Just flip once')
pancakes = flip(pancakes)
num_flips = 1
return num_flips
    else:
        # greedy step: flip the maximal prefix of same-facing pancakes,
        # then recurse on the resulting stack
        ref_sign = pancakes[0]
to_flip = ''
for p in pancakes:
if p != ref_sign:
break
else:
to_flip += p
print("to_flip={}".format(to_flip), file=sys.stderr)
flipped = flip(to_flip)
num_flips += 1
# Put together new pancake stack.
num_flipped = len(flipped)
new_stack = flipped + pancakes[num_flipped:]
print("new_stack={}".format(new_stack), file=sys.stderr)
num_flips += count_flips(new_stack)
return num_flips
def read_input():
num_test_cases = int(input())
for t in range(1, num_test_cases + 1):
pancakes = input()
flips = count_flips(pancakes)
print("Case #{}: {}".format(t, flips))
if __name__ == "__main__":
read_input()
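# Illustrative run (the sample cases below are assumptions, not official
# Code Jam data): feeding this on stdin
#   3
#   -
#   -+
#   +-
# prints
#   Case #1: 1
#   Case #2: 1
#   Case #3: 2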
| [
"[[email protected]]"
] | |
b6d9a025d803dd7983357c17927e7543de6c6a3d | c2415d039d12cc3b1d587ce497527ff62867df41 | /authentication/config.py | 618701e23120609d0033d92185799d74b676d777 | [
"MIT"
] | permissive | admin-dashboards/django-dashboard-light | dc207c07d1987b7b4af52c449502ccd797d4d979 | 96142cf7f9e807e575a1d444e1bb291d3f956652 | refs/heads/master | 2022-09-04T16:48:08.087092 | 2022-08-23T07:37:03 | 2022-08-23T07:37:03 | 232,156,115 | 1 | 0 | MIT | 2020-01-06T18:02:01 | 2020-01-06T18:02:00 | null | UTF-8 | Python | false | false | 173 | py | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.apps import AppConfig
class AuthConfig(AppConfig):
name = 'authcfg'
| [
"[email protected]"
] | |
a42a79d97b8e29b148476dc6d40564bfd92ecc97 | d644b6cabb4fa88cf900c59799a2897f5a0702d8 | /tests/base_tests/polygon_tests/test_triangulate.py | 47fe7d21c053c0b7be0a50fce9abd3e709ea7763 | [
"MIT"
] | permissive | lycantropos/gon | c3f89a754c60424c8e2609e441d7be85af985455 | 177bd0de37255462c60adcbfcdf76bfdc343a9c1 | refs/heads/master | 2023-07-06T01:11:57.028646 | 2023-06-26T20:47:14 | 2023-06-27T00:30:06 | 194,597,548 | 15 | 1 | MIT | 2023-06-27T00:30:07 | 2019-07-01T04:06:06 | Python | UTF-8 | Python | false | false | 546 | py | from functools import reduce
from operator import or_
from hypothesis import given
from gon.base import (Polygon,
Triangulation)
from . import strategies
@given(strategies.polygons)
def test_basic(polygon: Polygon) -> None:
result = polygon.triangulate()
assert isinstance(result, Triangulation)
@given(strategies.polygons)
def test_round_trip(polygon: Polygon) -> None:
result = polygon.triangulate()
assert (reduce(or_, [Polygon(contour) for contour in result.triangles()])
== polygon)
| [
"[email protected]"
] | |
b118f7c657f941a6bddce32f35f2d83780071954 | d5fcafedcd49a666e960eff3c61cfc1bfe9e081c | /setup.py | e2c764c5e29695769d00c9b9812696849ab732a7 | [
"BSD-2-Clause"
] | permissive | IndustriaTech/filebrowser-safe | 29155ef662604d5b4f6ec8a9a94002d4c0e82858 | 2eda8ae594617a58455bca63f88689f2f0579aad | refs/heads/master | 2021-05-29T07:49:26.957672 | 2014-02-05T09:13:42 | 2014-02-05T09:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py |
from setuptools import setup, find_packages
setup(
name="filebrowser_safe",
version="0.3.1",
description="A snapshot of the filebrowser_3 branch of django-filebrowser, "
"packaged as a dependency for the Mezzanine CMS for Django.",
long_description=open("README.rst").read(),
author="Patrick Kranzlmueller, Axel Swoboda (vonautomatisch)",
author_email="[email protected]",
maintainer="Stephen McDonald",
maintainer_email="[email protected]",
url="http://github.com/stephenmcd/filebrowser-safe",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
| [
"[email protected]"
] | |
903b3cf6c2069424d4e7db0124097fd3a7ef999a | 5d6ff6a6dc2174a6362d2d2782470aab7c72b909 | /quokka/core/tests/test_models.py | e09141ef787b87e931b2da032962caee0c496ede | [
"MIT"
] | permissive | imgugu/quokka | 41afb4abe095223ba6f1c53aa5e695213832426e | 301a1300bbd76b6f53569decdf3b3999ba87543f | refs/heads/master | 2020-05-20T22:23:43.282781 | 2013-11-21T21:06:21 | 2013-11-21T21:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # coding: utf-8
from . import BaseTestCase
from ..models import Channel, Config
class TestChannel(BaseTestCase):
def setUp(self):
        # get_or_create() is used here because create() was not returning
        # the created object
self.parent, new = Channel.objects.get_or_create(
title=u'Father',
)
self.channel, new = Channel.objects.get_or_create(
title=u'Monkey Island',
description=u'The coolest pirate history ever',
parent=self.parent,
tags=['tag1', 'tag2'],
)
def tearDown(self):
self.channel.delete()
self.parent.delete()
def test_channel_fields(self):
self.assertEqual(self.channel.title, u'Monkey Island')
self.assertEqual(self.channel.slug, u'monkey-island')
self.assertEqual(self.channel.long_slug, u'father/monkey-island')
self.assertEqual(self.channel.mpath, u',father,monkey-island,')
self.assertEqual(self.channel.description,
u'The coolest pirate history ever')
self.assertEqual(self.channel.tags, ['tag1', 'tag2'])
self.assertEqual(self.channel.parent, self.parent)
self.assertEqual(unicode(self.channel), u'father/monkey-island')
def test_get_ancestors(self):
self.assertEqual(list(self.channel.get_ancestors()), [self.channel,
self.parent])
def test_get_ancestors_slug(self):
self.assertEqual(self.channel.get_ancestors_slugs(),
[u'father/monkey-island', u'father'])
def test_get_children(self):
self.assertEqual(list(self.parent.get_children()), [self.channel])
def test_get_descendants(self):
self.assertEqual(list(self.parent.get_descendants()),
[self.parent, self.channel])
def test_absolute_urls(self):
self.assertEqual(self.channel.get_absolute_url(),
'/father/monkey-island/')
self.assertEqual(self.parent.get_absolute_url(),
'/father/')
def test_get_canonical_url(self):
self.assertEqual(self.channel.get_canonical_url(),
'/father/monkey-island/')
self.assertEqual(self.parent.get_canonical_url(),
'/father/')
class TestConfig(BaseTestCase):
def setUp(self):
        # get_or_create() is used here because create() was not returning
        # the created object
self.config, new = Config.objects.get_or_create(
group=u'test group',
)
def tearDown(self):
self.config.delete()
def test_config_fields(self):
self.assertEqual(self.config.group, u'test group')
self.assertEqual(self.config.content_format, 'html')
self.assertEqual(unicode(self.config), u'test group')
| [
"[email protected]"
] | |
b180b7ee13e37740ff672f524e6d84abb68a9392 | 4ce2cff60ddbb9a3b6fc2850187c86f866091b13 | /tfrecords/src/wai/tfrecords/object_detection/utils/json_utils_test.py | abd785fffd16944d382209e289dc19bd61f7b9e2 | [
"MIT",
"Apache-2.0"
] | permissive | 8176135/tensorflow | 18cb8a0432ab2a0ea5bacd03309e647f39cb9dd0 | 2c3b4b1d66a80537f3e277d75ec1d4b43e894bf1 | refs/heads/master | 2020-11-26T05:00:56.213093 | 2019-12-19T08:13:44 | 2019-12-19T08:13:44 | 228,970,478 | 0 | 0 | null | 2019-12-19T03:51:38 | 2019-12-19T03:51:37 | null | UTF-8 | Python | false | false | 3,594 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.utils.json_utils."""
import os
import tensorflow as tf
from wai.tfrecords.object_detection.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
def testDumpReasonablePrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=2)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.00')
def testDumpPassExtraParams(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump([1.0], f, float_digits=2, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '[\n 1.00\n]')
def testDumpZeroPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=0, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1')
def testDumpUnspecifiedPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.012345, f)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.012345')
def testDumpsReasonablePrecision(self):
s = json_utils.Dumps(1.0, float_digits=2)
self.assertEqual(s, '1.00')
def testDumpsPassExtraParams(self):
s = json_utils.Dumps([1.0], float_digits=2, indent=3)
self.assertEqual(s, '[\n 1.00\n]')
def testDumpsZeroPrecision(self):
s = json_utils.Dumps(1.0, float_digits=0)
self.assertEqual(s, '1')
def testDumpsUnspecifiedPrecision(self):
s = json_utils.Dumps(1.012345)
self.assertEqual(s, '1.012345')
def testPrettyParams(self):
s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')
def testPrettyParamsExtraParamsInside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))
def testPrettyParamsExtraParamsOutside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
a547b0c580f6eb57a424adc7697b39ea1301b365 | 98cb4fbb8bd776f187a9baf42d66911ed52211c9 | /archives/vocalSeparation/network.py | 598ceef69799222575f8631b585a63b4c1585f5e | [
"MIT"
] | permissive | daniel769/Audio-Source-Separation | 017878dd5302f76948f9db800c50a4a4c43e2c63 | 1693a9736fc08c53935aba7218ad82a271b42525 | refs/heads/master | 2022-02-13T16:14:28.279016 | 2018-10-24T04:15:25 | 2018-10-24T04:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from module import *
import hyperparams as hp
def network(input_, use_mulaw=hp.use_mulaw):
    # project the input into the hidden dimension with an initial 1-D convolution
    input_ = conv1d(input_, output_channels=hp.hidden_dim, filter_width=3)
    skip_connections = list()
    # WaveNet-style stack of dilated residual blocks: each block emits a skip
    # output (collected below) and a residual output that feeds the next block
    for i in hp.dilation:
        skip, res = residual_block(input_, rate=i, scope="res_%d" % i)
        input_ = res
        skip_connections.append(skip)
    # sum all skip outputs and map them through the output head
    # (mu-law-quantized or not, depending on `use_mulaw`)
    skip_output = tf.add_n(skip_connections)
    output = skip_connection(skip_output, use_mulaw=use_mulaw)
    return output
| [
"[email protected]"
] | |
8c63a54063013f4c1c82f843d7730ceee77f0320 | 64e0208fa243ebbab4855980a3f21be78a4a1025 | /test/tests/object_new_arguments.py | 873ff6bf2839456b969c7e3f8e8ae6893704f34f | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | H8ter/pyston | 581f5fcb59bb8b19399347626639a688e92c80ff | 4cd23054a81b58a8de32ecf43daab2cb2e67f53f | refs/heads/master | 2021-01-15T13:28:46.252887 | 2015-07-24T21:39:13 | 2015-07-24T21:39:13 | 39,681,136 | 0 | 0 | null | 2015-07-25T10:46:01 | 2015-07-25T10:46:01 | null | UTF-8 | Python | false | false | 484 | py | # object.__new__ doesn't complain if __init__ is overridden:
class C1(object):
def __init__(self, a):
pass
class C2(object):
pass
print "Trying C1"
object.__new__(C1, 1)
object.__new__(C1, a=1)
print "Trying C2"
try:
object.__new__(C2, 1)
except TypeError as e:
print "caught TypeError"
# These are some tricky cases, since they can potentially look like arguments
# are being passed, but really they are not.
type.__call__(*[C2])
type.__call__(C2, **{})
| [
"[email protected]"
] | |
ff987874fe44fb55ab175f210783ce133f0d2703 | da6d4be0d0eaa328972798ee50c5caf2f1e835c0 | /third_party/blink/web_tests/external/wpt/tools/third_party/packaging/tests/test_tags.py | 68b109a89f331a048e00491b0310a62c43b719d7 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"LicenseRef-scancode-other-copyleft"
] | permissive | typememo/chromium | 11aaa35726ee96d534ed6887827f3300520b463f | 6d6968680269418cc260e000318ca70ae2d2c034 | refs/heads/master | 2023-02-13T19:23:39.438224 | 2021-01-13T07:45:16 | 2021-01-13T07:45:16 | 328,891,336 | 0 | 0 | BSD-3-Clause | 2021-01-12T06:19:07 | 2021-01-12T06:19:07 | null | UTF-8 | Python | false | false | 54,693 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
try:
import ctypes
except ImportError:
ctypes = None
import distutils.util
import os
import platform
import re
import sys
import sysconfig
import types
import warnings
import pretend
import pytest
from packaging import tags
@pytest.fixture
def example_tag():
return tags.Tag("py3", "none", "any")
@pytest.fixture
def is_x86():
return re.match(r"(i\d86|x86_64)", platform.machine()) is not None
@pytest.fixture
def manylinux_module(monkeypatch):
monkeypatch.setattr(tags, "_get_glibc_version", lambda *args: (2, 20))
module_name = "_manylinux"
module = types.ModuleType(module_name)
monkeypatch.setitem(sys.modules, module_name, module)
return module
@pytest.fixture
def mock_interpreter_name(monkeypatch):
def mock(name):
if hasattr(sys, "implementation") and sys.implementation.name != name.lower():
monkeypatch.setattr(sys.implementation, "name", name.lower())
return True
elif platform.python_implementation() != name:
monkeypatch.setattr(platform, "python_implementation", lambda: name)
return True
return False
return mock
class TestTag:
def test_lowercasing(self):
tag = tags.Tag("PY3", "None", "ANY")
assert tag.interpreter == "py3"
assert tag.abi == "none"
assert tag.platform == "any"
def test_equality(self):
args = "py3", "none", "any"
assert tags.Tag(*args) == tags.Tag(*args)
def test_equality_fails_with_non_tag(self):
assert not tags.Tag("py3", "none", "any") == "non-tag"
def test_hashing(self, example_tag):
tags = {example_tag} # Should not raise TypeError.
assert example_tag in tags
def test_hash_equality(self, example_tag):
equal_tag = tags.Tag("py3", "none", "any")
assert example_tag == equal_tag # Sanity check.
assert example_tag.__hash__() == equal_tag.__hash__()
def test_str(self, example_tag):
assert str(example_tag) == "py3-none-any"
def test_repr(self, example_tag):
assert repr(example_tag) == "<py3-none-any @ {tag_id}>".format(
tag_id=id(example_tag)
)
def test_attribute_access(self, example_tag):
assert example_tag.interpreter == "py3"
assert example_tag.abi == "none"
assert example_tag.platform == "any"
class TestWarnKeywordOnlyParameter:
def test_no_argument(self):
assert not tags._warn_keyword_parameter("test_warn_keyword_parameters", {})
def test_false(self):
assert not tags._warn_keyword_parameter(
"test_warn_keyword_parameters", {"warn": False}
)
def test_true(self):
assert tags._warn_keyword_parameter(
"test_warn_keyword_parameters", {"warn": True}
)
def test_too_many_arguments(self):
message_re = re.compile(r"too_many.+{!r}".format("whatever"))
with pytest.raises(TypeError, match=message_re):
tags._warn_keyword_parameter("too_many", {"warn": True, "whatever": True})
def test_wrong_argument(self):
message_re = re.compile(r"missing.+{!r}".format("unexpected"))
with pytest.raises(TypeError, match=message_re):
tags._warn_keyword_parameter("missing", {"unexpected": True})
class TestParseTag:
def test_simple(self, example_tag):
parsed_tags = tags.parse_tag(str(example_tag))
assert parsed_tags == {example_tag}
def test_multi_interpreter(self, example_tag):
expected = {example_tag, tags.Tag("py2", "none", "any")}
given = tags.parse_tag("py2.py3-none-any")
assert given == expected
def test_multi_platform(self):
expected = {
tags.Tag("cp37", "cp37m", platform)
for platform in (
"macosx_10_6_intel",
"macosx_10_9_intel",
"macosx_10_9_x86_64",
"macosx_10_10_intel",
"macosx_10_10_x86_64",
)
}
given = tags.parse_tag(
"cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64."
"macosx_10_10_intel.macosx_10_10_x86_64"
)
assert given == expected
class TestInterpreterName:
def test_sys_implementation_name(self, monkeypatch):
class MockImplementation(object):
pass
mock_implementation = MockImplementation()
mock_implementation.name = "sillywalk"
monkeypatch.setattr(sys, "implementation", mock_implementation, raising=False)
assert tags.interpreter_name() == "sillywalk"
def test_platform(self, monkeypatch):
monkeypatch.delattr(sys, "implementation", raising=False)
name = "SillyWalk"
monkeypatch.setattr(platform, "python_implementation", lambda: name)
assert tags.interpreter_name() == name.lower()
def test_interpreter_short_names(self, mock_interpreter_name, monkeypatch):
mock_interpreter_name("cpython")
assert tags.interpreter_name() == "cp"
class TestInterpreterVersion:
def test_warn(self, monkeypatch):
class MockConfigVar(object):
def __init__(self, return_):
self.warn = None
self._return = return_
def __call__(self, name, warn):
self.warn = warn
return self._return
mock_config_var = MockConfigVar("38")
monkeypatch.setattr(tags, "_get_config_var", mock_config_var)
tags.interpreter_version(warn=True)
assert mock_config_var.warn
def test_python_version_nodot(self, monkeypatch):
monkeypatch.setattr(tags, "_get_config_var", lambda var, warn: "NN")
assert tags.interpreter_version() == "NN"
@pytest.mark.parametrize(
"version_info,version_str",
[
((1, 2, 3), "12"),
((1, 12, 3), "1_12"),
((11, 2, 3), "11_2"),
((11, 12, 3), "11_12"),
((1, 2, 13), "12"),
],
)
def test_sys_version_info(self, version_info, version_str, monkeypatch):
monkeypatch.setattr(tags, "_get_config_var", lambda *args, **kwargs: None)
monkeypatch.setattr(sys, "version_info", version_info)
assert tags.interpreter_version() == version_str
class TestMacOSPlatforms:
@pytest.mark.parametrize(
"arch, is_32bit, expected",
[
("i386", True, "i386"),
("ppc", True, "ppc"),
("x86_64", False, "x86_64"),
("x86_64", True, "i386"),
("ppc64", False, "ppc64"),
("ppc64", True, "ppc"),
],
)
def test_architectures(self, arch, is_32bit, expected):
assert tags._mac_arch(arch, is_32bit=is_32bit) == expected
@pytest.mark.parametrize(
"version,arch,expected",
[
(
(10, 15),
"x86_64",
["x86_64", "intel", "fat64", "fat32", "universal2", "universal"],
),
(
(10, 4),
"x86_64",
["x86_64", "intel", "fat64", "fat32", "universal2", "universal"],
),
((10, 3), "x86_64", []),
((10, 15), "i386", ["i386", "intel", "fat32", "fat", "universal"]),
((10, 4), "i386", ["i386", "intel", "fat32", "fat", "universal"]),
((10, 3), "i386", []),
((10, 15), "ppc64", []),
((10, 6), "ppc64", []),
((10, 5), "ppc64", ["ppc64", "fat64", "universal"]),
((10, 3), "ppc64", []),
((10, 15), "ppc", []),
((10, 7), "ppc", []),
((10, 6), "ppc", ["ppc", "fat32", "fat", "universal"]),
((10, 0), "ppc", ["ppc", "fat32", "fat", "universal"]),
((11, 0), "riscv", ["riscv"]),
(
(11, 0),
"x86_64",
["x86_64", "intel", "fat64", "fat32", "universal2", "universal"],
),
((11, 0), "arm64", ["arm64", "universal2"]),
((11, 1), "arm64", ["arm64", "universal2"]),
((12, 0), "arm64", ["arm64", "universal2"]),
],
)
def test_binary_formats(self, version, arch, expected):
assert tags._mac_binary_formats(version, arch) == expected
def test_version_detection(self, monkeypatch):
if platform.system() != "Darwin":
monkeypatch.setattr(
platform, "mac_ver", lambda: ("10.14", ("", "", ""), "x86_64")
)
version = platform.mac_ver()[0].split(".")
expected = "macosx_{major}_{minor}".format(major=version[0], minor=version[1])
platforms = list(tags.mac_platforms(arch="x86_64"))
assert platforms[0].startswith(expected)
@pytest.mark.parametrize("arch", ["x86_64", "i386"])
def test_arch_detection(self, arch, monkeypatch):
if platform.system() != "Darwin" or platform.mac_ver()[2] != arch:
monkeypatch.setattr(
platform, "mac_ver", lambda: ("10.14", ("", "", ""), arch)
)
monkeypatch.setattr(tags, "_mac_arch", lambda *args: arch)
assert next(tags.mac_platforms((10, 14))).endswith(arch)
def test_mac_platforms(self):
platforms = list(tags.mac_platforms((10, 5), "x86_64"))
assert platforms == [
"macosx_10_5_x86_64",
"macosx_10_5_intel",
"macosx_10_5_fat64",
"macosx_10_5_fat32",
"macosx_10_5_universal2",
"macosx_10_5_universal",
"macosx_10_4_x86_64",
"macosx_10_4_intel",
"macosx_10_4_fat64",
"macosx_10_4_fat32",
"macosx_10_4_universal2",
"macosx_10_4_universal",
]
assert len(list(tags.mac_platforms((10, 17), "x86_64"))) == 14 * 6
assert not list(tags.mac_platforms((10, 0), "x86_64"))
@pytest.mark.parametrize("major,minor", [(11, 0), (11, 3), (12, 0), (12, 3)])
def test_macos_11(self, major, minor):
platforms = list(tags.mac_platforms((major, minor), "x86_64"))
assert "macosx_11_0_arm64" not in platforms
assert "macosx_11_0_x86_64" in platforms
assert "macosx_11_3_x86_64" not in platforms
assert "macosx_11_0_universal" in platforms
assert "macosx_11_0_universal2" in platforms
# Mac OS "10.16" is the version number that binaries compiled against an old
# (pre 11.0) SDK will see. It can also be enabled explicitly for a process
# with the environment variable SYSTEM_VERSION_COMPAT=1.
assert "macosx_10_16_x86_64" in platforms
assert "macosx_10_15_x86_64" in platforms
assert "macosx_10_4_x86_64" in platforms
assert "macosx_10_3_x86_64" not in platforms
if major >= 12:
assert "macosx_12_0_x86_64" in platforms
assert "macosx_12_0_universal" in platforms
assert "macosx_12_0_universal2" in platforms
platforms = list(tags.mac_platforms((major, minor), "arm64"))
assert "macosx_11_0_arm64" in platforms
assert "macosx_11_3_arm64" not in platforms
assert "macosx_11_0_universal" not in platforms
assert "macosx_11_0_universal2" in platforms
assert "macosx_10_15_x86_64" not in platforms
assert "macosx_10_4_x86_64" not in platforms
assert "macosx_10_3_x86_64" not in platforms
if major >= 12:
assert "macosx_12_0_arm64" in platforms
assert "macosx_12_0_universal2" in platforms
class TestManylinuxPlatform:
def teardown_method(self):
# Clear the version cache
tags._glibc_version = []
@pytest.mark.parametrize("tf", (True, False))
@pytest.mark.parametrize(
"attribute,glibc", (("1", (2, 5)), ("2010", (2, 12)), ("2014", (2, 17)))
)
def test_module_declaration(
self, monkeypatch, manylinux_module, attribute, glibc, tf
):
manylinux = "manylinux{}_compatible".format(attribute)
monkeypatch.setattr(manylinux_module, manylinux, tf, raising=False)
res = tags._is_manylinux_compatible(manylinux, "x86_64", glibc)
assert tf is res
@pytest.mark.parametrize(
"attribute,glibc", (("1", (2, 5)), ("2010", (2, 12)), ("2014", (2, 17)))
)
def test_module_declaration_missing_attribute(
self, monkeypatch, manylinux_module, attribute, glibc
):
manylinux = "manylinux{}_compatible".format(attribute)
monkeypatch.delattr(manylinux_module, manylinux, raising=False)
assert tags._is_manylinux_compatible(manylinux, "x86_64", glibc)
@pytest.mark.parametrize(
"version,compatible", (((2, 0), True), ((2, 5), True), ((2, 10), False))
)
def test_is_manylinux_compatible_glibc_support(
self, version, compatible, monkeypatch
):
monkeypatch.setitem(sys.modules, "_manylinux", None)
monkeypatch.setattr(tags, "_get_glibc_version", lambda: (2, 5))
assert (
bool(tags._is_manylinux_compatible("manylinux1", "any", version))
== compatible
)
@pytest.mark.parametrize("version_str", ["glibc-2.4.5", "2"])
def test_check_glibc_version_warning(self, version_str):
with warnings.catch_warnings(record=True) as w:
tags._parse_glibc_version(version_str)
assert len(w) == 1
assert issubclass(w[0].category, RuntimeWarning)
@pytest.mark.skipif(not ctypes, reason="requires ctypes")
@pytest.mark.parametrize(
"version_str,expected",
[
# Be very explicit about bytes and Unicode for Python 2 testing.
(b"2.4", "2.4"),
(u"2.4", "2.4"),
],
)
def test_glibc_version_string(self, version_str, expected, monkeypatch):
class LibcVersion:
def __init__(self, version_str):
self.version_str = version_str
def __call__(self):
return version_str
class ProcessNamespace:
def __init__(self, libc_version):
self.gnu_get_libc_version = libc_version
process_namespace = ProcessNamespace(LibcVersion(version_str))
monkeypatch.setattr(ctypes, "CDLL", lambda _: process_namespace)
monkeypatch.setattr(tags, "_glibc_version_string_confstr", lambda: False)
assert tags._glibc_version_string() == expected
del process_namespace.gnu_get_libc_version
assert tags._glibc_version_string() is None
def test_glibc_version_string_confstr(self, monkeypatch):
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
assert tags._glibc_version_string_confstr() == "2.20"
def test_glibc_version_string_fail(self, monkeypatch):
monkeypatch.setattr(os, "confstr", lambda x: None, raising=False)
monkeypatch.setitem(sys.modules, "ctypes", None)
assert tags._glibc_version_string() is None
assert tags._get_glibc_version() == (-1, -1)
@pytest.mark.parametrize(
"failure",
[pretend.raiser(ValueError), pretend.raiser(OSError), lambda x: "XXX"],
)
def test_glibc_version_string_confstr_fail(self, monkeypatch, failure):
monkeypatch.setattr(os, "confstr", failure, raising=False)
assert tags._glibc_version_string_confstr() is None
def test_glibc_version_string_confstr_missing(self, monkeypatch):
monkeypatch.delattr(os, "confstr", raising=False)
assert tags._glibc_version_string_confstr() is None
def test_glibc_version_string_ctypes_missing(self, monkeypatch):
monkeypatch.setitem(sys.modules, "ctypes", None)
assert tags._glibc_version_string_ctypes() is None
def test_glibc_version_string_ctypes_raise_oserror(self, monkeypatch):
def patched_cdll(name):
raise OSError("Dynamic loading not supported")
monkeypatch.setattr(ctypes, "CDLL", patched_cdll)
assert tags._glibc_version_string_ctypes() is None
def test_get_config_var_does_not_log(self, monkeypatch):
debug = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(tags.logger, "debug", debug)
tags._get_config_var("missing")
assert debug.calls == []
def test_get_config_var_does_log(self, monkeypatch):
debug = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(tags.logger, "debug", debug)
tags._get_config_var("missing", warn=True)
assert debug.calls == [
pretend.call(
"Config variable '%s' is unset, Python ABI tag may be incorrect",
"missing",
)
]
@pytest.mark.skipif(platform.system() != "Linux", reason="requires Linux")
def test_is_manylinux_compatible_old(self):
# Assuming no one is running this test with a version of glibc released in
# 1997.
assert tags._is_manylinux_compatible("any", "any", (2, 0))
def test_is_manylinux_compatible(self, monkeypatch):
monkeypatch.setattr(tags, "_glibc_version_string", lambda: "2.4")
assert tags._is_manylinux_compatible("", "any", (2, 4))
def test_glibc_version_string_none(self, monkeypatch):
monkeypatch.setattr(tags, "_glibc_version_string", lambda: None)
assert not tags._is_manylinux_compatible("any", "any", (2, 4))
@pytest.mark.parametrize(
"arch,is_32bit,expected",
[
("linux-x86_64", False, "linux_x86_64"),
("linux-x86_64", True, "linux_i686"),
("linux-aarch64", False, "linux_aarch64"),
("linux-aarch64", True, "linux_armv7l"),
],
)
def test_linux_platforms_32_64bit_on_64bit_os(
self, arch, is_32bit, expected, monkeypatch
):
monkeypatch.setattr(distutils.util, "get_platform", lambda: arch)
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
monkeypatch.setattr(tags, "_is_manylinux_compatible", lambda *args: False)
linux_platform = list(tags._linux_platforms(is_32bit=is_32bit))[-1]
assert linux_platform == expected
def test_linux_platforms_manylinux_unsupported(self, monkeypatch):
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
monkeypatch.setattr(tags, "_is_manylinux_compatible", lambda *args: False)
linux_platform = list(tags._linux_platforms(is_32bit=False))
assert linux_platform == ["linux_x86_64"]
def test_linux_platforms_manylinux1(self, is_x86, monkeypatch):
monkeypatch.setattr(
tags, "_is_manylinux_compatible", lambda name, *args: name == "manylinux1"
)
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
arch = platform.machine()
assert platforms == ["manylinux1_" + arch, "linux_" + arch]
def test_linux_platforms_manylinux2010(self, is_x86, monkeypatch):
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.12", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
arch = platform.machine()
expected = [
"manylinux_2_12_" + arch,
"manylinux2010_" + arch,
"manylinux_2_11_" + arch,
"manylinux_2_10_" + arch,
"manylinux_2_9_" + arch,
"manylinux_2_8_" + arch,
"manylinux_2_7_" + arch,
"manylinux_2_6_" + arch,
"manylinux_2_5_" + arch,
"manylinux1_" + arch,
"linux_" + arch,
]
assert platforms == expected
def test_linux_platforms_manylinux2014(self, is_x86, monkeypatch):
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.17", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
arch = platform.machine()
expected = [
"manylinux_2_17_" + arch,
"manylinux2014_" + arch,
"manylinux_2_16_" + arch,
"manylinux_2_15_" + arch,
"manylinux_2_14_" + arch,
"manylinux_2_13_" + arch,
"manylinux_2_12_" + arch,
"manylinux2010_" + arch,
"manylinux_2_11_" + arch,
"manylinux_2_10_" + arch,
"manylinux_2_9_" + arch,
"manylinux_2_8_" + arch,
"manylinux_2_7_" + arch,
"manylinux_2_6_" + arch,
"manylinux_2_5_" + arch,
"manylinux1_" + arch,
"linux_" + arch,
]
assert platforms == expected
def test_linux_platforms_manylinux2014_armhf_abi(self, monkeypatch):
monkeypatch.setattr(tags, "_glibc_version_string", lambda: "2.30")
monkeypatch.setattr(
tags,
"_is_manylinux_compatible",
lambda name, *args: name == "manylinux2014",
)
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_armv7l")
monkeypatch.setattr(
sys,
"executable",
os.path.join(os.path.dirname(__file__), "hello-world-armv7l-armhf"),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["manylinux2014_armv7l", "linux_armv7l"]
assert platforms == expected
def test_linux_platforms_manylinux2014_i386_abi(self, monkeypatch):
monkeypatch.setattr(tags, "_glibc_version_string", lambda: "2.17")
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(
sys,
"executable",
os.path.join(os.path.dirname(__file__), "hello-world-x86_64-i386"),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = [
"manylinux_2_17_i686",
"manylinux2014_i686",
"manylinux_2_16_i686",
"manylinux_2_15_i686",
"manylinux_2_14_i686",
"manylinux_2_13_i686",
"manylinux_2_12_i686",
"manylinux2010_i686",
"manylinux_2_11_i686",
"manylinux_2_10_i686",
"manylinux_2_9_i686",
"manylinux_2_8_i686",
"manylinux_2_7_i686",
"manylinux_2_6_i686",
"manylinux_2_5_i686",
"manylinux1_i686",
"linux_i686",
]
assert platforms == expected
def test_linux_platforms_manylinux_glibc3(self, monkeypatch):
# test for a future glic 3.x version
monkeypatch.setattr(tags, "_glibc_version_string", lambda: "3.2")
monkeypatch.setattr(tags, "_is_manylinux_compatible", lambda name, *args: True)
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_aarch64")
monkeypatch.setattr(
sys,
"executable",
os.path.join(os.path.dirname(__file__), "hello-world-aarch64"),
)
platforms = list(tags._linux_platforms(is_32bit=False))
expected = (
["manylinux_3_2_aarch64", "manylinux_3_1_aarch64", "manylinux_3_0_aarch64"]
+ ["manylinux_2_{}_aarch64".format(i) for i in range(50, 16, -1)]
+ ["manylinux2014_aarch64", "linux_aarch64"]
)
assert platforms == expected
def test_linux_platforms_manylinux2014_armv6l(self, monkeypatch):
monkeypatch.setattr(
tags, "_is_manylinux_compatible", lambda name, _: name == "manylinux2014"
)
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_armv6l")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["linux_armv6l"]
assert platforms == expected
@pytest.mark.parametrize(
"machine, abi, alt_machine",
[("x86_64", "x32", "i686"), ("armv7l", "armel", "armv7l")],
)
def test_linux_platforms_not_manylinux_abi(
self, monkeypatch, machine, abi, alt_machine
):
monkeypatch.setattr(tags, "_is_manylinux_compatible", lambda name, _: False)
monkeypatch.setattr(
distutils.util, "get_platform", lambda: "linux_{}".format(machine)
)
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__), "hello-world-{}-{}".format(machine, abi)
),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["linux_{}".format(alt_machine)]
assert platforms == expected
@pytest.mark.parametrize(
"machine, abi, elf_class, elf_data, elf_machine",
[
(
"x86_64",
"x32",
tags._ELFFileHeader.ELFCLASS32,
tags._ELFFileHeader.ELFDATA2LSB,
tags._ELFFileHeader.EM_X86_64,
),
(
"x86_64",
"i386",
tags._ELFFileHeader.ELFCLASS32,
tags._ELFFileHeader.ELFDATA2LSB,
tags._ELFFileHeader.EM_386,
),
(
"x86_64",
"amd64",
tags._ELFFileHeader.ELFCLASS64,
tags._ELFFileHeader.ELFDATA2LSB,
tags._ELFFileHeader.EM_X86_64,
),
(
"armv7l",
"armel",
tags._ELFFileHeader.ELFCLASS32,
tags._ELFFileHeader.ELFDATA2LSB,
tags._ELFFileHeader.EM_ARM,
),
(
"armv7l",
"armhf",
tags._ELFFileHeader.ELFCLASS32,
tags._ELFFileHeader.ELFDATA2LSB,
tags._ELFFileHeader.EM_ARM,
),
(
"s390x",
"s390x",
tags._ELFFileHeader.ELFCLASS64,
tags._ELFFileHeader.ELFDATA2MSB,
tags._ELFFileHeader.EM_S390,
),
],
)
def test_get_elf_header(
self, monkeypatch, machine, abi, elf_class, elf_data, elf_machine
):
path = os.path.join(
os.path.dirname(__file__), "hello-world-{}-{}".format(machine, abi)
)
monkeypatch.setattr(sys, "executable", path)
elf_header = tags._get_elf_header()
assert elf_header.e_ident_class == elf_class
assert elf_header.e_ident_data == elf_data
assert elf_header.e_machine == elf_machine
@pytest.mark.parametrize(
"content", [None, "invalid-magic", "invalid-class", "invalid-data", "too-short"]
)
def test_get_elf_header_bad_excutable(self, monkeypatch, content):
if content:
path = os.path.join(
os.path.dirname(__file__), "hello-world-{}".format(content)
)
else:
path = None
monkeypatch.setattr(sys, "executable", path)
assert tags._get_elf_header() is None
def test_is_linux_armhf_not_elf(self, monkeypatch):
monkeypatch.setattr(tags, "_get_elf_header", lambda: None)
assert not tags._is_linux_armhf()
def test_is_linux_i686_not_elf(self, monkeypatch):
monkeypatch.setattr(tags, "_get_elf_header", lambda: None)
assert not tags._is_linux_i686()
@pytest.mark.parametrize(
"platform_name,dispatch_func",
[
("Darwin", "mac_platforms"),
("Linux", "_linux_platforms"),
("Generic", "_generic_platforms"),
],
)
def test__platform_tags(platform_name, dispatch_func, monkeypatch):
expected = ["sillywalk"]
monkeypatch.setattr(platform, "system", lambda: platform_name)
monkeypatch.setattr(tags, dispatch_func, lambda: expected)
assert tags._platform_tags() == expected
class TestCPythonABI:
@pytest.mark.parametrize(
"py_debug,gettotalrefcount,result",
[(1, False, True), (0, False, False), (None, True, True)],
)
def test_debug(self, py_debug, gettotalrefcount, result, monkeypatch):
config = {"Py_DEBUG": py_debug, "WITH_PYMALLOC": 0, "Py_UNICODE_SIZE": 2}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
if gettotalrefcount:
monkeypatch.setattr(sys, "gettotalrefcount", 1, raising=False)
expected = ["cp37d" if result else "cp37"]
assert tags._cpython_abis((3, 7)) == expected
def test_debug_file_extension(self, monkeypatch):
config = {"Py_DEBUG": None}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
monkeypatch.delattr(sys, "gettotalrefcount", raising=False)
monkeypatch.setattr(tags, "EXTENSION_SUFFIXES", {"_d.pyd"})
assert tags._cpython_abis((3, 8)) == ["cp38d", "cp38"]
@pytest.mark.parametrize(
"debug,expected", [(True, ["cp38d", "cp38"]), (False, ["cp38"])]
)
def test__debug_cp38(self, debug, expected, monkeypatch):
config = {"Py_DEBUG": debug}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._cpython_abis((3, 8)) == expected
@pytest.mark.parametrize(
"pymalloc,version,result",
[
(1, (3, 7), True),
(0, (3, 7), False),
(None, (3, 7), True),
(1, (3, 8), False),
],
)
def test_pymalloc(self, pymalloc, version, result, monkeypatch):
config = {"Py_DEBUG": 0, "WITH_PYMALLOC": pymalloc, "Py_UNICODE_SIZE": 2}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
base_abi = "cp{}{}".format(version[0], version[1])
expected = [base_abi + "m" if result else base_abi]
assert tags._cpython_abis(version) == expected
@pytest.mark.parametrize(
"unicode_size,maxunicode,version,result",
[
(4, 0x10FFFF, (3, 2), True),
(2, 0xFFFF, (3, 2), False),
(None, 0x10FFFF, (3, 2), True),
(None, 0xFFFF, (3, 2), False),
(4, 0x10FFFF, (3, 3), False),
],
)
def test_wide_unicode(self, unicode_size, maxunicode, version, result, monkeypatch):
config = {"Py_DEBUG": 0, "WITH_PYMALLOC": 0, "Py_UNICODE_SIZE": unicode_size}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
monkeypatch.setattr(sys, "maxunicode", maxunicode)
base_abi = "cp" + tags._version_nodot(version)
expected = [base_abi + "u" if result else base_abi]
assert tags._cpython_abis(version) == expected
class TestCPythonTags:
def test_iterator_returned(self):
result_iterator = tags.cpython_tags(
(3, 8), ["cp38d", "cp38"], ["plat1", "plat2"]
)
assert isinstance(result_iterator, collections_abc.Iterator)
def test_all_args(self):
result_iterator = tags.cpython_tags(
(3, 11), ["cp3_11d", "cp3_11"], ["plat1", "plat2"]
)
result = list(result_iterator)
assert result == [
tags.Tag("cp3_11", "cp3_11d", "plat1"),
tags.Tag("cp3_11", "cp3_11d", "plat2"),
tags.Tag("cp3_11", "cp3_11", "plat1"),
tags.Tag("cp3_11", "cp3_11", "plat2"),
tags.Tag("cp3_11", "abi3", "plat1"),
tags.Tag("cp3_11", "abi3", "plat2"),
tags.Tag("cp3_11", "none", "plat1"),
tags.Tag("cp3_11", "none", "plat2"),
tags.Tag("cp3_10", "abi3", "plat1"),
tags.Tag("cp3_10", "abi3", "plat2"),
tags.Tag("cp39", "abi3", "plat1"),
tags.Tag("cp39", "abi3", "plat2"),
tags.Tag("cp38", "abi3", "plat1"),
tags.Tag("cp38", "abi3", "plat2"),
tags.Tag("cp37", "abi3", "plat1"),
tags.Tag("cp37", "abi3", "plat2"),
tags.Tag("cp36", "abi3", "plat1"),
tags.Tag("cp36", "abi3", "plat2"),
tags.Tag("cp35", "abi3", "plat1"),
tags.Tag("cp35", "abi3", "plat2"),
tags.Tag("cp34", "abi3", "plat1"),
tags.Tag("cp34", "abi3", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
result_iterator = tags.cpython_tags(
(3, 8), ["cp38d", "cp38"], ["plat1", "plat2"]
)
result = list(result_iterator)
assert result == [
tags.Tag("cp38", "cp38d", "plat1"),
tags.Tag("cp38", "cp38d", "plat2"),
tags.Tag("cp38", "cp38", "plat1"),
tags.Tag("cp38", "cp38", "plat2"),
tags.Tag("cp38", "abi3", "plat1"),
tags.Tag("cp38", "abi3", "plat2"),
tags.Tag("cp38", "none", "plat1"),
tags.Tag("cp38", "none", "plat2"),
tags.Tag("cp37", "abi3", "plat1"),
tags.Tag("cp37", "abi3", "plat2"),
tags.Tag("cp36", "abi3", "plat1"),
tags.Tag("cp36", "abi3", "plat2"),
tags.Tag("cp35", "abi3", "plat1"),
tags.Tag("cp35", "abi3", "plat2"),
tags.Tag("cp34", "abi3", "plat1"),
tags.Tag("cp34", "abi3", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
result = list(tags.cpython_tags((3, 3), ["cp33m"], ["plat1", "plat2"]))
assert result == [
tags.Tag("cp33", "cp33m", "plat1"),
tags.Tag("cp33", "cp33m", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp33", "none", "plat1"),
tags.Tag("cp33", "none", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
def test_python_version_defaults(self):
tag = next(tags.cpython_tags(abis=["abi3"], platforms=["any"]))
interpreter = "cp" + tags._version_nodot(sys.version_info[:2])
assert interpreter == tag.interpreter
def test_abi_defaults(self, monkeypatch):
monkeypatch.setattr(tags, "_cpython_abis", lambda _1, _2: ["cp38"])
result = list(tags.cpython_tags((3, 8), platforms=["any"]))
assert tags.Tag("cp38", "cp38", "any") in result
assert tags.Tag("cp38", "abi3", "any") in result
assert tags.Tag("cp38", "none", "any") in result
def test_abi_defaults_needs_underscore(self, monkeypatch):
monkeypatch.setattr(tags, "_cpython_abis", lambda _1, _2: ["cp3_11"])
result = list(tags.cpython_tags((3, 11), platforms=["any"]))
assert tags.Tag("cp3_11", "cp3_11", "any") in result
assert tags.Tag("cp3_11", "abi3", "any") in result
assert tags.Tag("cp3_11", "none", "any") in result
def test_platforms_defaults(self, monkeypatch):
monkeypatch.setattr(tags, "_platform_tags", lambda: ["plat1"])
result = list(tags.cpython_tags((3, 8), abis=["whatever"]))
assert tags.Tag("cp38", "whatever", "plat1") in result
def test_platforms_defaults_needs_underscore(self, monkeypatch):
monkeypatch.setattr(tags, "_platform_tags", lambda: ["plat1"])
result = list(tags.cpython_tags((3, 11), abis=["whatever"]))
assert tags.Tag("cp3_11", "whatever", "plat1") in result
def test_major_only_python_version(self):
result = list(tags.cpython_tags((3,), ["abi"], ["plat"]))
assert result == [
tags.Tag("cp3", "abi", "plat"),
tags.Tag("cp3", "none", "plat"),
]
def test_major_only_python_version_with_default_abis(self):
result = list(tags.cpython_tags((3,), platforms=["plat"]))
assert result == [tags.Tag("cp3", "none", "plat")]
@pytest.mark.parametrize("abis", [[], ["abi3"], ["none"]])
def test_skip_redundant_abis(self, abis):
results = list(tags.cpython_tags((3, 0), abis=abis, platforms=["any"]))
assert results == [tags.Tag("cp30", "none", "any")]
def test_abi3_python33(self):
results = list(tags.cpython_tags((3, 3), abis=["cp33"], platforms=["plat"]))
assert results == [
tags.Tag("cp33", "cp33", "plat"),
tags.Tag("cp33", "abi3", "plat"),
tags.Tag("cp33", "none", "plat"),
tags.Tag("cp32", "abi3", "plat"),
]
def test_no_excess_abi3_python32(self):
results = list(tags.cpython_tags((3, 2), abis=["cp32"], platforms=["plat"]))
assert results == [
tags.Tag("cp32", "cp32", "plat"),
tags.Tag("cp32", "abi3", "plat"),
tags.Tag("cp32", "none", "plat"),
]
def test_no_abi3_python31(self):
results = list(tags.cpython_tags((3, 1), abis=["cp31"], platforms=["plat"]))
assert results == [
tags.Tag("cp31", "cp31", "plat"),
tags.Tag("cp31", "none", "plat"),
]
def test_no_abi3_python27(self):
results = list(tags.cpython_tags((2, 7), abis=["cp27"], platforms=["plat"]))
assert results == [
tags.Tag("cp27", "cp27", "plat"),
tags.Tag("cp27", "none", "plat"),
]
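# --- Illustrative sketch (added; not part of the upstream test suite) ---
# The tag triples asserted above are what an installer scans, most specific
# first, when picking a wheel.  Assumes the module-level `tags` import used by
# these tests.
def _example_most_preferred_match(available_wheel_tags):
    for tag in tags.cpython_tags((3, 8), ["cp38"], ["any"]):
        if str(tag) in available_wheel_tags:  # e.g. "cp38-cp38-any"
            return tag
    return None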
class TestGenericTags:
@pytest.mark.skipif(
not sysconfig.get_config_var("SOABI"), reason="SOABI not defined"
)
def test__generic_abi_soabi_provided(self):
abi = sysconfig.get_config_var("SOABI").replace(".", "_").replace("-", "_")
assert [abi] == list(tags._generic_abi())
def test__generic_abi(self, monkeypatch):
monkeypatch.setattr(
sysconfig, "get_config_var", lambda key: "cpython-37m-darwin"
)
assert list(tags._generic_abi()) == ["cpython_37m_darwin"]
def test__generic_abi_no_soabi(self, monkeypatch):
monkeypatch.setattr(sysconfig, "get_config_var", lambda key: None)
assert not list(tags._generic_abi())
def test_generic_platforms(self):
platform = distutils.util.get_platform().replace("-", "_")
platform = platform.replace(".", "_")
assert list(tags._generic_platforms()) == [platform]
def test_iterator_returned(self):
result_iterator = tags.generic_tags("sillywalk33", ["abi"], ["plat1", "plat2"])
assert isinstance(result_iterator, collections_abc.Iterator)
def test_all_args(self):
result_iterator = tags.generic_tags("sillywalk33", ["abi"], ["plat1", "plat2"])
result = list(result_iterator)
assert result == [
tags.Tag("sillywalk33", "abi", "plat1"),
tags.Tag("sillywalk33", "abi", "plat2"),
tags.Tag("sillywalk33", "none", "plat1"),
tags.Tag("sillywalk33", "none", "plat2"),
]
@pytest.mark.parametrize("abi", [[], ["none"]])
def test_abi_unspecified(self, abi):
no_abi = list(tags.generic_tags("sillywalk34", abi, ["plat1", "plat2"]))
assert no_abi == [
tags.Tag("sillywalk34", "none", "plat1"),
tags.Tag("sillywalk34", "none", "plat2"),
]
def test_interpreter_default(self, monkeypatch):
monkeypatch.setattr(tags, "interpreter_name", lambda: "sillywalk")
monkeypatch.setattr(tags, "interpreter_version", lambda warn: "NN")
result = list(tags.generic_tags(abis=["none"], platforms=["any"]))
assert result == [tags.Tag("sillywalkNN", "none", "any")]
def test_abis_default(self, monkeypatch):
monkeypatch.setattr(tags, "_generic_abi", lambda: iter(["abi"]))
result = list(tags.generic_tags(interpreter="sillywalk", platforms=["any"]))
assert result == [
tags.Tag("sillywalk", "abi", "any"),
tags.Tag("sillywalk", "none", "any"),
]
def test_platforms_default(self, monkeypatch):
monkeypatch.setattr(tags, "_platform_tags", lambda: ["plat"])
result = list(tags.generic_tags(interpreter="sillywalk", abis=["none"]))
assert result == [tags.Tag("sillywalk", "none", "plat")]
class TestCompatibleTags:
def test_all_args(self):
result = list(tags.compatible_tags((3, 3), "cp33", ["plat1", "plat2"]))
assert result == [
tags.Tag("py33", "none", "plat1"),
tags.Tag("py33", "none", "plat2"),
tags.Tag("py3", "none", "plat1"),
tags.Tag("py3", "none", "plat2"),
tags.Tag("py32", "none", "plat1"),
tags.Tag("py32", "none", "plat2"),
tags.Tag("py31", "none", "plat1"),
tags.Tag("py31", "none", "plat2"),
tags.Tag("py30", "none", "plat1"),
tags.Tag("py30", "none", "plat2"),
tags.Tag("cp33", "none", "any"),
tags.Tag("py33", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py32", "none", "any"),
tags.Tag("py31", "none", "any"),
tags.Tag("py30", "none", "any"),
]
def test_all_args_needs_underscore(self):
result = list(tags.compatible_tags((3, 11), "cp3_11", ["plat1", "plat2"]))
assert result == [
tags.Tag("py3_11", "none", "plat1"),
tags.Tag("py3_11", "none", "plat2"),
tags.Tag("py3", "none", "plat1"),
tags.Tag("py3", "none", "plat2"),
tags.Tag("py3_10", "none", "plat1"),
tags.Tag("py3_10", "none", "plat2"),
tags.Tag("py39", "none", "plat1"),
tags.Tag("py39", "none", "plat2"),
tags.Tag("py38", "none", "plat1"),
tags.Tag("py38", "none", "plat2"),
tags.Tag("py37", "none", "plat1"),
tags.Tag("py37", "none", "plat2"),
tags.Tag("py36", "none", "plat1"),
tags.Tag("py36", "none", "plat2"),
tags.Tag("py35", "none", "plat1"),
tags.Tag("py35", "none", "plat2"),
tags.Tag("py34", "none", "plat1"),
tags.Tag("py34", "none", "plat2"),
tags.Tag("py33", "none", "plat1"),
tags.Tag("py33", "none", "plat2"),
tags.Tag("py32", "none", "plat1"),
tags.Tag("py32", "none", "plat2"),
tags.Tag("py31", "none", "plat1"),
tags.Tag("py31", "none", "plat2"),
tags.Tag("py30", "none", "plat1"),
tags.Tag("py30", "none", "plat2"),
tags.Tag("cp3_11", "none", "any"),
tags.Tag("py3_11", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py3_10", "none", "any"),
tags.Tag("py39", "none", "any"),
tags.Tag("py38", "none", "any"),
tags.Tag("py37", "none", "any"),
tags.Tag("py36", "none", "any"),
tags.Tag("py35", "none", "any"),
tags.Tag("py34", "none", "any"),
tags.Tag("py33", "none", "any"),
tags.Tag("py32", "none", "any"),
tags.Tag("py31", "none", "any"),
tags.Tag("py30", "none", "any"),
]
def test_major_only_python_version(self):
result = list(tags.compatible_tags((3,), "cp33", ["plat"]))
assert result == [
tags.Tag("py3", "none", "plat"),
tags.Tag("cp33", "none", "any"),
tags.Tag("py3", "none", "any"),
]
def test_default_python_version(self, monkeypatch):
monkeypatch.setattr(sys, "version_info", (3, 1))
result = list(tags.compatible_tags(interpreter="cp31", platforms=["plat"]))
assert result == [
tags.Tag("py31", "none", "plat"),
tags.Tag("py3", "none", "plat"),
tags.Tag("py30", "none", "plat"),
tags.Tag("cp31", "none", "any"),
tags.Tag("py31", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py30", "none", "any"),
]
def test_default_python_version_needs_underscore(self, monkeypatch):
monkeypatch.setattr(sys, "version_info", (3, 11))
result = list(tags.compatible_tags(interpreter="cp3_11", platforms=["plat"]))
assert result == [
tags.Tag("py3_11", "none", "plat"),
tags.Tag("py3", "none", "plat"),
tags.Tag("py3_10", "none", "plat"),
tags.Tag("py39", "none", "plat"),
tags.Tag("py38", "none", "plat"),
tags.Tag("py37", "none", "plat"),
tags.Tag("py36", "none", "plat"),
tags.Tag("py35", "none", "plat"),
tags.Tag("py34", "none", "plat"),
tags.Tag("py33", "none", "plat"),
tags.Tag("py32", "none", "plat"),
tags.Tag("py31", "none", "plat"),
tags.Tag("py30", "none", "plat"),
tags.Tag("cp3_11", "none", "any"),
tags.Tag("py3_11", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py3_10", "none", "any"),
tags.Tag("py39", "none", "any"),
tags.Tag("py38", "none", "any"),
tags.Tag("py37", "none", "any"),
tags.Tag("py36", "none", "any"),
tags.Tag("py35", "none", "any"),
tags.Tag("py34", "none", "any"),
tags.Tag("py33", "none", "any"),
tags.Tag("py32", "none", "any"),
tags.Tag("py31", "none", "any"),
tags.Tag("py30", "none", "any"),
]
def test_default_interpreter(self):
result = list(tags.compatible_tags((3, 1), platforms=["plat"]))
assert result == [
tags.Tag("py31", "none", "plat"),
tags.Tag("py3", "none", "plat"),
tags.Tag("py30", "none", "plat"),
tags.Tag("py31", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py30", "none", "any"),
]
def test_default_platforms(self, monkeypatch):
monkeypatch.setattr(tags, "_platform_tags", lambda: iter(["plat", "plat2"]))
result = list(tags.compatible_tags((3, 1), "cp31"))
assert result == [
tags.Tag("py31", "none", "plat"),
tags.Tag("py31", "none", "plat2"),
tags.Tag("py3", "none", "plat"),
tags.Tag("py3", "none", "plat2"),
tags.Tag("py30", "none", "plat"),
tags.Tag("py30", "none", "plat2"),
tags.Tag("cp31", "none", "any"),
tags.Tag("py31", "none", "any"),
tags.Tag("py3", "none", "any"),
tags.Tag("py30", "none", "any"),
]
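# --- Illustrative sketch (added; not part of the upstream test suite) ---
# compatible_tags() is why a pure-Python "py3-none-any" wheel installs on any
# CPython 3.x: that tag always shows up in the fallback lists asserted above.
def _example_pure_python_wheel_supported():
    fallback = {str(t) for t in tags.compatible_tags((3, 8), "cp38", ["manylinux1_x86_64"])}
    return "py3-none-any" in fallback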
class TestSysTags:
def teardown_method(self):
# Clear the version cache
tags._glibc_version = []
@pytest.mark.parametrize(
"name,expected",
[("CPython", "cp"), ("PyPy", "pp"), ("Jython", "jy"), ("IronPython", "ip")],
)
def test_interpreter_name(self, name, expected, mock_interpreter_name):
mock_interpreter_name(name)
assert tags.interpreter_name() == expected
def test_iterator(self):
assert isinstance(tags.sys_tags(), collections_abc.Iterator)
def test_mac_cpython(self, mock_interpreter_name, monkeypatch):
if mock_interpreter_name("CPython"):
monkeypatch.setattr(tags, "_cpython_abis", lambda *a: ["cp33m"])
if platform.system() != "Darwin":
monkeypatch.setattr(platform, "system", lambda: "Darwin")
monkeypatch.setattr(tags, "mac_platforms", lambda: ["macosx_10_5_x86_64"])
abis = tags._cpython_abis(sys.version_info[:2])
platforms = list(tags.mac_platforms())
result = list(tags.sys_tags())
assert len(abis) == 1
assert result[0] == tags.Tag(
"cp" + tags._version_nodot(sys.version_info[:2]), abis[0], platforms[0]
)
assert result[-1] == tags.Tag(
"py" + tags._version_nodot((sys.version_info[0], 0)), "none", "any"
)
def test_windows_cpython(self, mock_interpreter_name, monkeypatch):
if mock_interpreter_name("CPython"):
monkeypatch.setattr(tags, "_cpython_abis", lambda *a: ["cp33m"])
if platform.system() != "Windows":
monkeypatch.setattr(platform, "system", lambda: "Windows")
monkeypatch.setattr(tags, "_generic_platforms", lambda: ["win_amd64"])
abis = list(tags._cpython_abis(sys.version_info[:2]))
platforms = list(tags._generic_platforms())
result = list(tags.sys_tags())
interpreter = "cp" + tags._version_nodot(sys.version_info[:2])
assert len(abis) == 1
expected = tags.Tag(interpreter, abis[0], platforms[0])
assert result[0] == expected
expected = tags.Tag(
"py" + tags._version_nodot((sys.version_info[0], 0)), "none", "any"
)
assert result[-1] == expected
def test_linux_cpython(self, mock_interpreter_name, monkeypatch):
if mock_interpreter_name("CPython"):
monkeypatch.setattr(tags, "_cpython_abis", lambda *a: ["cp33m"])
if platform.system() != "Linux":
monkeypatch.setattr(platform, "system", lambda: "Linux")
monkeypatch.setattr(tags, "_linux_platforms", lambda: ["linux_x86_64"])
abis = list(tags._cpython_abis(sys.version_info[:2]))
platforms = list(tags._linux_platforms())
result = list(tags.sys_tags())
expected_interpreter = "cp" + tags._version_nodot(sys.version_info[:2])
assert len(abis) == 1
assert result[0] == tags.Tag(expected_interpreter, abis[0], platforms[0])
expected = tags.Tag(
"py" + tags._version_nodot((sys.version_info[0], 0)), "none", "any"
)
assert result[-1] == expected
def test_generic(self, monkeypatch):
monkeypatch.setattr(platform, "system", lambda: "Generic")
monkeypatch.setattr(tags, "interpreter_name", lambda: "generic")
result = list(tags.sys_tags())
expected = tags.Tag(
"py" + tags._version_nodot((sys.version_info[0], 0)), "none", "any"
)
assert result[-1] == expected
def test_linux_platforms_manylinux2014_armv6l(self, monkeypatch, manylinux_module):
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_armv6l")
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["linux_armv6l"]
assert platforms == expected
def test_skip_manylinux_2014(self, monkeypatch, manylinux_module):
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_ppc64")
monkeypatch.setattr(tags, "_get_glibc_version", lambda: (2, 20))
monkeypatch.setattr(
manylinux_module, "manylinux2014_compatible", False, raising=False
)
expected = [
"manylinux_2_20_ppc64",
"manylinux_2_19_ppc64",
"manylinux_2_18_ppc64",
# "manylinux2014_ppc64", # this one is skipped
# "manylinux_2_17_ppc64", # this one is also skipped
"linux_ppc64",
]
platforms = list(tags._linux_platforms())
assert platforms == expected
@pytest.mark.parametrize(
"machine, abi, alt_machine",
[("x86_64", "x32", "i686"), ("armv7l", "armel", "armv7l")],
)
def test_linux_platforms_not_manylinux_abi(
self, monkeypatch, manylinux_module, machine, abi, alt_machine
):
monkeypatch.setattr(
distutils.util, "get_platform", lambda: "linux_{}".format(machine)
)
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__), "hello-world-{}-{}".format(machine, abi)
),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["linux_{}".format(alt_machine)]
assert platforms == expected
@pytest.mark.parametrize(
"machine, major, minor, tf", [("x86_64", 2, 20, False), ("s390x", 2, 22, True)]
)
def test_linux_use_manylinux_compatible(
self, monkeypatch, manylinux_module, machine, major, minor, tf
):
def manylinux_compatible(tag_major, tag_minor, tag_arch):
if tag_major == 2 and tag_minor == 22:
return tag_arch == "s390x"
return False
monkeypatch.setattr(tags, "_get_glibc_version", lambda: (major, minor))
monkeypatch.setattr(
distutils.util, "get_platform", lambda: "linux_{}".format(machine)
)
monkeypatch.setattr(
manylinux_module,
"manylinux_compatible",
manylinux_compatible,
raising=False,
)
platforms = list(tags._linux_platforms(is_32bit=False))
if tf:
expected = ["manylinux_2_22_{}".format(machine)]
else:
expected = []
expected.append("linux_{}".format(machine))
assert platforms == expected
def test_linux_use_manylinux_compatible_none(self, monkeypatch, manylinux_module):
def manylinux_compatible(tag_major, tag_minor, tag_arch):
if tag_major == 2 and tag_minor < 25:
return False
return None
monkeypatch.setattr(tags, "_get_glibc_version", lambda: (2, 30))
monkeypatch.setattr(distutils.util, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(
manylinux_module,
"manylinux_compatible",
manylinux_compatible,
raising=False,
)
platforms = list(tags._linux_platforms(is_32bit=False))
expected = [
"manylinux_2_30_x86_64",
"manylinux_2_29_x86_64",
"manylinux_2_28_x86_64",
"manylinux_2_27_x86_64",
"manylinux_2_26_x86_64",
"manylinux_2_25_x86_64",
"linux_x86_64",
]
assert platforms == expected
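# --- Hedged usage sketch (added; not part of the upstream test suite) ---
# Outside the tests, sys_tags() is usually consumed like this: the first tag
# yielded is the most preferred one for the running interpreter.
if __name__ == "__main__":
    supported = list(tags.sys_tags())
    print("supported tag triples:", len(supported))
    print("most preferred:", supported[0])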
| [
"[email protected]"
] | |
f290ff4dd28be8504759cd53e837052886f33c30 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/Triangle_20200731215410.py | fe4e6be1a02b61ab6337d20bc82d3816c5bb2875 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | def triangle(A):
    # Return 1 if any three elements of A can form a triangle, else 0.
    A.sort()
    # After sorting, A[i] <= A[i+1] <= A[i+2], so the triple can form a
    # triangle exactly when A[i] + A[i+1] > A[i+2].
    for i in range(len(A) - 2):
        p = A[i]
        q = A[i + 1]
        r = A[i + 2]
        if p + q > r:
            return 1
    return 0
print(triangle([10, 2, 5, 1, 8, 20]))
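# Worked example (added for illustration): sorted, the input becomes
# [1, 2, 5, 8, 10, 20]; the triple (5, 8, 10) satisfies 5 + 8 > 10, so the
# call above returns 1.  A degenerate input such as [10, 50, 5, 1] has no
# valid triple and returns 0.
assert triangle([10, 2, 5, 1, 8, 20]) == 1
assert triangle([10, 50, 5, 1]) == 0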
"[email protected]"
] | |
9d964dff99e224a5b1083c30488dc44ae8cf0580 | d05e4b5f195d03506360483041c1864895e8f4da | /swe/helper.py | 7ec2819606a2c728509b50d57c6d1d04c0e6db70 | [] | no_license | swesust/backEnd | bf01724429e68c5c1f3d21330c6b3acf617dceff | eb4eae0981d6d88071a11f1b973c83916e6d199e | refs/heads/master | 2021-11-21T16:42:51.275389 | 2019-07-03T22:01:14 | 2019-07-03T22:01:14 | 162,592,471 | 6 | 0 | null | 2021-09-08T00:48:02 | 2018-12-20T14:46:35 | Python | UTF-8 | Python | false | false | 3,507 | py |
from swe import models
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import FileSystemStorage as FSS
from io import BytesIO
from PIL import Image as ImageProcess
from time import time
from os import remove, makedirs
from os.path import isdir
class Image():
"""
storing display images of profiles.
root location : data/
"""
def save(loc, bytesdata):
"""
this function will save an image and return the file location
>>helper.Image.save(location, image bytes file)
locations:
`see` : swe.variable #storage folder locations
# here hid = 8 length hash code generated by teacher email id
to show the image on html templates: <img src="/{{ user.imgsrc }}">
image will rename with current millisecond.type
"""
img,ext = Image.process(bytesdata)
# save the image
f = FSS(location = loc)
filename = str(int(time()*1000000))
filename = filename+'.'+ext
# make sure the dir already exist or not
if isdir(f.location) == False:
makedirs(f.location)
# save in storage
img.save(f.location+'/'+filename, format=ext, quality=90)
# return the storage location
return '/'+loc+'/'+filename
def delete(loc):
"""
delete a file from storage
"""
try:
remove(loc)
return True
except Exception as e:
return False
def process(bytesdata):
"""
retrieve the image file from bytes and resize (1000*x)
"""
img = ImageProcess.open(BytesIO(bytesdata))
size = img.size
if size[1] > 1000:
height = int(float(1000*size[1])/size[0])
return img.resize((1000,height)), img.format
return img, img.format
def is_valid_format(filename):
"""
required image format: .JPG, .PNG, .JPEG
"""
# transform the string to lower character
filename = filename.lower()
# get the last dot index
dotindex = filename.rfind('.')
		# check whether the file has any dot or extension
if dotindex != -1:
# split the file extension
extension = filename[dotindex:len(filename)]
# check valid extensions
if extension == '.jpg':
return True
elif extension == '.png':
return True
elif extension == '.jpeg':
return True
return False
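# --- Hedged usage sketch (added; not part of the original module) ---
# Typical flow for storing an uploaded display image.  `uploaded_file` stands
# in for whatever file object the Django view receives (e.g. request.FILES).
def example_store_display_image(uploaded_file, storage_dir='data/profile'):
    if not Image.is_valid_format(uploaded_file.name):
        raise ValueError('only .jpg, .jpeg and .png uploads are accepted')
    # Image.save() writes the resized file and returns the path to use in an
    # <img src="..."> tag, e.g. "/data/profile/1562191274000000.JPEG".
    return Image.save(storage_dir, uploaded_file.read())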
from hashlib import md5
class Token():
"""
	The Token class generates a token for a particular user and validates
	existing tokens. It is the backbone of the forgot-password functionality
	of this application.
functions:
`get_token(userid, hashed_password)`
to generate a token with a user ID and user hashed password
`get_userid(token)`
to get a user ID from a token
`is_valid(token)`
to check a token is valid or not
"""
def get_token(userid, hashed_password):
hash_token = md5(hashed_password.encode()).hexdigest()
return hash_token[0:16] + userid + hash_token[16:32]
def get_userid(token):
try:
# remove first 16 char
userid = token[16:len(token)]
# remove last 16 char
return userid[0:-16]
except Exception as e:
return None
def is_valid(token):
try:
h1 = token[0:16]
userid = Token.get_userid(token)
# split last 16 chars
h2 = token[-16:]
encode_password = h1+h2
try:
user = models.AuthUser.objects.get(userid=userid)
hashed_password = md5(user.password.encode()).hexdigest()
if encode_password != hashed_password:
return False
return True
except ObjectDoesNotExist as e:
return False
except Exception as e:
return False
from uuid import uuid4
def new_password_request():
return uuid4().hex
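# --- Hedged illustration (added; not part of the original module) ---
# The forgot-password token round-trip.  The user id and hashed password are
# made-up values; is_valid() additionally checks the AuthUser table.
def example_token_round_trip():
    fake_userid = '2014331001'
    fake_hashed_password = 'a-hashed-password-string'
    token = Token.get_token(fake_userid, fake_hashed_password)
    # get_token() wraps the user id in the two 16-character halves of an md5
    # digest, so get_userid() can strip them back off.
    assert Token.get_userid(token) == fake_userid
    return Token.is_valid(token)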
| [
"[email protected]"
] | |
1611270234bf7a281288c63ea3b1f053d5fad8f5 | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM15/IEC61970/Informative/InfLocations/LandProperty.py | 1506c9c2dae654fc5114f761e075d8afd906fa91 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 10,945 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class LandProperty(IdentifiedObject):
"""Information about a particular piece of (land) property such as its use. Ownership of the property may be determined through associations to Organisations and/or ErpPersons.Information about a particular piece of (land) property such as its use. Ownership of the property may be determined through associations to Organisations and/or ErpPersons.
"""
def __init__(self, kind="store", externalRecordReference='', demographicKind="other", status=None, LocationGrants=None, ErpSiteLevelDatas=None, ErpPersonRoles=None, ErpOrganisationRoles=None, AssetContainers=None, Locations=None, RightOfWays=None, *args, **kw_args):
"""Initialises a new 'LandProperty' instance.
@param kind: Kind of (land) property, categorised according to its main functional use from the utility's perspective. Values are: "store", "customerPremise", "building", "external", "gridSupplyPoint", "substation", "depot"
@param externalRecordReference: Reference allocated by the governing organisation (such as municipality) to this piece of land that has a formal reference to Surveyor General's records. The governing organisation is specified in associated Organisation.
@param demographicKind: Demographics around the site. Values are: "other", "urban", "rural"
@param status:
@param LocationGrants: All location grants this land property has.
@param ErpSiteLevelDatas:
@param ErpPersonRoles:
@param ErpOrganisationRoles:
@param AssetContainers:
@param Locations: The spatail description of a piece of property.
@param RightOfWays: All rights of way this land property has.
"""
#: Kind of (land) property, categorised according to its main functional use from the utility's perspective. Values are: "store", "customerPremise", "building", "external", "gridSupplyPoint", "substation", "depot"
self.kind = kind
#: Reference allocated by the governing organisation (such as municipality) to this piece of land that has a formal reference to Surveyor General's records. The governing organisation is specified in associated Organisation.
self.externalRecordReference = externalRecordReference
#: Demographics around the site. Values are: "other", "urban", "rural"
self.demographicKind = demographicKind
self.status = status
self._LocationGrants = []
self.LocationGrants = [] if LocationGrants is None else LocationGrants
self._ErpSiteLevelDatas = []
self.ErpSiteLevelDatas = [] if ErpSiteLevelDatas is None else ErpSiteLevelDatas
self._ErpPersonRoles = []
self.ErpPersonRoles = [] if ErpPersonRoles is None else ErpPersonRoles
self._ErpOrganisationRoles = []
self.ErpOrganisationRoles = [] if ErpOrganisationRoles is None else ErpOrganisationRoles
self._AssetContainers = []
self.AssetContainers = [] if AssetContainers is None else AssetContainers
self._Locations = []
self.Locations = [] if Locations is None else Locations
self._RightOfWays = []
self.RightOfWays = [] if RightOfWays is None else RightOfWays
super(LandProperty, self).__init__(*args, **kw_args)
_attrs = ["kind", "externalRecordReference", "demographicKind"]
_attr_types = {"kind": str, "externalRecordReference": str, "demographicKind": str}
_defaults = {"kind": "store", "externalRecordReference": '', "demographicKind": "other"}
_enums = {"kind": "LandPropertyKind", "demographicKind": "DemographicKind"}
_refs = ["status", "LocationGrants", "ErpSiteLevelDatas", "ErpPersonRoles", "ErpOrganisationRoles", "AssetContainers", "Locations", "RightOfWays"]
_many_refs = ["LocationGrants", "ErpSiteLevelDatas", "ErpPersonRoles", "ErpOrganisationRoles", "AssetContainers", "Locations", "RightOfWays"]
status = None
def getLocationGrants(self):
"""All location grants this land property has.
"""
return self._LocationGrants
def setLocationGrants(self, value):
for x in self._LocationGrants:
x.LandProperty = None
for y in value:
y._LandProperty = self
self._LocationGrants = value
LocationGrants = property(getLocationGrants, setLocationGrants)
def addLocationGrants(self, *LocationGrants):
for obj in LocationGrants:
obj.LandProperty = self
def removeLocationGrants(self, *LocationGrants):
for obj in LocationGrants:
obj.LandProperty = None
def getErpSiteLevelDatas(self):
return self._ErpSiteLevelDatas
def setErpSiteLevelDatas(self, value):
for x in self._ErpSiteLevelDatas:
x.LandProperty = None
for y in value:
y._LandProperty = self
self._ErpSiteLevelDatas = value
ErpSiteLevelDatas = property(getErpSiteLevelDatas, setErpSiteLevelDatas)
def addErpSiteLevelDatas(self, *ErpSiteLevelDatas):
for obj in ErpSiteLevelDatas:
obj.LandProperty = self
def removeErpSiteLevelDatas(self, *ErpSiteLevelDatas):
for obj in ErpSiteLevelDatas:
obj.LandProperty = None
def getErpPersonRoles(self):
return self._ErpPersonRoles
def setErpPersonRoles(self, value):
for x in self._ErpPersonRoles:
x.LandProperty = None
for y in value:
y._LandProperty = self
self._ErpPersonRoles = value
ErpPersonRoles = property(getErpPersonRoles, setErpPersonRoles)
def addErpPersonRoles(self, *ErpPersonRoles):
for obj in ErpPersonRoles:
obj.LandProperty = self
def removeErpPersonRoles(self, *ErpPersonRoles):
for obj in ErpPersonRoles:
obj.LandProperty = None
def getErpOrganisationRoles(self):
return self._ErpOrganisationRoles
def setErpOrganisationRoles(self, value):
for p in self._ErpOrganisationRoles:
filtered = [q for q in p.LandProperty if q != self]
            p._LandProperty = filtered
for r in value:
if self not in r._LandProperty:
r._LandProperty.append(self)
self._ErpOrganisationRoles = value
ErpOrganisationRoles = property(getErpOrganisationRoles, setErpOrganisationRoles)
def addErpOrganisationRoles(self, *ErpOrganisationRoles):
for obj in ErpOrganisationRoles:
if self not in obj._LandProperty:
obj._LandProperty.append(self)
self._ErpOrganisationRoles.append(obj)
def removeErpOrganisationRoles(self, *ErpOrganisationRoles):
for obj in ErpOrganisationRoles:
if self in obj._LandProperty:
obj._LandProperty.remove(self)
self._ErpOrganisationRoles.remove(obj)
def getAssetContainers(self):
return self._AssetContainers
def setAssetContainers(self, value):
for p in self._AssetContainers:
filtered = [q for q in p.LandProperties if q != self]
            p._LandProperties = filtered
for r in value:
if self not in r._LandProperties:
r._LandProperties.append(self)
self._AssetContainers = value
AssetContainers = property(getAssetContainers, setAssetContainers)
def addAssetContainers(self, *AssetContainers):
for obj in AssetContainers:
if self not in obj._LandProperties:
obj._LandProperties.append(self)
self._AssetContainers.append(obj)
def removeAssetContainers(self, *AssetContainers):
for obj in AssetContainers:
if self in obj._LandProperties:
obj._LandProperties.remove(self)
self._AssetContainers.remove(obj)
def getLocations(self):
"""The spatail description of a piece of property.
"""
return self._Locations
def setLocations(self, value):
for p in self._Locations:
filtered = [q for q in p.LandProperties if q != self]
            p._LandProperties = filtered
for r in value:
if self not in r._LandProperties:
r._LandProperties.append(self)
self._Locations = value
Locations = property(getLocations, setLocations)
def addLocations(self, *Locations):
for obj in Locations:
if self not in obj._LandProperties:
obj._LandProperties.append(self)
self._Locations.append(obj)
def removeLocations(self, *Locations):
for obj in Locations:
if self in obj._LandProperties:
obj._LandProperties.remove(self)
self._Locations.remove(obj)
def getRightOfWays(self):
"""All rights of way this land property has.
"""
return self._RightOfWays
def setRightOfWays(self, value):
for p in self._RightOfWays:
filtered = [q for q in p.LandProperties if q != self]
            p._LandProperties = filtered
for r in value:
if self not in r._LandProperties:
r._LandProperties.append(self)
self._RightOfWays = value
RightOfWays = property(getRightOfWays, setRightOfWays)
def addRightOfWays(self, *RightOfWays):
for obj in RightOfWays:
if self not in obj._LandProperties:
obj._LandProperties.append(self)
self._RightOfWays.append(obj)
def removeRightOfWays(self, *RightOfWays):
for obj in RightOfWays:
if self in obj._LandProperties:
obj._LandProperties.remove(self)
self._RightOfWays.remove(obj)
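# --- Hedged usage sketch (added; not part of the generated CIM profile) ---
# How the many-to-many Locations association is kept consistent.  `location`
# is assumed to be a CIM15 Location instance from the same package.
def _example_link_location(location):
    prop = LandProperty(kind="substation", demographicKind="urban")
    # addLocations() appends to prop._Locations and registers `prop` on the
    # Location side, so both ends of the association stay in sync.
    prop.addLocations(location)
    assert prop in location._LandProperties
    return prop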
| [
"[email protected]"
] | |
6f2bc69f6325b24048fbf182011b336ea9ff6e4e | cfa35dc2ea93ee0eceb2399a9e6112e987579c09 | /stonesoup/reader/tests/test_opensky.py | 82d07f42184777b38386c2e114ca28d738c58229 | [
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] | permissive | dstl/Stone-Soup | 227e6a9e6fbdceca14af3f0259f311ec74095597 | f24090cc919b3b590b84f965a3884ed1293d181d | refs/heads/main | 2023-09-01T14:33:14.626428 | 2023-09-01T11:35:46 | 2023-09-01T11:35:46 | 98,420,803 | 315 | 126 | MIT | 2023-09-14T14:55:34 | 2017-07-26T12:34:28 | Python | UTF-8 | Python | false | false | 1,326 | py | import pytest
from ..opensky import OpenSkyNetworkDetectionReader, OpenSkyNetworkGroundTruthReader
pytestmark = pytest.mark.remote_data
@pytest.mark.parametrize(
'reader_type',
(OpenSkyNetworkDetectionReader, OpenSkyNetworkGroundTruthReader))
@pytest.mark.parametrize(
'bbox',
[None, (-7.57216793459, 49.959999905, 1.68153079591, 58.6350001085)],
ids=['None', 'GB'])
def test_opensky_reader(reader_type, bbox):
reader = reader_type(bbox)
prev_time = None
for n, (time, states) in enumerate(reader, 1):
if prev_time is not None:
assert time > prev_time
prev_time = time
for state in states:
if bbox:
assert bbox[0] < state.state_vector[0] < bbox[2]
assert bbox[1] < state.state_vector[1] < bbox[3]
            # When using the GroundTruthReader and the ID looks like an ICAO24
            # address (paths without an ICAO24 are ignored here).
            if issubclass(reader_type, OpenSkyNetworkGroundTruthReader) and len(state.id) == 6:
assert all(sub_state.metadata['icao24'] == state.id for sub_state in state)
        if issubclass(reader_type, OpenSkyNetworkGroundTruthReader):
assert any(len(path) == n for path in states)
assert all(len(path) <= n for path in states)
if n > 3:
break
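# --- Hedged example (added; not part of the original test module) ---
# The bbox tuple is interpreted as (min_lon, min_lat, max_lon, max_lat), which
# is what the state_vector assertions above rely on.  Note this hits the live
# OpenSky Network API (hence the remote_data marker on the real tests).
if __name__ == "__main__":
    gb_bbox = (-7.57216793459, 49.959999905, 1.68153079591, 58.6350001085)
    first_time, first_states = next(iter(OpenSkyNetworkDetectionReader(gb_bbox)))
    print(first_time, len(first_states), "detections over Great Britain")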
| [
"[email protected]"
] | |
b6d090f6e0c520b5335e366c013a6a95871dfb02 | b05685baab270b50918c49c1e25d3aef90f0e83d | /UNIV-2016/1-29-16.py | f07a9382203ea83de48a71657283bce4130ad3f7 | [] | no_license | daxaxelrod/notes-from-4th-semester | 1ec25a0503d608bc50e20dd5d12e612c78b27f25 | ba2c257271bc00ce8dd7a2a5d5984069ac5ae4d8 | refs/heads/master | 2016-08-11T12:37:46.605993 | 2016-02-18T00:26:59 | 2016-02-18T00:26:59 | 51,965,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | Univ 280C
Venture accelerator 2
Woot fucking woot
the job market millennials face
25% actively engaged
55% not engaged in their work
20% actively disengaged at work
Important principles from last class
PITCH INCESSANTLY
80/20 rule
learn from failure
be aware of change and pain
those little victory moments are in lieu of compensation
perspective changes are much larger than the changes themselves
Slide on the team
need diverse teams
this was terrifying last year
Phrase loom in terms of change
slide ideas
number of gaming hours spent on steam
time spent in open world fields
change is the transition to dynamic worlds: set an underlying story line but don't tether the user to that story
characterizing the pain is easy
What is loom as far as categorizing goes
market pull
tech push
design driven
idea started
Personal knowledge database
take all class notes
throw it into some searchable DB
Pitch notes
make it a story
keep it less than 60 seconds
course goals
launch Venture
serves as a backup career option
launch in an emergent economic ecosystem
upstate ny
midwest
learn frugal innovation
jugaad innovation
"one of the most important things is to have at least 10 minutes of laughter per hour"
| [
"[email protected]"
] | |
6c5bcffedb01927008252fa07772bd2336994e6b | 31a0b0749c30ff37c3a72592387f9d8195de4bd6 | /rllib/agents/sac/rnnsac_torch_policy.py | 8418f88b165cadcf4dab4ca3b8deb974de66af01 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | longshotsyndicate/ray | 15100bad514b602a3fa39bfe205288e7bec75d90 | 3341fae573868338b665bcea8a1c4ee86b702751 | refs/heads/master | 2023-01-28T15:16:00.401509 | 2022-02-18T05:35:47 | 2022-02-18T05:35:47 | 163,961,795 | 1 | 1 | Apache-2.0 | 2023-01-14T08:01:02 | 2019-01-03T11:03:35 | Python | UTF-8 | Python | false | false | 18,307 | py | import gym
import numpy as np
from typing import List, Optional, Tuple, Type, Union
import ray
from ray.rllib.agents.dqn.dqn_tf_policy import PRIO_WEIGHTS
from ray.rllib.agents.sac import SACTorchPolicy
from ray.rllib.agents.sac.rnnsac_torch_model import RNNSACTorchModel
from ray.rllib.agents.sac.sac_torch_policy import _get_dist_class
from ray.rllib.models import ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import huber_loss, sequence_mask
from ray.rllib.utils.typing import ModelInputDict, TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
def build_rnnsac_model(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> ModelV2:
"""Constructs the necessary ModelV2 for the Policy and returns it.
Args:
policy (Policy): The TFPolicy that will use the models.
obs_space (gym.spaces.Space): The observation space.
action_space (gym.spaces.Space): The action space.
config (TrainerConfigDict): The SAC trainer's config dict.
Returns:
ModelV2: The ModelV2 to be used by the Policy. Note: An additional
target model will be created in this function and assigned to
`policy.target_model`.
"""
# With separate state-preprocessor (before obs+action concat).
num_outputs = int(np.product(obs_space.shape))
# Force-ignore any additionally provided hidden layer sizes.
# Everything should be configured using SAC's "Q_model" and "policy_model"
# settings.
policy_model_config = MODEL_DEFAULTS.copy()
policy_model_config.update(config["policy_model"])
q_model_config = MODEL_DEFAULTS.copy()
q_model_config.update(config["Q_model"])
default_model_cls = RNNSACTorchModel
model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(model, default_model_cls)
# Create an exact copy of the model and store it in `policy.target_model`.
# This will be used for tau-synched Q-target models that run behind the
# actual Q-networks and are used for target q-value calculations in the
# loss terms.
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="target_sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(policy.target_model, default_model_cls)
return model
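# --- Hedged illustration (added; not part of the original module) ---
# build_rnnsac_model() expects the trainer config to carry "policy_model" and
# "Q_model" sub-dicts that override MODEL_DEFAULTS.  The values below are
# examples only, not RLlib defaults.
_EXAMPLE_RNNSAC_MODEL_OVERRIDES = {
    "policy_model": {"use_lstm": True, "lstm_cell_size": 64},
    "Q_model": {"use_lstm": True, "lstm_cell_size": 64},
}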
def build_sac_model_and_action_dist(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> Tuple[ModelV2, Type[TorchDistributionWrapper]]:
"""Constructs the necessary ModelV2 and action dist class for the Policy.
Args:
policy (Policy): The TFPolicy that will use the models.
obs_space (gym.spaces.Space): The observation space.
action_space (gym.spaces.Space): The action space.
config (TrainerConfigDict): The SAC trainer's config dict.
Returns:
ModelV2: The ModelV2 to be used by the Policy. Note: An additional
target model will be created in this function and assigned to
`policy.target_model`.
"""
model = build_rnnsac_model(policy, obs_space, action_space, config)
assert (
model.get_initial_state() != []
), "RNNSAC requires its model to be a recurrent one!"
action_dist_class = _get_dist_class(policy, config, action_space)
return model, action_dist_class
def action_distribution_fn(
policy: Policy,
model: ModelV2,
input_dict: ModelInputDict,
*,
state_batches: Optional[List[TensorType]] = None,
seq_lens: Optional[TensorType] = None,
prev_action_batch: Optional[TensorType] = None,
prev_reward_batch=None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
is_training: Optional[bool] = None
) -> Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]]:
"""The action distribution function to be used the algorithm.
An action distribution function is used to customize the choice of action
distribution class and the resulting action distribution inputs (to
parameterize the distribution object).
After parameterizing the distribution, a `sample()` call
will be made on it to generate actions.
Args:
policy (Policy): The Policy being queried for actions and calling this
function.
model (TorchModelV2): The SAC specific Model to use to generate the
distribution inputs (see sac_tf|torch_model.py). Must support the
`get_policy_output` method.
input_dict (ModelInputDict): The input-dict to be used for the model
call.
state_batches (Optional[List[TensorType]]): The list of internal state
tensor batches.
seq_lens (Optional[TensorType]): The tensor of sequence lengths used
in RNNs.
prev_action_batch (Optional[TensorType]): Optional batch of prev
actions used by the model.
prev_reward_batch (Optional[TensorType]): Optional batch of prev
rewards used by the model.
explore (Optional[bool]): Whether to activate exploration or not. If
None, use value of `config.explore`.
timestep (Optional[int]): An optional timestep.
is_training (Optional[bool]): An optional is-training flag.
Returns:
Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]]:
The dist inputs, dist class, and a list of internal state outputs
(in the RNN case).
"""
# Get base-model output (w/o the SAC specific parts of the network).
model_out, state_in = model(input_dict, state_batches, seq_lens)
# Use the base output to get the policy outputs from the SAC model's
# policy components.
states_in = model.select_state(state_in, ["policy", "q", "twin_q"])
distribution_inputs, policy_state_out = model.get_policy_output(
model_out, states_in["policy"], seq_lens
)
_, q_state_out = model.get_q_values(model_out, states_in["q"], seq_lens)
if model.twin_q_net:
_, twin_q_state_out = model.get_twin_q_values(
model_out, states_in["twin_q"], seq_lens
)
else:
twin_q_state_out = []
# Get a distribution class to be used with the just calculated dist-inputs.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
states_out = policy_state_out + q_state_out + twin_q_state_out
return distribution_inputs, action_dist_class, states_out
def actor_critic_loss(
policy: Policy,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss for the Soft Actor Critic.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
dist_class (Type[TorchDistributionWrapper]: The action distr. class.
train_batch (SampleBatch): The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
target_model = policy.target_models[model]
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
i = 0
state_batches = []
while "state_in_{}".format(i) in train_batch:
state_batches.append(train_batch["state_in_{}".format(i)])
i += 1
assert state_batches
seq_lens = train_batch.get(SampleBatch.SEQ_LENS)
model_out_t, state_in_t = model(
SampleBatch(
obs=train_batch[SampleBatch.CUR_OBS],
prev_actions=train_batch[SampleBatch.PREV_ACTIONS],
prev_rewards=train_batch[SampleBatch.PREV_REWARDS],
_is_training=True,
),
state_batches,
seq_lens,
)
states_in_t = model.select_state(state_in_t, ["policy", "q", "twin_q"])
model_out_tp1, state_in_tp1 = model(
SampleBatch(
obs=train_batch[SampleBatch.NEXT_OBS],
prev_actions=train_batch[SampleBatch.ACTIONS],
prev_rewards=train_batch[SampleBatch.REWARDS],
_is_training=True,
),
state_batches,
seq_lens,
)
states_in_tp1 = model.select_state(state_in_tp1, ["policy", "q", "twin_q"])
target_model_out_tp1, target_state_in_tp1 = target_model(
SampleBatch(
obs=train_batch[SampleBatch.NEXT_OBS],
prev_actions=train_batch[SampleBatch.ACTIONS],
prev_rewards=train_batch[SampleBatch.REWARDS],
_is_training=True,
),
state_batches,
seq_lens,
)
target_states_in_tp1 = target_model.select_state(
        target_state_in_tp1, ["policy", "q", "twin_q"]
)
alpha = torch.exp(model.log_alpha)
# Discrete case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
log_pis_t = F.log_softmax(
model.get_policy_output(model_out_t, states_in_t["policy"], seq_lens)[0],
dim=-1,
)
policy_t = torch.exp(log_pis_t)
log_pis_tp1 = F.log_softmax(
model.get_policy_output(model_out_tp1, states_in_tp1["policy"], seq_lens)[
0
],
-1,
)
policy_tp1 = torch.exp(log_pis_tp1)
# Q-values.
q_t = model.get_q_values(model_out_t, states_in_t["q"], seq_lens)[0]
# Target Q-values.
q_tp1 = target_model.get_q_values(
target_model_out_tp1, target_states_in_tp1["q"], seq_lens
)[0]
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t, states_in_t["twin_q"], seq_lens
)[0]
twin_q_tp1 = target_model.get_twin_q_values(
target_model_out_tp1, target_states_in_tp1["twin_q"], seq_lens
)[0]
q_tp1 = torch.min(q_tp1, twin_q_tp1)
q_tp1 -= alpha * log_pis_tp1
# Actually selected Q-values (from the actions batch).
one_hot = F.one_hot(
train_batch[SampleBatch.ACTIONS].long(), num_classes=q_t.size()[-1]
)
q_t_selected = torch.sum(q_t * one_hot, dim=-1)
if policy.config["twin_q"]:
twin_q_t_selected = torch.sum(twin_q_t * one_hot, dim=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = torch.sum(torch.mul(policy_tp1, q_tp1), dim=-1)
q_tp1_best_masked = (1.0 - train_batch[SampleBatch.DONES].float()) * q_tp1_best
# Continuous actions case.
else:
# Sample single actions from distribution.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
action_dist_t = action_dist_class(
model.get_policy_output(model_out_t, states_in_t["policy"], seq_lens)[0],
model,
)
policy_t = (
action_dist_t.sample()
if not deterministic
else action_dist_t.deterministic_sample()
)
log_pis_t = torch.unsqueeze(action_dist_t.logp(policy_t), -1)
action_dist_tp1 = action_dist_class(
model.get_policy_output(model_out_tp1, states_in_tp1["policy"], seq_lens)[
0
],
model,
)
policy_tp1 = (
action_dist_tp1.sample()
if not deterministic
else action_dist_tp1.deterministic_sample()
)
log_pis_tp1 = torch.unsqueeze(action_dist_tp1.logp(policy_tp1), -1)
# Q-values for the actually selected actions.
q_t = model.get_q_values(
model_out_t, states_in_t["q"], seq_lens, train_batch[SampleBatch.ACTIONS]
)[0]
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t,
states_in_t["twin_q"],
seq_lens,
train_batch[SampleBatch.ACTIONS],
)[0]
# Q-values for current policy in given current state.
q_t_det_policy = model.get_q_values(
model_out_t, states_in_t["q"], seq_lens, policy_t
)[0]
if policy.config["twin_q"]:
twin_q_t_det_policy = model.get_twin_q_values(
model_out_t, states_in_t["twin_q"], seq_lens, policy_t
)[0]
q_t_det_policy = torch.min(q_t_det_policy, twin_q_t_det_policy)
# Target q network evaluation.
q_tp1 = target_model.get_q_values(
target_model_out_tp1, target_states_in_tp1["q"], seq_lens, policy_tp1
)[0]
if policy.config["twin_q"]:
twin_q_tp1 = target_model.get_twin_q_values(
target_model_out_tp1,
target_states_in_tp1["twin_q"],
seq_lens,
policy_tp1,
)[0]
# Take min over both twin-NNs.
q_tp1 = torch.min(q_tp1, twin_q_tp1)
q_t_selected = torch.squeeze(q_t, dim=-1)
if policy.config["twin_q"]:
twin_q_t_selected = torch.squeeze(twin_q_t, dim=-1)
q_tp1 -= alpha * log_pis_tp1
q_tp1_best = torch.squeeze(input=q_tp1, dim=-1)
q_tp1_best_masked = (1.0 - train_batch[SampleBatch.DONES].float()) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = (
train_batch[SampleBatch.REWARDS]
+ (policy.config["gamma"] ** policy.config["n_step"]) * q_tp1_best_masked
).detach()
# BURNIN #
B = state_batches[0].shape[0]
T = q_t_selected.shape[0] // B
seq_mask = sequence_mask(train_batch[SampleBatch.SEQ_LENS], T)
# Mask away also the burn-in sequence at the beginning.
burn_in = policy.config["burn_in"]
if burn_in > 0 and burn_in < T:
seq_mask[:, :burn_in] = False
seq_mask = seq_mask.reshape(-1)
num_valid = torch.sum(seq_mask)
def reduce_mean_valid(t):
return torch.sum(t[seq_mask]) / num_valid
# Compute the TD-error (potentially clipped).
base_td_error = torch.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = torch.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
critic_loss = [
reduce_mean_valid(train_batch[PRIO_WEIGHTS] * huber_loss(base_td_error))
]
if policy.config["twin_q"]:
critic_loss.append(
reduce_mean_valid(train_batch[PRIO_WEIGHTS] * huber_loss(twin_td_error))
)
td_error = td_error * seq_mask
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
weighted_log_alpha_loss = policy_t.detach() * (
-model.log_alpha * (log_pis_t + model.target_entropy).detach()
)
# Sum up weighted terms and mean over all batch items.
alpha_loss = reduce_mean_valid(torch.sum(weighted_log_alpha_loss, dim=-1))
# Actor loss.
actor_loss = reduce_mean_valid(
torch.sum(
torch.mul(
# NOTE: No stop_grad around policy output here
# (compare with q_t_det_policy for continuous case).
policy_t,
alpha.detach() * log_pis_t - q_t.detach(),
),
dim=-1,
)
)
else:
alpha_loss = -reduce_mean_valid(
model.log_alpha * (log_pis_t + model.target_entropy).detach()
)
# Note: Do not detach q_t_det_policy here b/c is depends partly
# on the policy vars (policy sample pushed through Q-net).
# However, we must make sure `actor_loss` is not used to update
# the Q-net(s)' variables.
actor_loss = reduce_mean_valid(alpha.detach() * log_pis_t - q_t_det_policy)
# Store values for stats function in model (tower), such that for
# multi-GPU, we do not override them during the parallel loss phase.
model.tower_stats["q_t"] = q_t * seq_mask[..., None]
model.tower_stats["policy_t"] = policy_t * seq_mask[..., None]
model.tower_stats["log_pis_t"] = log_pis_t * seq_mask[..., None]
model.tower_stats["actor_loss"] = actor_loss
model.tower_stats["critic_loss"] = critic_loss
model.tower_stats["alpha_loss"] = alpha_loss
# Store per time chunk (b/c we need only one mean
# prioritized replay weight per stored sequence).
model.tower_stats["td_error"] = torch.mean(td_error.reshape([-1, T]), dim=-1)
# Return all loss terms corresponding to our optimizers.
return tuple([actor_loss] + critic_loss + [alpha_loss])
RNNSACTorchPolicy = SACTorchPolicy.with_updates(
name="RNNSACPolicy",
get_default_config=lambda: ray.rllib.agents.sac.rnnsac.DEFAULT_CONFIG,
action_distribution_fn=action_distribution_fn,
make_model_and_action_dist=build_sac_model_and_action_dist,
loss_fn=actor_critic_loss,
)
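# --- Hedged usage sketch (added) ---
# RNNSACTorchPolicy is normally instantiated by the RNNSAC trainer rather than
# directly; the import path and config keys below are assumptions for
# illustration, not verified against a specific Ray release.
if __name__ == "__main__":
    from ray.rllib.agents.sac import RNNSACTrainer  # assumed export
    trainer = RNNSACTrainer(env="CartPole-v1", config={"framework": "torch"})
    print(trainer.train()["episode_reward_mean"])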
| [
"[email protected]"
] | |
199c9d62f0c02ac09f43a77d38a29026977066d8 | a5e71a333a86476b9cb1bdf6989bb5f47dd5e409 | /ScrapePlugins/M/KissLoader/ContentLoader.py | 3b88b158cb8837c710fe9f4fc8942aa693f906c4 | [] | no_license | GDXN/MangaCMS | 0e797299f12c48986fda5f2e7de448c2934a62bd | 56be0e2e9a439151ae5302b3e6ceddc7868d8942 | refs/heads/master | 2021-01-18T11:40:51.993195 | 2017-07-22T12:55:32 | 2017-07-22T12:55:32 | 21,105,690 | 6 | 1 | null | 2017-07-22T12:55:33 | 2014-06-22T21:13:19 | Python | UTF-8 | Python | false | false | 8,123 | py |
import logSetup
import runStatus
if __name__ == "__main__":
runStatus.preloadDicts = False
import webFunctions
import settings
import os
import os.path
import nameTools as nt
import time
import sys
import urllib.parse
import html.parser
import zipfile
import traceback
import bs4
import re
import json
import ScrapePlugins.RetreivalBase
from mimetypes import guess_extension
from concurrent.futures import ThreadPoolExecutor
import ScrapePlugins.ScrapeExceptions as ScrapeExceptions
import processDownload
import magic
import execjs
class ContentLoader(ScrapePlugins.RetreivalBase.RetreivalBase):
loggerPath = "Main.Manga.Ki.Cl"
pluginName = "Kiss Manga Content Retreiver"
tableKey = "ki"
dbName = settings.DATABASE_DB_NAME
tableName = "MangaItems"
wg = webFunctions.WebGetRobust(logPath=loggerPath+".Web")
retreivalThreads = 3
itemLimit = 200
def check_recaptcha(self, pgurl, soup=None, markup=None):
if markup:
soup = webFunctions.as_soup(markup)
if not soup:
raise RuntimeError("You have to pass either the raw page markup, or a pre-parsed bs4 soup object!")
capdiv = soup.find("div", class_='g-recaptcha')
if not capdiv:
if markup:
return markup
return soup
raise ScrapeExceptions.LimitedException("Encountered ReCaptcha! Cannot circumvent!")
self.log.warning("Found ReCaptcha div. Need to circumvent.")
sitekey = capdiv['data-sitekey']
# soup.find("")
params = {
'key' : settings.captcha_solvers['2captcha']['api_key'],
'method' : 'userrecaptcha',
'googlekey' : sitekey,
'pageurl' : pgurl,
'json' : 1,
}
# self.wg.getJson("https://2captcha.com/in.php", postData=params)
# # here we post site key to 2captcha to get captcha ID (and we parse it here too)
# captcha_id = s.post("?key={}&method=userrecaptcha&googlekey={}&pageurl={}".format(API_KEY, site_key, url), proxies=proxy).text.split('|')[1]
# # then we parse gresponse from 2captcha response
# recaptcha_answer = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id), proxies=proxy).text
# print("solving ref captcha...")
# while 'CAPCHA_NOT_READY' in recaptcha_answer:
# sleep(5)
# recaptcha_answer = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id), proxies=proxy).text
# recaptcha_answer = recaptcha_answer.split('|')[1]
# # we make the payload for the post data here, use something like mitmproxy or fiddler to see what is needed
# payload = {
# 'key': 'value',
# 'gresponse': recaptcha_answer # This is the response from 2captcha, which is needed for the post request to go through.
# }
resolved = {
"reUrl" : "/Manga/Love-Lab-MIYAHARA-Ruri/Vol-010-Ch-001?id=359632",
"g-recaptcha-response" : "03AOP2lf5kLccgf5aAkMmzXR8mN6Kv6s76BoqHIv-raSzGCa98HMPMdx0n04ourhM1mBApnesMRbzr2vFa0264mY83SCkL5slCFcC-i3uWJoHIjVhGh0GN4yyswg5-yZpDg1iK882nPuxEeaxb18pOK790x4Z18ib5UOPGU-NoECVb6LS03S3b4fCjWwRDLNF43WhkHDFd7k-Os7ULCgOZe_7kcF9xbKkovCh2uuK0ytD7rhiKnZUUvl1TimGsSaFkSSrQ1C4cxZchVXrz7kIx0r6Qp2hPr2_PW0CAutCkmr9lt9TS5n0ecdVFhdVQBniSB-NZv9QEpbQ8",
}
# # then send the post request to the url
# response = s.post(url, payload, proxies=proxy)
def getImage(self, imageUrl, referrer):
content, handle = self.wg.getpage(imageUrl, returnMultiple=True, addlHeaders={'Referer': referrer})
if not content or not handle:
raise ValueError("Failed to retreive image from page '%s'!" % referrer)
fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1])
fileN = bs4.UnicodeDammit(fileN).unicode_markup
self.log.info("retreived image '%s' with a size of %0.3f K", fileN, len(content)/1000.0)
if not "." in fileN:
info = handle.info()
if 'Content-Type' in info:
tp = info['Content-Type']
if ";" in tp:
tp = tp.split(";")[0]
ext = guess_extension(tp)
if ext == None:
ext = "unknown_ftype"
print(info['Content-Type'], ext)
fileN += "." + ext
else:
fileN += ".jpg"
		# Let magic figure out the file type for us (it's probably smarter than kissmanga, anyway).
		guessed = magic.from_buffer(content, mime=True)
		ext = guess_extension(guessed)
if ext:
fileN = fileN + ext
return fileN, content
def getImageUrls(self, baseUrl):
pgctnt, filename, mimetype = self.wg.getItemPhantomJS(baseUrl)
pgctnt = self.check_recaptcha(pgurl=baseUrl, markup=pgctnt)
linkRe = re.compile(r'lstImages\.push\((wrapKA\(".+?"\))\);')
links = linkRe.findall(pgctnt)
pages = []
for item in links:
tgt = self.wg.pjs_driver.execute_script("return %s" % item)
if not tgt.startswith("http"):
raise ScrapeExceptions.LimitedException("URL Decryption failed!")
pages.append(tgt)
self.log.info("Found %s pages", len(pages))
return pages
	# Don't download items for 12 hours after release,
# so that other, (better) sources can potentially host
# the items first.
def checkDelay(self, inTime):
return inTime < (time.time() - 60*60*12)
def getLink(self, link):
sourceUrl = link["sourceUrl"]
print("Link", link)
seriesName = link['seriesName']
try:
self.log.info( "Should retreive url - %s", sourceUrl)
self.updateDbEntry(sourceUrl, dlState=1)
imageUrls = self.getImageUrls(sourceUrl)
if not imageUrls:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Page not found - 404")
self.updateDbEntry(sourceUrl, dlState=-1)
return
self.log.info("Downloading = '%s', '%s' ('%s images)", seriesName, link["originName"], len(imageUrls))
dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
if link["flags"] == None:
link["flags"] = ""
if newDir:
self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
chapterName = nt.makeFilenameSafe(link["originName"])
fqFName = os.path.join(dlPath, chapterName+" [KissManga].zip")
loop = 1
prefix, ext = os.path.splitext(fqFName)
while os.path.exists(fqFName):
fqFName = "%s (%d)%s" % (prefix, loop, ext)
loop += 1
self.log.info("Saving to archive = %s", fqFName)
images = []
imgCnt = 1
for imgUrl in imageUrls:
imageName, imageContent = self.getImage(imgUrl, sourceUrl)
imageName = "{num:03.0f} - {srcName}".format(num=imgCnt, srcName=imageName)
imgCnt += 1
images.append([imageName, imageContent])
if not runStatus.run:
self.log.info( "Breaking due to exit flag being set")
self.updateDbEntry(sourceUrl, dlState=0)
return
self.log.info("Creating archive with %s images", len(images))
if not images:
self.updateDbEntry(sourceUrl, dlState=-1, tags="error-404")
return
#Write all downloaded files to the archive.
arch = zipfile.ZipFile(fqFName, "w")
for imageName, imageContent in images:
arch.writestr(imageName, imageContent)
arch.close()
dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, includePHash=True, rowId=link['dbId'])
self.log.info( "Done")
filePath, fileName = os.path.split(fqFName)
self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, tags=dedupState)
return
except SystemExit:
print("SystemExit!")
raise
except Exception:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Traceback = %s", traceback.format_exc())
self.updateDbEntry(sourceUrl, dlState=-1)
def setup(self):
'''
poke through cloudflare
'''
if not self.wg.stepThroughCloudFlare("http://kissmanga.com", 'KissManga'):
raise ValueError("Could not access site due to cloudflare protection.")
if __name__ == '__main__':
import utilities.testBase as tb
with tb.testSetup(load=False):
cl = ContentLoader()
# pg = 'http://dynasty-scans.com/chapters/qualia_the_purple_ch16'
# inMarkup = cl.wg.getpage(pg)
# cl.getImageUrls(inMarkup, pg)
cl.do_fetch_content()
# cl.getLink('http://www.webtoons.com/viewer?titleNo=281&episodeNo=3')
# cl.getImageUrls('http://kissmanga.com/Manga/Hanza-Sky/Ch-031-Read-Online?id=225102')
| [
"[email protected]"
] | |
1f0508ba72e0fa6b452f48d450270349e204b152 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0801-0900/0889-Construct Binary Tree from Preorder and Postorder Traversal/0889-Construct Binary Tree from Preorder and Postorder Traversal.py | 91552f75a60306b6c33b24b37f46cb02f2423c10 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 1,638 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
class Solution:
def constructFromPrePost(self, pre: List[int], post: List[int]) -> TreeNode:
if not pre:
return None
root = TreeNode(pre[0])
if len(pre) == 1:
return root
i = post.index(pre[1])
root.left = self.constructFromPrePost(pre[1 : i + 2], post[0 : i + 1])
root.right = self.constructFromPrePost(pre[i + 2 :], post[i + 1 : -1])
return root
class Solution2:
def constructFromPrePost(self, pre: List[int], post: List[int]) -> TreeNode:
table = {v: i for i, v in enumerate(post)}
def dfs(preStart, postStart, n):
if n == 0:
return None
root = TreeNode(pre[preStart])
if n == 1:
return root
i = table[pre[preStart + 1]]
L = i - postStart + 1
root.left = dfs(preStart + 1, postStart, L)
root.right = dfs(preStart + L + 1, i + 1, n - L - 1)
return root
return dfs(0, 0, len(pre))
class Solution3:
def constructFromPrePost(self, pre: List[int], post: List[int]) -> TreeNode:
St = [TreeNode(pre[0])]
i = 0
for val in pre[1:]:
while St[-1].val == post[i]:
St.pop()
i += 1
node = TreeNode(val)
if St[-1].left:
St[-1].right = node
else:
St[-1].left = node
St.append(node)
return St[0]
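# Minimal self-check sketch: the input below is the classic example from the problem
# statement; Solution (the recursive variant) is exercised here, but Solution2 or
# Solution3 could be swapped in since all three rebuild the same tree.
if __name__ == '__main__':
    pre = [1, 2, 4, 5, 3, 6, 7]
    post = [4, 5, 2, 6, 7, 3, 1]
    root = Solution().constructFromPrePost(pre, post)
    # Expect root 1 with left child 2 (children 4, 5) and right child 3 (children 6, 7).
    assert (root.val, root.left.val, root.right.val) == (1, 2, 3)
    assert (root.left.left.val, root.left.right.val) == (4, 5)
    assert (root.right.left.val, root.right.right.val) == (6, 7)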
| [
"[email protected]"
] | |
4a98b8fcc727e88ce26f8d3498359f3e14bd2787 | 76c50a9849b4093c6339dfeff888a5a0672a92bc | /yatsm/phenology/longtermmean.py | 627bb96a977f314a1651c0440907c1813340f769 | [
"MIT"
] | permissive | valpasq/yatsm | d2fac9c7eea6f8a785d6c5b6c24196bb9f441db0 | 44e2124c1bae3dd4245437475d709187f52d376d | refs/heads/master | 2021-01-18T07:40:33.260515 | 2016-09-13T16:22:36 | 2016-09-13T16:22:36 | 40,021,588 | 0 | 0 | null | 2015-07-31T19:30:14 | 2015-07-31T19:30:14 | null | UTF-8 | Python | false | false | 11,900 | py | """ Implementation of Eli Melaas' Landsat phenology algorithm
See:
Melaas, EK, MA Friedl, and Z Zhu. 2013. Detecting interannual variation in
deciduous broadleaf forest phenology using Landsat TM/ETM+ data. Remote
Sensing of Environment 132: 176-185.
"""
from __future__ import division
from datetime import datetime as dt
import logging
import math
import numpy as np
import numpy.lib.recfunctions
# Grab `stats` package from R for smoothing spline
from rpy2 import robjects as ro
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
from ..vegetation_indices import EVI
logger = logging.getLogger('yatsm')
def group_years(years, interval=3):
""" Return integers representing sequential groupings of years
Note: years specified must be sorted
Args:
years (np.ndarray): the year corresponding to each EVI value
interval (int, optional): number of years to group together
(default: 3)
Returns:
np.ndarray: integers representing sequential year groupings
"""
n_groups = math.ceil((years.max() - years.min()) / interval)
if n_groups <= 1:
return np.zeros_like(years, dtype=np.uint16)
splits = np.array_split(np.arange(years.min(), years.max() + 1), n_groups)
groups = np.zeros_like(years, dtype=np.uint16)
for i, s in enumerate(splits):
groups[np.in1d(years, s)] = i
return groups
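# Usage sketch (illustrative values): six consecutive years with the default 3-year
# interval split into two groups, the first three years mapping to group 0 and the
# last three to group 1:
#   >>> group_years(np.array([2000, 2001, 2002, 2003, 2004, 2005]), interval=3)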
def scale_EVI(evi, periods, qmin=10, qmax=90):
""" Returns EVI scaled to upper and lower quantiles
Quantiles are calculated based on EVI within some year-to-year interval.
As part of finding the quantiles, EVI values not within the (0, 1) range
will be removed.
Args:
evi (np.ndarray): EVI values
periods (np.ndarray): intervals of years to group and scale together
qmin (float, optional): lower quantile for scaling (default: 10)
qmax (float, optional): upper quantile for scaling (default: 90)
Returns:
np.ndarray: scaled EVI array
"""
_evi = evi.copy()
for u in np.unique(periods):
index = np.where(periods == u)
evi_min = np.percentile(evi[index], qmin)
evi_max = np.percentile(evi[index], qmax)
_evi[index] = (evi[index] - evi_min) / (evi_max - evi_min)
return _evi
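# Call-pattern sketch (variable names illustrative): the group labels produced by
# group_years() drive the per-period percentile scaling:
#   >>> periods = group_years(yeardoy[:, 0], interval=3)
#   >>> evi_norm = scale_EVI(evi, periods, qmin=10, qmax=90)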
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
def halfmax(x):
""" Return index of the observation closest to the half of some data
Assumes that data are scaled between [0, 1] and half-max is 0.5
Args:
x (np.ndarray): a one dimensional vector
Returns:
int: the index of the observation closest to the half-max of the data
"""
return np.argmin(np.abs(
(x - np.nanmin(x)) /
(np.nanmax(x) - np.nanmin(x)) - 0.5))
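# Worked example (illustrative input): after rescaling to [0, 1], the value closest
# to the 0.5 half-maximum sits at index 2:
#   >>> halfmax(np.array([0.0, 0.2, 0.5, 0.8, 1.0]))
#   2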
def ordinal2yeardoy(ordinal):
""" Convert ordinal dates to two arrays of year and doy
Args:
ordinal (np.ndarray): ordinal dates
Returns:
np.ndarray: nobs x 2 np.ndarray containing the year and DOY for each
ordinal date
"""
_date = [dt.fromordinal(_d) for _d in ordinal]
yeardoy = np.empty((ordinal.size, 2), dtype=np.uint16)
yeardoy[:, 0] = np.array([_d.timetuple().tm_year for _d in _date])
yeardoy[:, 1] = np.array([_d.timetuple().tm_yday for _d in _date])
return yeardoy
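# Example (illustrative date): the ordinal for 1 February 2000 maps to year 2000,
# day-of-year 32, so the call below returns a 1 x 2 array holding [2000, 32]:
#   >>> ordinal2yeardoy(np.array([dt(2000, 2, 1).toordinal()]))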
class LongTermMeanPhenology(object):
""" Calculate long term mean phenology metrics for each YATSM record
Long term mean phenology metrics describe the general spring greenup and
autumn senescence timing using an algorithm by Melaas *et al.*, 2013 based
on fitting smoothing splines to timeseries of EVI.
Attributes:
self.pheno (np.ndarray): NumPy structured array containing phenology
metrics. These metrics include:
* spring_doy: the long term mean day of year of the start of spring
* autumn_doy: the long term mean day of year of the start of autumn
* pheno_cor: the correlation coefficient of the observed EVI and
the smoothed prediction
* peak_evi: the highest smoothed EVI value within the year (maximum
amplitude of EVI)
* peak_doy: the day of year corresponding to the peak EVI value
* spline_evi: the smoothing spline prediction of EVI for days of
year between 1 and 365
* pheno_nobs: the number of observations used to fit the smoothing
spline
Args:
red_index (int, optional): index of model.Y containing red band
(default: 2)
nir_index (int, optional): index of model.Y containing NIR band
(default: 3)
blue_index (int, optional): index of model.Y containing blue band
(default: 0)
scale (float or np.ndarray, optional): scale factor for reflectance
bands in model.Y to transform data into [0, 1] (default: 0.0001)
evi_index (int, optional): if EVI is already used within timeseries
model, provide index of model.Y containing EVI to override
computation from red/nir/blue bands (default: None)
evi_scale (float, optional): if EVI is already used within timeseries
model, provide scale factor to transform EVI into [0, 1] range
(default: None)
year_interval (int, optional): number of years to group together when
normalizing EVI to upper and lower percentiles of EVI within the
group (default: 3)
q_min (float, optional): lower percentile for scaling EVI (default: 10)
q_max (float, optional): upper percentile for scaling EVI (default: 90)
"""
def __init__(self, red_index=2, nir_index=3, blue_index=0,
scale=0.0001, evi_index=None, evi_scale=None,
year_interval=3, q_min=10, q_max=90):
self.red_index = red_index
self.nir_index = nir_index
self.blue_index = blue_index
self.scale = scale
self.evi_index = evi_index
self.evi_scale = evi_scale
self.year_interval = year_interval
self.q_min = q_min
self.q_max = q_max
def _fit_prep(self, model):
if self.evi_index:
if not isinstance(self.evi_scale, float):
raise ValueError('Must provide scale factor for EVI')
self.evi = model.Y[self.evi_index, :] * self.evi_scale
else:
self.evi = EVI(model.Y[self.red_index, :] * self.scale,
model.Y[self.nir_index, :] * self.scale,
model.Y[self.blue_index, :] * self.scale)
self.ordinal = model.dates.astype(np.uint32)
self.yeardoy = ordinal2yeardoy(self.ordinal)
# Mask based on unusual EVI values
valid_evi = np.where((self.evi >= 0) & (self.evi <= 1))[0]
self.evi = self.evi[valid_evi]
self.ordinal = self.ordinal[valid_evi]
self.yeardoy = self.yeardoy[valid_evi, :]
self.pheno = np.zeros(self.model.record.shape, dtype=[
('spring_doy', 'u2'),
('autumn_doy', 'u2'),
('pheno_cor', 'f4'),
('peak_evi', 'f4'),
('peak_doy', 'u2'),
('spline_evi', 'f8', 366),
('pheno_nobs', 'u2')
])
def _fit_record(self, evi, yeardoy, year_interval, q_min, q_max):
# Calculate year-to-year groupings for EVI normalization
periods = group_years(yeardoy[:, 0], year_interval)
evi_norm = scale_EVI(evi, periods, qmin=q_min, qmax=q_max)
# Mask out np.nan
valid = np.isfinite(evi_norm)
if not np.any(valid):
logger.debug('No valid EVI in segment -- skipping')
return
yeardoy = yeardoy[valid, :]
evi_norm = evi_norm[valid]
# Pad missing DOY values (e.g. in winter) with 0's to improve
# spline fit
pad_start = np.arange(1, yeardoy[:, 1].min() + 1)
pad_end = np.arange(yeardoy[:, 1].max(), 365 + 1)
pad_doy = np.concatenate((yeardoy[:, 1], pad_start, pad_end))
pad_evi_norm = np.concatenate((
evi_norm,
np.zeros_like(pad_start, dtype=evi.dtype),
np.zeros_like(pad_end, dtype=evi.dtype)
))
# Fit spline and predict EVI
spl_pred = CRAN_spline(pad_doy, pad_evi_norm, spar=0.55)
evi_smooth = spl_pred(np.arange(1, 367)) # 366 to include leap years
# Check correlation
pheno_cor = np.corrcoef(evi_smooth[yeardoy[:, 1] - 1], evi_norm)[0, 1]
# Separate into spring / autumn
peak_doy = np.argmax(evi_smooth)
peak_evi = np.max(evi_smooth)
evi_smooth_spring = evi_smooth[:peak_doy + 1]
evi_smooth_autumn = evi_smooth[peak_doy + 1:]
# Compute half-maximum of spring logistic for "ruling in" image dates
# (points) for anomaly calculation
# Note: we add + 1 to go from index (on 0) to day of year (on 1)
if evi_smooth_spring.size > 0:
ltm_spring = halfmax(evi_smooth_spring) + 1
else:
ltm_spring = 0
if evi_smooth_autumn.size > 0:
ltm_autumn = halfmax(evi_smooth_autumn) + 1 + peak_doy + 1
else:
ltm_autumn = 0
return (ltm_spring, ltm_autumn, pheno_cor,
peak_evi, peak_doy, evi_smooth)
def fit(self, model):
""" Fit phenology metrics for each time segment within a YATSM model
Args:
model (yatsm.YATSM): instance of `yatsm.YATSM` that has been run
for change detection
Returns:
np.ndarray: updated copy of YATSM model instance with phenology
added into yatsm.record structured array
"""
self.model = model
# Preprocess EVI and create our `self.pheno` record
self._fit_prep(model)
for i, _record in enumerate(self.model.record):
# Subset variables to range of current record
rec_range = np.where((self.ordinal >= _record['start']) &
(self.ordinal <= _record['end']))[0]
if rec_range.size == 0:
continue
_evi = self.evi[rec_range]
_yeardoy = self.yeardoy[rec_range, :]
# Fit and save results
_result = self._fit_record(_evi, _yeardoy,
self.year_interval,
self.q_min, self.q_max)
if _result is None:
continue
self.pheno[i]['spring_doy'] = _result[0]
self.pheno[i]['autumn_doy'] = _result[1]
self.pheno[i]['pheno_cor'] = _result[2]
self.pheno[i]['peak_evi'] = _result[3]
self.pheno[i]['peak_doy'] = _result[4]
self.pheno[i]['spline_evi'][:] = _result[5]
self.pheno[i]['pheno_nobs'] = rec_range.size
return np.lib.recfunctions.merge_arrays(
(self.model.record, self.pheno), flatten=True)
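# Rough end-to-end sketch (assumes `model` is a fitted yatsm.YATSM instance exposing
# the Y, dates, and record attributes used above; the band indices are examples only):
#   >>> ltm = LongTermMeanPhenology(red_index=2, nir_index=3, blue_index=0, scale=0.0001)
#   >>> record = ltm.fit(model)
#   >>> record['spring_doy'], record['autumn_doy']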
| [
"[email protected]"
] | |
8f37d8f65cf54dd283e22acd078d5087397db1d3 | a1b375c3e98fe059dafc4d74cbcbcb99a0571e44 | /images/urls.py | 7b1168a1108824ea4f65b15f1955c15b27075621 | [
"MIT"
] | permissive | mohsenamoon1160417237/Social_app | 478a73552ceed8001c167be6caaf550cd58626bd | 79fa0871f7b83648894941f9010f1d99f1b27ab3 | refs/heads/master | 2022-12-09T16:03:53.623506 | 2020-09-21T05:59:22 | 2020-09-21T06:02:03 | 297,242,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from django.urls import path
from . import views
urlpatterns = [
path('best_images/' , views.most_liked_images , name='most_liked_images'),
path('' , views.image_post , name='image_post'),
path('delete/<int:image_id>/<slug:image_slug>/' , views.image_delete , name='image_delete'),
path('all/' , views.images , name='images'),
path('<slug:image_slug>/<int:image_id>/' , views.image_detail , name='image_detail'),
path('like/' , views.image_like , name='image_like'),
] | [
"[email protected]"
] | |
3f8a0fafc7d03d42481365c073a4dad659afc6ac | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /greatest-common-divisor-of-strings/test_solution.py | 2c2c95aefa31c52e1d40ac929287824839313cf5 | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from solution import Solution
def test_solution():
s = Solution()
assert s.gcdOfStrings(str1 = "ABCABC", str2 = "ABC") == "ABC"
| [
"[email protected]"
] | |
f6661f51dac094fd1cd85b3211f221fd56caccff | aed888628faf0f52081a8711653489e3982ce0c1 | /mininet/wifiPlot.py | 53bf454c111ea4aa97d7b87df0f9606c7ee6ab4d | [] | no_license | tapparello/mininet-wifi | 69d01beea7d9b456a254694f1c38c443f0b32560 | 0feb170337bef16ea1a972685fc4a0eaa1a51eea | refs/heads/master | 2021-01-20T17:27:34.311830 | 2017-05-09T19:43:36 | 2017-05-09T19:43:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,857 | py | """
author: Ramon Fontes ([email protected])
ramonfontes.com
"""
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from mininet.log import debug
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
class plot3d (object):
ax = None
@classmethod
def instantiateGraph(self, MAX_X, MAX_Y, MAX_Z):
"""instantiateGraph"""
plt.ion()
plt.title("Mininet-WiFi Graph")
self.ax = plt.subplot(111, projection='3d')
self.ax.set_xlabel('meters (x)')
self.ax.set_ylabel('meters (y)')
self.ax.set_zlabel('meters (z)')
self.ax.set_xlim([0, MAX_X])
self.ax.set_ylim([0, MAX_Y])
self.ax.set_zlim([0, MAX_Z])
self.ax.grid(True)
@classmethod
def instantiateAnnotate(self, node):
"""instantiateAnnotate"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
z = '%.2f' % float(node.params['position'][2])
node.plttxt = self.ax.text(float(x), float(y), float(z), node.name)
@classmethod
def instantiateNode(self, node):
"""Instantiate Node"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
z = '%.2f' % float(node.params['position'][2])
resolution = 40
u = np.linspace(0, 2 * np.pi, resolution)
v = np.linspace(0, np.pi, resolution)
r = 1
x = r * np.outer(np.cos(u), np.sin(v)) + float(x)
y = r * np.outer(np.sin(u), np.sin(v)) + float(y)
z = r * np.outer(np.ones(np.size(u)), np.cos(v)) + float(z)
node.pltNode = self.ax.plot_surface(x, y, z, alpha=0.2, edgecolor='none', color='black')
@classmethod
def graphInstantiateNodes(self, nodes):
"""Instantiate Nodes"""
for node in nodes:
self.instantiateAnnotate(node)
self.instantiateNode(node)
self.instantiateCircle(node)
self.plotDraw()
@classmethod
def graphPause(self):
"""Pause"""
plt.pause(0.001)
@classmethod
def graphUpdate(self, node):
"""Graph Update"""
node.pltNode.remove()
node.pltCircle.remove()
node.plttxt.remove()
self.instantiateCircle(node)
self.instantiateNode(node)
self.instantiateAnnotate(node)
self.plotDraw()
@classmethod
def plotDraw(self):
"""plotDraw"""
plt.draw()
@classmethod
def closePlot(self):
"""Close"""
try:
plt.close()
except:
pass
@classmethod
def instantiateCircle(self, node):
"""Instantiate Circle"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
z = '%.2f' % float(node.params['position'][2])
color = 'b'
if node.type == 'station':
color = 'g'
elif node.type == 'vehicle':
color = 'r'
resolution = 100
u = np.linspace(0, 2 * np.pi, resolution)
v = np.linspace(0, np.pi, resolution)
r = node.params['range']
x = r * np.outer(np.cos(u), np.sin(v)) + float(x)
y = r * np.outer(np.sin(u), np.sin(v)) + float(y)
z = r * np.outer(np.ones(np.size(u)), np.cos(v)) + float(z)
node.pltCircle = self.ax.plot_surface(x, y, z, alpha=0.2, edgecolor='none', color=color)
class plot2d (object):
ax = None
@classmethod
def closePlot(self):
"""Close"""
try:
plt.close()
except:
pass
@classmethod
def text(self, node):
"""draw text"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
if hasattr(node.plttxt, 'xyann'): node.plttxt.xyann = (x, y) # newer MPL versions (>=1.4)
else: node.plttxt.xytext = (x, y)
@classmethod
def circle(self, node):
"""drawCircle"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
node.pltCircle.center = x, y
@classmethod
def graphUpdate(self, node):
"""Graph Update"""
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
if hasattr(node.plttxt, 'xyann'): node.plttxt.xyann = (x, y) # newer MPL versions (>=1.4)
else: node.plttxt.xytext = (x, y)
node.pltNode.set_data(x, y)
node.pltCircle.center = x, y
self.plotDraw()
@classmethod
def graphPause(self):
"""Pause"""
plt.pause(0.001)
@classmethod
def plotDraw(self):
"plotDraw"
plt.draw()
@classmethod
def plotScatter(self, nodesx, nodesy):
"plotScatter"
return plt.scatter(nodesx, nodesy, color='red', marker='s')
@classmethod
def plotLine2d(self, nodesx, nodesy, color='', ls='-', lw=1):
"plotLine2d"
return plt.Line2D(nodesx, nodesy, color=color, ls=ls, lw=lw)
@classmethod
def plotLineTxt(self, x, y, i):
"plotLineTxt"
title = 'Av.%s' % i
plt.text(x, y, title, ha='left', va='bottom', fontsize=8, color='g')
@classmethod
def plotLine(self, line):
"plotLine"
ax = self.ax
ax.add_line(line)
@classmethod
def instantiateGraph(self, MAX_X, MAX_Y):
"instantiateGraph"
plt.ion()
plt.title("Mininet-WiFi Graph")
self.ax = plt.subplot(111)
self.ax.set_xlabel('meters')
self.ax.set_ylabel('meters')
self.ax.set_xlim([0, MAX_X])
self.ax.set_ylim([0, MAX_Y])
self.ax.grid(True)
@classmethod
def instantiateNode(self, node):
"instantiateNode"
ax = self.ax
color = 'b'
if node.type == 'station':
color = 'g'
elif node.type == 'vehicle':
color = 'r'
node.pltNode, = ax.plot(1, 1, linestyle='', marker='.', ms=10, mfc=color)
@classmethod
def instantiateCircle(self, node):
"instantiateCircle"
ax = self.ax
color = 'b'
if node.type == 'station':
color = 'g'
elif node.type == 'vehicle':
color = 'r'
node.pltCircle = ax.add_patch(
patches.Circle((0, 0),
node.params['range'], fill=True, alpha=0.1, color=color
)
)
@classmethod
def instantiateAnnotate(self, node):
"instantiateAnnotate"
node.plttxt = self.ax.annotate(node, xy=(0, 0))
@classmethod
def updateCircleRadius(self, node):
node.pltCircle.set_radius(node.params['range'])
@classmethod
def graphInstantiateNodes(self, node):
self.instantiateAnnotate(node)
self.instantiateCircle(node)
self.instantiateNode(node)
self.graphUpdate(node)
@classmethod
def plotGraph(self, wifiNodes=[], srcConn=[], dstConn=[]):
"Plot Graph"
debug('Enabling Graph...\n')
for node in wifiNodes:
x = '%.2f' % float(node.params['position'][0])
y = '%.2f' % float(node.params['position'][1])
self.graphInstantiateNodes(node)
node.pltNode.set_data(x, y)
self.text(node)
self.circle(node)
for c in range(0, len(srcConn)):
src_x = '%.2f' % float(srcConn[c].params['position'][0])
src_y = '%.2f' % float(srcConn[c].params['position'][1])
dst_x = '%.2f' % float(dstConn[c].params['position'][0])
dst_y = '%.2f' % float(dstConn[c].params['position'][1])
line = self.plotLine2d([src_x, dst_x], \
[src_y, dst_y], 'b')
self.plotLine(line)
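# Rough 2D usage sketch: the node below is a stand-in for a Mininet-WiFi station and
# mocks only the attributes this module reads (name, type, params):
#   class _FakeNode(object):
#       name = 'sta1'
#       type = 'station'
#       params = {'position': [10.0, 20.0, 0.0], 'range': 30}
#   plot2d.instantiateGraph(100, 100)
#   plot2d.graphInstantiateNodes(_FakeNode())
#   plot2d.graphPause()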
| [
"[email protected]"
] | |
6f82494371dfa89abc75f24106979386cb34c94a | 41777d4d219ea97b4632f4a8a31ab6c82a60772c | /kubernetes_typed/client/models/v1_scale_io_volume_source.py | b1eccb02122f904eb1df1f33838af3ec15a3c9d6 | [
"Apache-2.0"
] | permissive | gordonbondon/kubernetes-typed | 501d9c998c266386dc7f66f522f71ac3ba624d89 | 82995b008daf551a4fe11660018d9c08c69f9e6e | refs/heads/master | 2023-07-18T12:06:04.208540 | 2021-09-05T19:50:05 | 2021-09-05T19:50:05 | 319,183,135 | 24 | 2 | Apache-2.0 | 2021-09-05T19:50:06 | 2020-12-07T02:34:12 | Python | UTF-8 | Python | false | false | 584 | py | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1ScaleIOVolumeSourceDict generated type."""
from typing import TypedDict
from kubernetes_typed.client import V1LocalObjectReferenceDict
V1ScaleIOVolumeSourceDict = TypedDict(
"V1ScaleIOVolumeSourceDict",
{
"fsType": str,
"gateway": str,
"protectionDomain": str,
"readOnly": bool,
"secretRef": V1LocalObjectReferenceDict,
"sslEnabled": bool,
"storageMode": str,
"storagePool": str,
"system": str,
"volumeName": str,
},
total=False,
)
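# Illustrative (static-typing only) sketch; the field values are made up and the nested
# secretRef assumes V1LocalObjectReferenceDict exposes a "name" key:
#   volume: V1ScaleIOVolumeSourceDict = {
#       "gateway": "https://gateway.example:443/api",
#       "system": "scaleio",
#       "secretRef": {"name": "sio-secret"},
#       "readOnly": True,
#   }
# With total=False, any subset of the declared keys type-checks.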
| [
"[email protected]"
] |