| column | dtype | range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable ⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
31b60af5a9eaecb9ec2663c0f1867332d0f02a28 | 4f328184e3e4c7ac84792f38127b4538abc1be01 | /python/re-split/main.py | fd853268133194acce224d22a6d46a05d5308ea7 | ["Apache-2.0"] | permissive | shollingsworth/HackerRank | 30cd45960af5983ed697c0aaf9a6e4268dc75ef7 | 2f0e048044e643d6aa9d07c1898f3b00adf489b0 | refs/heads/master | 2021-08-17T09:08:44.532111 | 2017-11-21T02:04:09 | 2017-11-21T02:04:09 | 103,992,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
import json


def banner():
    ban = '====' * 30
    print("{}\nSAMPLE INP:\n{}\n{}".format(ban, ban, open(ip, 'r').read()))
    print("{}\nSAMPLE OUT:\n{}\n{}".format(ban, ban, open(op, 'r').read()))
    print("{}\nSTART:\n{}".format(ban, ban))
    sys.stdin = open(ip, 'r')


cnt = -1


def comp(inp, ln):
    outl = output_arr[ln]
    if str(inp) != outl:
        raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln, op, inp, outl))


ip = "./challenge_sample_input"
op = "./challenge_sample_output"
ip = "./input01.txt"
op = "./output01.txt"
output_arr = map(str, open(op, 'r').read().split('\n'))
banner()

# https://www.hackerrank.com/challenges/re-split/problem
import re
print("\n".join([i for i in re.split('[,\.]+', raw_input()) if len(i) > 0]))
| ["[email protected]"] | |
10e2710b765ceac6d9a48e440c623599ef107024 | 98dae6deaf31bcacc078eeb1bdbdb8bd3ac3784f | /dace/transformation/dataflow/copy_to_device.py | 625179c8445725cf5df002835c54eae68478799a | ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | cpenny42/dace | da9b241ea0808f1798645ab917e1484c45a3a748 | 2c7814b4f02a6870bb25ae08113c0cc3791e1178 | refs/heads/master | 2020-06-24T09:06:23.091624 | 2019-05-10T11:11:14 | 2019-05-10T11:11:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,767 | py |
""" Contains classes and functions that implement copying a nested SDFG
    and its dependencies to a given device. """
import dace
from copy import deepcopy as dcpy
from dace import data, properties, symbolic, types, subsets
from dace.graph import edges, graph, nodes, nxutil
from dace.transformation import pattern_matching
from math import ceil
import sympy
import networkx as nx
def change_storage(sdfg, storage):
for state in sdfg.nodes():
for node in state.nodes():
if isinstance(node, nodes.AccessNode):
node.desc(sdfg).storage = storage
if isinstance(node, nodes.NestedSDFG):
change_storage(node.sdfg, storage)
@properties.make_properties
class CopyToDevice(pattern_matching.Transformation):
""" Implements the copy-to-device transformation, which copies a nested
SDFG and its dependencies to a given device.
The transformation changes all data storage types of a nested SDFG to
the given `storage` property, and creates new arrays and copies around
the nested SDFG to that storage.
"""
_nested_sdfg = nodes.NestedSDFG("", graph.OrderedDiGraph(), set(), set())
storage = properties.Property(
dtype=types.StorageType,
desc="Nested SDFG storage",
enum=types.StorageType,
from_string=lambda x: types.StorageType[x],
default=types.StorageType.Default)
@staticmethod
def annotates_memlets():
return True
@staticmethod
def expressions():
return [nxutil.node_path_graph(CopyToDevice._nested_sdfg)]
@staticmethod
def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
return True
@staticmethod
def match_to_str(graph, candidate):
nested_sdfg = graph.nodes()[candidate[CopyToDevice._nested_sdfg]]
return nested_sdfg.label
def apply(self, sdfg):
state = sdfg.nodes()[self.state_id]
nested_sdfg = state.nodes()[self.subgraph[CopyToDevice._nested_sdfg]]
storage = self.storage
for _, edge in enumerate(state.in_edges(nested_sdfg)):
src, src_conn, dst, dst_conn, memlet = edge
dataname = memlet.data
memdata = sdfg.arrays[dataname]
if isinstance(memdata, data.Array):
new_data = sdfg.add_array(
'device_' + dataname + '_in',
memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
],
transient=True,
storage=storage)
elif isinstance(memdata, data.Scalar):
new_data = sdfg.add_scalar(
'device_' + dataname + '_in',
memdata.dtype,
transient=True,
storage=storage)
else:
raise NotImplementedError
data_node = nodes.AccessNode('device_' + dataname + '_in')
to_data_mm = dcpy(memlet)
from_data_mm = dcpy(memlet)
from_data_mm.data = 'device_' + dataname + '_in'
offset = []
for ind, r in enumerate(memlet.subset):
offset.append(r[0])
if isinstance(memlet.subset[ind], tuple):
begin = memlet.subset[ind][0] - r[0]
end = memlet.subset[ind][1] - r[0]
step = memlet.subset[ind][2]
from_data_mm.subset[ind] = (begin, end, step)
else:
from_data_mm.subset[ind] -= r[0]
state.remove_edge(edge)
state.add_edge(src, src_conn, data_node, None, to_data_mm)
state.add_edge(data_node, None, dst, dst_conn, from_data_mm)
for _, edge in enumerate(state.out_edges(nested_sdfg)):
src, src_conn, dst, dst_conn, memlet = edge
dataname = memlet.data
memdata = sdfg.arrays[dataname]
if isinstance(memdata, data.Array):
new_data = data.Array(
'device_' + dataname + '_out',
memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
],
transient=True,
storage=storage)
elif isinstance(memdata, data.Scalar):
new_data = sdfg.add_scalar(
'device_' + dataname + '_out',
memdata.dtype,
transient=True,
storage=storage)
else:
raise NotImplementedError
data_node = nodes.AccessNode('device_' + dataname + '_out')
to_data_mm = dcpy(memlet)
from_data_mm = dcpy(memlet)
to_data_mm.data = 'device_' + dataname + '_out'
offset = []
for ind, r in enumerate(memlet.subset):
offset.append(r[0])
if isinstance(memlet.subset[ind], tuple):
begin = memlet.subset[ind][0] - r[0]
end = memlet.subset[ind][1] - r[0]
step = memlet.subset[ind][2]
to_data_mm.subset[ind] = (begin, end, step)
else:
to_data_mm.subset[ind] -= r[0]
state.remove_edge(edge)
state.add_edge(src, src_conn, data_node, None, to_data_mm)
state.add_edge(data_node, None, dst, dst_conn, from_data_mm)
# Change storage for all data inside nested SDFG to device.
change_storage(nested_sdfg.sdfg, storage)
pattern_matching.Transformation.register_pattern(CopyToDevice)
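# A rough usage sketch (an assumption about the surrounding DaCe API, not taken
# from this module): once registered above, the transformation is matched
# against a state containing a NestedSDFG node and applied with the `storage`
# property set, e.g.:
#
#   xform = CopyToDevice(sdfg_id, state_id, {CopyToDevice._nested_sdfg: node_id}, 0)
#   xform.storage = types.StorageType.GPU_Global
#   xform.apply(sdfg)
#
# which, per apply() above, moves every array inside the nested SDFG to the
# chosen storage and stages 'device_*_in' / 'device_*_out' copies around it.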
| ["[email protected]"] | |
9b68a0c51829a961eeb83baac376ac6a7686bab3 | 73305ddcc6dc9775b1e9a71506e2f3c74f678edc | /starthinker/util/dv_targeting.py | f4cc31a6aba0e9c3aa913ba52dc4d976b1848836 | ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"] | permissive | google/starthinker | ef359557da4140275a8524d0d813eecf022ece9e | b596df09c52511e2e0c0987f6245aa4607190dd0 | refs/heads/master | 2023-08-25T21:16:45.578012 | 2023-07-17T22:19:18 | 2023-07-17T22:20:10 | 123,017,995 | 167 | 64 | Apache-2.0 | 2023-08-02T01:24:51 | 2018-02-26T19:15:09 | Python | UTF-8 | Python | false | false | 46,350 | py |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import re
import json
from googleapiclient.errors import HttpError
from starthinker.util.google_api import API_DV360
RE_URL = re.compile(r'^.*://')
class Assigned_Targeting:
def _check_settable(func):
def wrapper(self, *args, **kwargs):
operation = func.__name__.replace('add_', '').replace('delete_', '')
if self.lineitem:
return func(self, *args, **kwargs)
elif self.partner:
if operation == 'channel':
return func(self, *args, **kwargs)
elif self.advertiser:
if operation in ('channel', 'content_label', 'sensitive_category'):
return func(self, *args, **kwargs)
return wrapper
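# Note on the decorator above: _check_settable derives the targeting operation
# from the wrapped method name (add_* / delete_*) and silently skips the call
# when that operation cannot be set at the current layer. Line items accept
# every operation, partners only accept 'channel', and advertisers accept
# 'channel', 'content_label' and 'sensitive_category'.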
def __init__(self, config, auth, partnerId=None, advertiserId=None, lineItemId=None):
self.config = config
self.auth = auth
self.partner = partnerId
self.advertiser = advertiserId
self.lineitem = lineItemId
self.channels = { 'delete':[], 'add':[] }
self.options_cache = {}
self.assigneds_cache = {}
self.add_cache = set()
self.delete_cache = set()
self.exists_cache = {}
self.audience_cache = None
self.delete_requests = {}
self.create_requests = {}
self.warnings = []
def _url_domain(self, url):
return '.'.join(RE_URL.sub('', url).split('.')[-2:])
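# For example, _url_domain('https://www.example.com') returns 'example.com':
# the scheme is stripped and only the last two dot-separated labels are kept,
# so bare domains are expected rather than full URLs with paths.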
def _delete(self, targeting_type, *args):
try:
if not self.already_deleted(targeting_type, *args):
targeting_id = self._already_exists(targeting_type, *args)
if targeting_id:
self.delete_requests.setdefault(targeting_type, []).append(targeting_id)
else:
self.warnings.append('Already deleted at this layer %s.' % targeting_type)
except HttpError as e:
self.warnings.append('Targeting Error: %s' % str(e))
def _get_id(self, options, key, *args):
for option in options:
if option.get('inheritance', 'NOT_INHERITED') != 'NOT_INHERITED': continue
if option['targetingType'] == 'TARGETING_TYPE_CHANNEL':
if option['channelDetails']['channelId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_APP_CATEGORY':
if option['appCategoryDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_APP':
if option['appDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_URL':
if option['urlDetails']['url'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_DAY_AND_TIME':
if (option['dayAndTimeDetails']['dayOfWeek'] == args[0]
and option['dayAndTimeDetails']['startHour'] == args[1]
and option['dayAndTimeDetails']['endHour'] == args[2]
and option['dayAndTimeDetails']['timeZoneResolution'] == args[3]):
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_AGE_RANGE':
if option['ageRangeDetails']['ageRange'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_REGIONAL_LOCATION_LIST':
if option['regionalLocationListDetails']['regionalLocationListId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_PROXIMITY_LOCATION_LIST':
if option['proximityLocationListDetails']['proximityLocationListId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_GENDER':
if option['genderDetails']['gender'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_VIDEO_PLAYER_SIZE':
if option['videoPlayerSizeDetails']['videoPlayerSize'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_USER_REWARDED_CONTENT':
if option['userRewardedContentDetails']['userRewardedContent'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_PARENTAL_STATUS':
if option['parentalStatusDetails']['parentalStatus'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_CONTENT_INSTREAM_POSITION':
if not args: # single value on lookup
return option[key]
elif option['contentInstreamPositionDetails']['contentInstreamPosition'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION':
if not args: # single value on lookup
return option[key]
elif option['contentOutstreamPositionDetails']['contentOutstreamPosition'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_DEVICE_TYPE':
if option['deviceTypeDetails']['deviceType'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_BROWSER':
if option['browserDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_HOUSEHOLD_INCOME':
if option['householdIncomeDetails']['householdIncome'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_ON_SCREEN_POSITION':
if option['onScreenPositionDetails']['onScreenPosition'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_CARRIER_AND_ISP':
if option['carrierAndIspDetails']['displayName'] == args[0] and option['carrierAndIspDetails']['type'] == args[1]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_KEYWORD':
if option['keywordDetails']['keyword'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_NEGATIVE_KEYWORD_LIST':
if option['negativeKeywordListDetails']['negativeKeywordListId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_OPERATING_SYSTEM':
if option['operatingSystemDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_DEVICE_MAKE_MODEL':
if option['deviceMakeModelDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_ENVIRONMENT':
if option['environmentDetails']['environment'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_INVENTORY_SOURCE':
if option['inventorySourceDetails']['inventorySourceId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_CATEGORY': # fix add check for negative flag
if option['categoryDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_VIEWABILITY':
if not args: # single value on lookup
return option[key]
elif option['viewabilityDetails']['viewability'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_AUTHORIZED_SELLER_STATUS':
if not args: # single value on lookup
return option[key]
elif option['authorizedSellerStatusDetails']['authorizedSellerStatus'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_LANGUAGE':
if option['languageDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_GEO_REGION':
if (option['geoRegionDetails']['displayName'] == args[0]
and option['geoRegionDetails']['geoRegionType'] == args[1]):
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_INVENTORY_SOURCE':
if option['inventorySourceGroupDetails']['inventorySourceGroupId'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION':
if 'digitalContentLabelExclusionDetails' in option:
if option['digitalContentLabelExclusionDetails']['contentRatingTier'] == args[0]:
return option[key]
else:
if option['digitalContentLabelDetails']['contentRatingTier'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION':
if 'sensitiveCategoryExclusionDetails' in option and option['sensitiveCategoryExclusionDetails']['sensitiveCategory'] == args[0]:
return option[key]
elif 'sensitiveCategoryDetails' in option and option['sensitiveCategoryDetails']['sensitiveCategory'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_EXCHANGE':
if option['exchangeDetails'].get('exchange') == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_SUB_EXCHANGE':
if option['subExchangeDetails']['displayName'] == args[0]:
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_AUDIENCE_GROUP':
return option[key]
elif option['targetingType'] == 'TARGETING_TYPE_THIRD_PARTY_VERIFIER':
raise NotImplementedError
return None
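# Note on _get_id above: it scans the supplied option/assigned-targeting list,
# skips entries inherited from a parent resource, and returns the requested key
# (targetingOptionId or assignedTargetingOptionId) for the first entry whose
# type-specific details match the lookup arguments; it returns None when no
# entry matches.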
def already_added(self, targeting_type, *args):
token = '%s%s' % (targeting_type, json.dumps(args))
if token in self.add_cache:
return True
else:
self.add_cache.add(token)
return False
def already_deleted(self, targeting_type, *args):
token = '%s%s' % (targeting_type, json.dumps(args))
if token in self.delete_cache:
return True
else:
self.delete_cache.add(token)
return False
def _already_exists(self, targeting_type, *args):
token = '%s%s' % (targeting_type, json.dumps(args))
if token not in self.exists_cache:
self.exists_cache[token] = self.get_assigned_id(targeting_type, *args)
return self.exists_cache[token]
def already_exists(self, targeting_type, *args):
token = '%s%s' % (targeting_type, json.dumps(args))
if token not in self.exists_cache:
self.exists_cache[token] = self.get_assigned_id(targeting_type, *args)
if self.exists_cache[token]:
self.warnings.append('Already set %s.' % targeting_type)
return self.exists_cache[token]
def get_option_list(self, targeting_type):
if targeting_type not in self.options_cache:
self.options_cache[targeting_type] = list(API_DV360(
self.config, self.auth,
iterate=True
).targetingTypes().targetingOptions().list(
advertiserId=str(self.advertiser),
targetingType=targeting_type
).execute())
return self.options_cache[targeting_type]
def get_option_id(self, targeting_type, *args):
return self._get_id(
self.get_option_list(targeting_type),
'targetingOptionId',
*args
)
def get_assigned_list(self, targeting_type):
if targeting_type not in self.assigneds_cache:
if self.lineitem:
self.assigneds_cache[targeting_type] = list(API_DV360(
self.config, self.auth,
iterate=True
).advertisers().lineItems().targetingTypes().assignedTargetingOptions().list(
lineItemId=str(self.lineitem),
advertiserId=str(self.advertiser),
targetingType=targeting_type
).execute())
elif self.partner:
self.assigneds_cache[targeting_type] = list(API_DV360(
self.config, self.auth,
iterate=True
).partners().targetingTypes().assignedTargetingOptions().list(
partnerId=str(self.partner),
targetingType=targeting_type
).execute())
elif self.advertiser:
self.assigneds_cache[targeting_type] = list(API_DV360(
self.config, self.auth,
iterate=True
).advertisers().targetingTypes().assignedTargetingOptions().list(
advertiserId=str(self.advertiser),
targetingType=targeting_type
).execute())
return self.assigneds_cache[targeting_type]
def get_assigned_id(self, targeting_type, *args):
if targeting_type == 'TARGETING_TYPE_AUDIENCE_GROUP': return 'audienceGroup'
else:
return self._get_id(
self.get_assigned_list(targeting_type),
'assignedTargetingOptionId',
*args
)
def get_assigned_audience(self, audience_type):
if self.audience_cache is None:
self.audience_cache = (self.get_assigned_list('TARGETING_TYPE_AUDIENCE_GROUP') or [{}])[0]
return self.audience_cache.get(audience_type, {})
@_check_settable
def add_authorized_seller(self, authorizedSellerStatus):
if not self.already_added('TARGETING_TYPE_AUTHORIZED_SELLER_STATUS'):
self.delete_authorized_seller(authorizedSellerStatus)
self.create_requests.setdefault('TARGETING_TYPE_AUTHORIZED_SELLER_STATUS', [])
self.create_requests['TARGETING_TYPE_AUTHORIZED_SELLER_STATUS'].append({ 'authorizedSellerStatusDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_AUTHORIZED_SELLER_STATUS', authorizedSellerStatus)
}})
@_check_settable
def delete_authorized_seller(self, authorizedSellerStatus):
self._delete('TARGETING_TYPE_AUTHORIZED_SELLER_STATUS', authorizedSellerStatus)
@_check_settable
def add_user_rewarded_content(self, userRewardedContent):
if not self.already_added('TARGETING_TYPE_USER_REWARDED_CONTENT'):
self.delete_user_rewarded_content(userRewardedContent)
self.create_requests.setdefault('TARGETING_TYPE_USER_REWARDED_CONTENT', [])
self.create_requests['TARGETING_TYPE_USER_REWARDED_CONTENT'].append({ 'userRewardedContentDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_USER_REWARDED_CONTENT', userRewardedContent)
}})
@_check_settable
def delete_user_rewarded_content(self, userRewardedContent):
self._delete('TARGETING_TYPE_USER_REWARDED_CONTENT', userRewardedContent)
@_check_settable
def add_exchange(self, exchange):
if not self.already_added('TARGETING_TYPE_EXCHANGE', exchange):
self.delete_exchange(exchange)
self.create_requests.setdefault('TARGETING_TYPE_EXCHANGE', [])
self.create_requests['TARGETING_TYPE_EXCHANGE'].append({ 'exchangeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_EXCHANGE', exchange)
}})
@_check_settable
def delete_exchange(self, exchange):
self._delete('TARGETING_TYPE_EXCHANGE', exchange)
@_check_settable
def add_sub_exchange(self, subExchange):
if not self.already_added('TARGETING_TYPE_SUB_EXCHANGE', subExchange):
if not self.already_exists('TARGETING_TYPE_SUB_EXCHANGE', subExchange):
self.delete_sub_exchange(subExchange)
self.create_requests.setdefault('TARGETING_TYPE_SUB_EXCHANGE', [])
self.create_requests['TARGETING_TYPE_SUB_EXCHANGE'].append({ 'subExchangeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_SUB_EXCHANGE', subExchange)
}})
@_check_settable
def delete_sub_exchange(self, subExchange):
self._delete('TARGETING_TYPE_SUB_EXCHANGE', subExchange)
@_check_settable
def add_channel(self, channelId, negative):
if not self.already_added('TARGETING_TYPE_CHANNEL', channelId):
if not self.already_exists('TARGETING_TYPE_CHANNEL', channelId, negative):
self.delete_channel(channelId)
self.create_requests.setdefault('TARGETING_TYPE_CHANNEL', [])
self.create_requests['TARGETING_TYPE_CHANNEL'].append({ 'channelDetails':{
'channelId': channelId,
'negative':negative
}})
@_check_settable
def delete_channel(self, channelId):
self._delete('TARGETING_TYPE_CHANNEL', channelId)
@_check_settable
def add_inventory_source(self, inventorySourceId):
if not self.already_added('TARGETING_TYPE_INVENTORY_SOURCE', inventorySourceId):
if not self.already_exists('TARGETING_TYPE_INVENTORY_SOURCE', inventorySourceId):
self.delete_inventory_source(inventorySourceId)
self.create_requests.setdefault('TARGETING_TYPE_INVENTORY_SOURCE', [])
self.create_requests['TARGETING_TYPE_INVENTORY_SOURCE'].append({ 'inventorySourceDetails':{
'inventorySourceId': inventorySourceId,
}})
@_check_settable
def delete_inventory_source(self, inventorySourceId):
self._delete('TARGETING_TYPE_INVENTORY_SOURCE', inventorySourceId)
@_check_settable
def add_inventory_group(self, inventorySourceGroupId):
if not self.already_added('TARGETING_TYPE_INVENTORY_SOURCE_GROUP', inventorySourceGroupId):
if not self.already_exists('TARGETING_TYPE_INVENTORY_SOURCE_GROUP', inventorySourceGroupId):
self.delete_inventory_source_group(inventorySourceGroupId)
self.create_requests.setdefault('TARGETING_TYPE_INVENTORY_SOURCE_GROUP', [])
self.create_requests['TARGETING_TYPE_INVENTORY_SOURCE_GROUP'].append({ 'inventorySourceGroupDetails':{
'inventorySourceGroupId': inventorySourceGroupId
}})
@_check_settable
def delete_inventory_group(self, inventorySourceGroupId):
self._delete('TARGETING_TYPE_INVENTORY_SOURCE_GROUP', inventorySourceGroupId)
@_check_settable
def add_url(self, url, negative):
url = self._url_domain(url)
if not self.already_added('TARGETING_TYPE_URL', url):
if not self.already_exists('TARGETING_TYPE_URL', url, negative):
self.delete_url(url)
self.create_requests.setdefault('TARGETING_TYPE_URL', [])
self.create_requests['TARGETING_TYPE_URL'].append({ 'urlDetails':{
'url': url,
'negative':negative
}})
@_check_settable
def delete_url(self, url):
url = self._url_domain(url)
self._delete('TARGETING_TYPE_URL', url)
@_check_settable
def add_app(self, app, negative):
if not self.already_added('TARGETING_TYPE_APP', app):
if not self.already_exists('TARGETING_TYPE_APP', app):
self.delete_app(app)
self.create_requests.setdefault('TARGETING_TYPE_APP', [])
self.create_requests['TARGETING_TYPE_APP'].append({ 'appDetails':{
'appId': app,
'negative':negative
}})
@_check_settable
def delete_app(self, app):
self._delete('TARGETING_TYPE_APP', app)
@_check_settable
def add_app_category(self, displayName, negative):
if not self.already_added('TARGETING_TYPE_APP_CATEGORY', displayName):
if not self.already_exists('TARGETING_TYPE_APP_CATEGORY', displayName, negative):
self.delete_app_category(displayName)
self.create_requests.setdefault('TARGETING_TYPE_APP_CATEGORY', [])
self.create_requests['TARGETING_TYPE_APP_CATEGORY'].append({ 'appCategoryDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_APP_CATEGORY', displayName),
'negative':negative
}})
@_check_settable
def delete_app_category(self, displayName):
self._delete('TARGETING_TYPE_APP_CATEGORY', displayName)
@_check_settable
def add_content_label(self, contentRatingTier):
if not self.already_added('TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION', contentRatingTier):
if not self.already_exists('TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION', contentRatingTier):
self.create_requests.setdefault('TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION', [])
self.create_requests['TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION'].append({ 'digitalContentLabelExclusionDetails':{
'excludedTargetingOptionId': self.get_option_id('TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION', contentRatingTier)
}})
@_check_settable
def delete_content_label(self, contentRatingTier):
self._delete('TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION', contentRatingTier)
@_check_settable
def add_sensitive_category(self, sensitiveCategory):
if not self.already_added('TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION', sensitiveCategory):
if not self.already_exists('TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION', sensitiveCategory):
self.create_requests.setdefault('TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION', [])
self.create_requests['TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION'].append({ 'sensitiveCategoryExclusionDetails':{
'excludedTargetingOptionId': self.get_option_id('TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION', sensitiveCategory)
}})
@_check_settable
def delete_sensitive_category(self, sensitiveCategory):
self._delete('TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION', sensitiveCategory)
@_check_settable
def add_negative_keyword_list(self, negativeKeywordListId):
if not self.already_added('TARGETING_TYPE_NEGATIVE_KEYWORD_LIST', negativeKeywordListId):
if not self.already_exists('TARGETING_TYPE_NEGATIVE_KEYWORD_LIST', negativeKeywordListId):
self.create_requests.setdefault('TARGETING_TYPE_NEGATIVE_KEYWORD_LIST', [])
self.create_requests['TARGETING_TYPE_NEGATIVE_KEYWORD_LIST'].append({ 'negativeKeywordListDetails':{
'negativeKeywordListId': negativeKeywordListId
}})
@_check_settable
def delete_negative_keyword_list(self, negativeKeywordListId):
self._delete('TARGETING_TYPE_NEGATIVE_KEYWORD_LIST', negativeKeywordListId)
@_check_settable
def add_category(self, displayName, negative):
if not self.already_added('TARGETING_TYPE_CATEGORY', displayName):
if not self.already_exists('TARGETING_TYPE_CATEGORY', displayName, negative):
self.create_requests.setdefault('TARGETING_TYPE_CATEGORY', [])
self.create_requests['TARGETING_TYPE_CATEGORY'].append({ 'categoryDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_CATEGORY', displayName),
'negative': negative
}})
@_check_settable
def delete_category(self, displayName):
self._delete('TARGETING_TYPE_CATEGORY', displayName)
@_check_settable
def add_keyword(self, keyword, negative):
if not self.already_added('TARGETING_TYPE_KEYWORD', keyword):
if not self.already_exists('TARGETING_TYPE_KEYWORD', keyword, negative):
self.create_requests.setdefault('TARGETING_TYPE_KEYWORD', [])
self.create_requests['TARGETING_TYPE_KEYWORD'].append({ 'keywordDetails':{
'keyword': keyword,
'negative': negative
}})
@_check_settable
def delete_keyword(self, keyword):
self._delete('TARGETING_TYPE_KEYWORD', keyword)
@_check_settable
def add_age_range(self, ageRange):
if not self.already_added('TARGETING_TYPE_AGE_RANGE', ageRange):
if not self.already_exists('TARGETING_TYPE_AGE_RANGE', ageRange):
self.create_requests.setdefault('TARGETING_TYPE_AGE_RANGE', [])
self.create_requests['TARGETING_TYPE_AGE_RANGE'].append({ 'ageRangeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_AGE_RANGE', ageRange)
}})
@_check_settable
def delete_age_range(self, ageRange):
self._delete('TARGETING_TYPE_AGE_RANGE', ageRange)
@_check_settable
def add_gender(self, gender):
if not self.already_added('TARGETING_TYPE_GENDER', gender):
if not self.already_exists('TARGETING_TYPE_GENDER', gender):
self.create_requests.setdefault('TARGETING_TYPE_GENDER', [])
self.create_requests['TARGETING_TYPE_GENDER'].append({ 'genderDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_GENDER', gender)
}})
@_check_settable
def delete_gender(self, gender):
self._delete('TARGETING_TYPE_GENDER', gender)
@_check_settable
def add_parental_status(self, parentalStatus):
if not self.already_added('TARGETING_TYPE_PARENTAL_STATUS', parentalStatus):
if not self.already_exists('TARGETING_TYPE_PARENTAL_STATUS', parentalStatus):
self.create_requests.setdefault('TARGETING_TYPE_PARENTAL_STATUS', [])
self.create_requests['TARGETING_TYPE_PARENTAL_STATUS'].append({ 'parentalStatusDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_PARENTAL_STATUS', parentalStatus)
}})
@_check_settable
def delete_parental_status(self, parentalStatus):
self._delete('TARGETING_TYPE_PARENTAL_STATUS', parentalStatus)
@_check_settable
def add_household_income(self, householdIncome):
if not self.already_added('TARGETING_TYPE_HOUSEHOLD_INCOME', householdIncome):
if not self.already_exists('TARGETING_TYPE_HOUSEHOLD_INCOME', householdIncome):
self.create_requests.setdefault('TARGETING_TYPE_HOUSEHOLD_INCOME', [])
self.create_requests['TARGETING_TYPE_HOUSEHOLD_INCOME'].append({ 'householdIncomeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_HOUSEHOLD_INCOME', householdIncome)
}})
@_check_settable
def delete_household_income(self, householdIncome):
self._delete('TARGETING_TYPE_HOUSEHOLD_INCOME', householdIncome)
@_check_settable
def add_language(self, displayName, negative):
displayName = displayName.title()
if not self.already_added('TARGETING_TYPE_LANGUAGE', displayName):
if not self.already_exists('TARGETING_TYPE_LANGUAGE', displayName):
self.create_requests.setdefault('TARGETING_TYPE_LANGUAGE', [])
self.create_requests['TARGETING_TYPE_LANGUAGE'].append({ 'languageDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_LANGUAGE', displayName),
'negative':negative
}})
@_check_settable
def delete_language(self, displayName):
displayName = displayName.title()
self._delete('TARGETING_TYPE_LANGUAGE', displayName)
@_check_settable
def add_included_1p_and_3p_audience(self, firstAndThirdPartyAudienceId, recency, group):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedFirstAndThirdPartyAudienceGroups', [])
audience = { "firstAndThirdPartyAudienceId": firstAndThirdPartyAudienceId, "recency": recency }
group = min(max(group, 1), 10)
while len(audiences['includedFirstAndThirdPartyAudienceGroups']) < group:
audiences['includedFirstAndThirdPartyAudienceGroups'].append({'settings':[]})
if audience not in audiences['includedFirstAndThirdPartyAudienceGroups'][group - 1]['settings']:
audiences['includedFirstAndThirdPartyAudienceGroups'][group - 1]['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
@_check_settable
def delete_included_1p_and_3p_audience(self, firstAndThirdPartyAudienceId, recency, group):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedFirstAndThirdPartyAudienceGroups', [])
audience = { "firstAndThirdPartyAudienceId": firstAndThirdPartyAudienceId, "recency": recency }
group = min(max(group, 1), 10)
while len(audiences['includedFirstAndThirdPartyAudienceGroups']) < group:
audiences['includedFirstAndThirdPartyAudienceGroups'].append({'settings':[]})
if audience in audiences['includedFirstAndThirdPartyAudienceGroups'][group - 1]['settings']:
audiences['includedFirstAndThirdPartyAudienceGroups'][group - 1]['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
@_check_settable
def add_excluded_1p_and_3p_audience(self, firstAndThirdPartyAudienceId, recency):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('excludedFirstAndThirdPartyAudienceGroup', { 'settings':[] })
audience = { "firstAndThirdPartyAudienceId": firstAndThirdPartyAudienceId, "recency": recency }
if audience not in audiences['excludedFirstAndThirdPartyAudienceGroup']['settings']:
audiences['excludedFirstAndThirdPartyAudienceGroup']['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def delete_excluded_1p_and_3p_audience(self, firstAndThirdPartyAudienceId, recency):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('excludedFirstAndThirdPartyAudienceGroup', { 'settings':[] })
audience = { "firstAndThirdPartyAudienceId": firstAndThirdPartyAudienceId, "recency": recency }
if audience in audiences['excludedFirstAndThirdPartyAudienceGroup']['settings']:
audiences['excludedFirstAndThirdPartyAudienceGroup']['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def add_included_google_audience(self, googleAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedGoogleAudienceGroup', { 'settings':[] })
audience = { 'googleAudienceId':googleAudienceId }
if audience not in audiences['includedGoogleAudienceGroup']['settings']:
audiences['includedGoogleAudienceGroup']['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def delete_included_google_audience(self, googleAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedGoogleAudienceGroup', { 'settings':[] })
audience = { 'googleAudienceId':googleAudienceId }
if audience in audiences['includedGoogleAudienceGroup']['settings']:
audiences['includedGoogleAudienceGroup']['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def add_excluded_google_audience(self, googleAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('excludedGoogleAudienceGroup', { 'settings':[] })
audience = { 'googleAudienceId':googleAudienceId }
if audience not in audiences['excludedGoogleAudienceGroup']['settings']:
audiences['excludedGoogleAudienceGroup']['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def delete_excluded_google_audience(self, googleAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('excludedGoogleAudienceGroup', { 'settings':[] })
audience = { 'googleAudienceId':googleAudienceId }
if audience in audiences['excludedGoogleAudienceGroup']['settings']:
audiences['excludedGoogleAudienceGroup']['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def add_included_custom_list(self, customListId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedCustomListGroup', { 'settings':[] })
audience = { 'customListId':customListId }
if audience not in audiences['includedCustomListGroup']['settings']:
audiences['includedCustomListGroup']['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def delete_included_custom_list(self, customListId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedCustomListGroup', { 'settings':[] })
audience = { 'customListId':customListId }
if audience in audiences['includedCustomListGroup']['settings']:
audiences['includedCustomListGroup']['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def add_included_combined_audience(self, combinedAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedCombinedAudienceGroup', { 'settings':[] })
audience = { 'combinedAudienceId':combinedAudienceId }
if audience not in audiences['includedCombinedAudienceGroup']['settings']:
audiences['includedCombinedAudienceGroup']['settings'].append(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def delete_included_combined_audience(self, combinedAudienceId):
audiences = self.get_assigned_audience('audienceGroupDetails')
audiences.setdefault('includedCombinedAudienceGroup', { 'settings':[] })
audience = { 'combinedAudienceId':combinedAudienceId }
if audience in audiences['includedCombinedAudienceGroup']['settings']:
audiences['includedCombinedAudienceGroup']['settings'].remove(audience)
self.create_requests['TARGETING_TYPE_AUDIENCE_GROUP'] = { 'audienceGroupDetails':audiences }
self._delete('TARGETING_TYPE_AUDIENCE_GROUP')
@_check_settable
def add_device_type(self, deviceType):
if not self.already_added('TARGETING_TYPE_DEVICE_TYPE', deviceType):
if not self.already_exists('TARGETING_TYPE_DEVICE_TYPE', deviceType):
self.create_requests.setdefault('TARGETING_TYPE_DEVICE_TYPE', [])
self.create_requests['TARGETING_TYPE_DEVICE_TYPE'].append({ 'deviceTypeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_DEVICE_TYPE', deviceType)
}})
@_check_settable
def delete_device_type(self, deviceType):
self._delete('TARGETING_TYPE_DEVICE_TYPE', deviceType)
@_check_settable
def add_make_model(self, displayName, negative):
if not self.already_added('TARGETING_TYPE_DEVICE_MAKE_MODEL', displayName):
if not self.already_exists('TARGETING_TYPE_DEVICE_MAKE_MODEL', displayName, negative): # fix delete of negative
self.create_requests.setdefault('TARGETING_TYPE_DEVICE_MAKE_MODEL', [])
self.create_requests['TARGETING_TYPE_DEVICE_MAKE_MODEL'].append({ 'deviceMakeModelDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_DEVICE_MAKE_MODEL', displayName),
'negative':negative
}})
@_check_settable
def delete_make_model(self, displayName):
self._delete('TARGETING_TYPE_DEVICE_MAKE_MODEL', displayName)
@_check_settable
def add_operating_system(self, displayName, negative):
if not self.already_added('TARGETING_TYPE_OPERATING_SYSTEM', displayName):
if not self.already_exists('TARGETING_TYPE_OPERATING_SYSTEM', displayName, negative): # fix delete of negative
self.create_requests.setdefault('TARGETING_TYPE_OPERATING_SYSTEM', [])
self.create_requests['TARGETING_TYPE_OPERATING_SYSTEM'].append({ 'operatingSystemDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_OPERATING_SYSTEM', displayName),
'negative':negative
}})
@_check_settable
def delete_operating_system(self, displayName):
self._delete('TARGETING_TYPE_OPERATING_SYSTEM', displayName)
@_check_settable
def add_browser(self, displayName, negative):
if not self.already_added('TARGETING_TYPE_BROWSER', displayName):
if not self.already_exists('TARGETING_TYPE_BROWSER', displayName, negative): # fix delete of negative
self.create_requests.setdefault('TARGETING_TYPE_BROWSER', [])
self.create_requests['TARGETING_TYPE_BROWSER'].append({ 'browserDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_BROWSER', displayName),
'negative':negative
}})
@_check_settable
def delete_browser(self, displayName):
self._delete('TARGETING_TYPE_BROWSER', displayName)
@_check_settable
def add_environment(self, environment):
if not self.already_added('TARGETING_TYPE_ENVIRONMENT', environment):
if not self.already_exists('TARGETING_TYPE_ENVIRONMENT', environment):
self.create_requests.setdefault('TARGETING_TYPE_ENVIRONMENT', [])
self.create_requests['TARGETING_TYPE_ENVIRONMENT'].append({ 'environmentDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_ENVIRONMENT', environment),
}})
@_check_settable
def delete_environment(self, environment):
self._delete('TARGETING_TYPE_ENVIRONMENT', environment)
@_check_settable
def add_carrier_and_isp(self, displayName, negative):
lookupName, lookupType = displayName.rsplit(' - ',1)
lookupType = 'CARRIER_AND_ISP_TYPE_%s' % lookupType
if not self.already_added('TARGETING_TYPE_CARRIER_AND_ISP', displayName):
if not self.already_exists('TARGETING_TYPE_CARRIER_AND_ISP', displayName, negative): # fix delete of negative
self.create_requests.setdefault('TARGETING_TYPE_CARRIER_AND_ISP', [])
self.create_requests['TARGETING_TYPE_CARRIER_AND_ISP'].append({ 'carrierAndIspDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_CARRIER_AND_ISP', lookupName, lookupType),
'negative':negative
}})
@_check_settable
def delete_carrier_and_isp(self, displayName):
lookupName, lookupType = displayName.rsplit(' - ',1)
lookupType = 'CARRIER_AND_ISP_TYPE_%s' % lookupType
self._delete('TARGETING_TYPE_CARRIER_AND_ISP', lookupName, lookupType)
@_check_settable
def add_day_and_time(self, dayOfWeek, startHour, endHour, timeZoneResolution):
if not self.already_added('TARGETING_TYPE_DAY_AND_TIME', dayOfWeek, startHour, endHour, timeZoneResolution):
if not self.already_exists('TARGETING_TYPE_DAY_AND_TIME', dayOfWeek, startHour, endHour, timeZoneResolution):
self.create_requests.setdefault('TARGETING_TYPE_DAY_AND_TIME', [])
self.create_requests['TARGETING_TYPE_DAY_AND_TIME'].append({ 'dayAndTimeDetails':{
'dayOfWeek': dayOfWeek,
'startHour': startHour,
'endHour': endHour,
'timeZoneResolution': timeZoneResolution
}})
@_check_settable
def delete_day_and_time(self, dayOfWeek, startHour, endHour, timeZoneResolution):
self._delete('TARGETING_TYPE_DAY_AND_TIME', dayOfWeek, startHour, endHour, timeZoneResolution)
@_check_settable
def add_geo_region(self, displayName, geoRegionType, negative):
if not self.already_added('TARGETING_TYPE_GEO_REGION', displayName):
if not self.already_exists('TARGETING_TYPE_GEO_REGION', displayName, negative): # fix delete of negative
self.create_requests.setdefault('TARGETING_TYPE_GEO_REGION', [])
self.create_requests['TARGETING_TYPE_GEO_REGION'].append({ 'geoRegionDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_GEO_REGION', displayName, geoRegionType),
'negative':negative
}})
@_check_settable
def delete_geo_region(self, displayName):
self._delete('TARGETING_TYPE_GEO_REGION', displayName)
@_check_settable
def add_proximity_location_list(self, proximityLocationListId, proximityRadiusRange):
if not self.already_added('TARGETING_TYPE_PROXIMITY_LOCATION_LIST', proximityLocationListId):
if not self.already_exists('TARGETING_TYPE_PROXIMITY_LOCATION_LIST', proximityLocationListId, proximityRadiusRange): # fix delete of range
self.create_requests.setdefault('TARGETING_TYPE_PROXIMITY_LOCATION_LIST', [])
self.create_requests['TARGETING_TYPE_PROXIMITY_LOCATION_LIST'].append({ 'proximityLocationListDetails':{
'proximityLocationListId':proximityLocationListId,
'proximityRadiusRange':proximityRadiusRange
}})
@_check_settable
def delete_proximity_location_list(self, proximityLocationListId):
self._delete('TARGETING_TYPE_PROXIMITY_LOCATION_LIST', proximityLocationListId)
@_check_settable
def add_regional_location_list(self, regionalLocationListId, negative):
if not self.already_added('TARGETING_TYPE_REGIONAL_LOCATION_LIST', regionalLocationListId):
if not self.already_exists('TARGETING_TYPE_REGIONAL_LOCATION_LIST', regionalLocationListId):
self.create_requests.setdefault('TARGETING_TYPE_REGIONAL_LOCATION_LIST', [])
self.create_requests['TARGETING_TYPE_REGIONAL_LOCATION_LIST'].append({ 'regionalLocationListDetails':{
'regionalLocationListId': regionalLocationListId,
'negative':negative
}})
@_check_settable
def delete_regional_location_list(self, regionalLocationListId):
self._delete('TARGETING_TYPE_REGIONAL_LOCATION_LIST', regionalLocationListId)
@_check_settable
def add_video_player_size(self, videoPlayerSize):
if not self.already_added('TARGETING_TYPE_VIDEO_PLAYER_SIZE', videoPlayerSize):
if not self.already_exists('TARGETING_TYPE_VIDEO_PLAYER_SIZE', videoPlayerSize):
self.create_requests.setdefault('TARGETING_TYPE_VIDEO_PLAYER_SIZE', [])
self.create_requests['TARGETING_TYPE_VIDEO_PLAYER_SIZE'].append({ 'videoPlayerSizeDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_VIDEO_PLAYER_SIZE', videoPlayerSize)
}})
@_check_settable
def delete_video_player_size(self, videoPlayerSize):
self._delete('TARGETING_TYPE_VIDEO_PLAYER_SIZE', videoPlayerSize)
@_check_settable
def add_in_stream_position(self, contentInstreamPosition):
if not self.already_added('TARGETING_TYPE_CONTENT_INSTREAM_POSITION', contentInstreamPosition):
if not self.already_exists('TARGETING_TYPE_CONTENT_INSTREAM_POSITION', contentInstreamPosition):
self.create_requests.setdefault('TARGETING_TYPE_CONTENT_INSTREAM_POSITION', [])
self.create_requests['TARGETING_TYPE_CONTENT_INSTREAM_POSITION'].append({ 'contentInstreamPositionDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_CONTENT_INSTREAM_POSITION', contentInstreamPosition)
}})
@_check_settable
def delete_in_stream_position(self, contentInstreamPosition):
self._delete('TARGETING_TYPE_CONTENT_INSTREAM_POSITION', contentInstreamPosition)
@_check_settable
def add_out_stream_position(self, contentOutstreamPosition):
if not self.already_added('TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION', contentOutstreamPosition):
if not self.already_exists('TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION', contentOutstreamPosition):
self.create_requests.setdefault('TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION', [])
self.create_requests['TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION'].append({ 'contentOutstreamPositionDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION', contentOutstreamPosition)
}})
@_check_settable
def delete_out_stream_position(self, contentOutstreamPosition):
self._delete('TARGETING_TYPE_CONTENT_OUTSTREAM_POSITION', contentOutstreamPosition)
@_check_settable
def add_on_screen_position(self, onScreenPosition):
if not self.already_added('TARGETING_TYPE_ON_SCREEN_POSITION', onScreenPosition):
if not self.already_exists('TARGETING_TYPE_ON_SCREEN_POSITION', onScreenPosition):
self.create_requests.setdefault('TARGETING_TYPE_ON_SCREEN_POSITION', [])
self.create_requests['TARGETING_TYPE_ON_SCREEN_POSITION'].append({ 'onScreenPositionDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_ON_SCREEN_POSITION', onScreenPosition)
}})
@_check_settable
def delete_on_screen_position(self, onScreenPosition):
self._delete('TARGETING_TYPE_ON_SCREEN_POSITION', onScreenPosition)
@_check_settable
def add_viewability(self, viewability):
if not self.already_added('TARGETING_TYPE_VIEWABILITY'):
if not self.already_exists('TARGETING_TYPE_VIEWABILITY', viewability):
self._delete('TARGETING_TYPE_VIEWABILITY')
self.create_requests.setdefault('TARGETING_TYPE_VIEWABILITY', [])
self.create_requests['TARGETING_TYPE_VIEWABILITY'].append({ 'viewabilityDetails':{
'targetingOptionId': self.get_option_id('TARGETING_TYPE_VIEWABILITY', viewability)
}})
@_check_settable
def delete_viewability(self, viewability):
self._delete('TARGETING_TYPE_VIEWABILITY', viewability)
def get_warnings(self):
return self.warnings
def get_body(self):
if self.delete_requests or self.create_requests:
return {
'deleteRequests': [{
'targetingType': k,
'assignedTargetingOptionIds': v
} for k, v in self.delete_requests.items()],
'createRequests': [{
'targetingType': k,
'assignedTargetingOptions': v
} for k, v in self.create_requests.items()]
}
else:
return {}
def execute(self):
body = self.get_body()
if body:
if self.lineitem:
return API_DV360(
self.config, self.auth,
).advertisers().lineItems().bulkEditLineItemAssignedTargetingOptions(
lineItemId=str(self.lineitem),
advertiserId=str(self.advertiser),
).execute()
elif self.partner:
return API_DV360(
self.config, self.auth
).partners().bulkEditPartnerAssignedTargetingOptions(
partnerId=str(self.partner),
).execute()
elif self.advertiser:
return API_DV360(
self.config, self.auth,
).advertisers().bulkEditAdvertiserAssignedTargetingOptions(
advertiserId=str(self.advertiser),
).execute()
| ["[email protected]"] | |
1dfeabcfd018a5f7d5915118ce8e23ab360a2ed8 | cc3b0eba4ade43d099cf00ecf7cabb34f066fd35 | /incomepropertyevaluatorkit/calculator/analyzer.py | b983532d9128772166a2b37b6ac91366a1e9de46 | ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"] | permissive | MikaSoftware/py-incomepropertyevaluatorkit | 221ffbe8c3414dbcc3ece61f40d6400ac090bae2 | f1a05effc80d328c90f7f27ed9b1a6d82492f9d2 | refs/heads/master | 2021-08-23T03:16:54.734993 | 2017-12-02T20:42:37 | 2017-12-02T20:42:37 | 109,617,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,427 | py |
# -*- coding: utf-8 -*-
"""
Python library for performing rental and income property calculations.
See README for more details.
"""
from __future__ import print_function
import sys
import argparse
from datetime import datetime, timedelta
from decimal import Decimal
import math
import numpy as np # Third party library to utilize "irr" function.
from moneyed import Money # Third party library for "Money" datatype.
from mortgagekit.calculator import *
from incomepropertyevaluatorkit.foundation.utils import *
from incomepropertyevaluatorkit.foundation.constants import MAX_YEAR
class FinancialAnalyzer:
"""
Class will take financial information about a rental property and
perform various calculations on it to get investor data.
"""
#--------------------------------------------------------------------------#
# P U B L I C F U N C T I O N S #
#--------------------------------------------------------------------------#
def __init__(self, currency='USD'):
self._currency = currency
self._purchase_price = Money(amount=0, currency=currency)
self._inflation_rate = Money(amount=0, currency=currency)
self._selling_fee_rate = Money(amount=0, currency=currency)
self._buying_fee_rate = Money(amount=0, currency=currency)
self._mortgage_calculator = None
self._mortgage_payment_schedule = None
self._rental_income_dict = {}
self._facility_income_dict = {}
self._expense_dict = {}
self._commercial_income_dict = {}
self._fee_dict = {}
self._capital_improvements_dict = {}
def set_purchase_price(self, purchase_price):
assert isinstance(purchase_price, Money), 'purchase_price is not a Money class: %r' % purchase_price
self._purchase_price = purchase_price
def set_inflation_rate(self, inflation_rate):
assert isinstance(inflation_rate, Decimal), 'inflation_rate is not a Decimal class: %r' % inflation_rate
self._inflation_rate = inflation_rate
def set_selling_fee_rate(self, selling_fee_rate):
self._selling_fee_rate = selling_fee_rate
def set_buying_fee_rate(self, buying_fee_rate):
self._buying_fee_rate = buying_fee_rate
#
def set_mortgage(self, total_amount, down_payment, amortization_year,
annual_interest_rate, payment_frequency, compounding_period,
first_payment_date):
assert isinstance(total_amount, Money), 'total_amount is not a Money class: %r' % total_amount
assert isinstance(down_payment, Money), 'down_payment is not a Money class: %r' % down_payment
assert isinstance(amortization_year, int), 'amortization_year is not a Integer class: %r' % amortization_year
assert isinstance(annual_interest_rate, Decimal), 'annual_interest_rate is not a Decimal class: %r' % annual_interest_rate
assert isinstance(payment_frequency, Decimal), 'payment_frequency is not a Decimal class: %r' % payment_frequency
assert isinstance(compounding_period, Decimal), 'compounding_period is not a Decimal class: %r' % compounding_period
self._mortgage_calculator = MortgageCalculator(
total_amount,
down_payment,
amortization_year,
annual_interest_rate,
payment_frequency,
compounding_period,
first_payment_date
)
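# A minimal usage sketch (illustrative values only; the frequency/compounding
# semantics are assumptions about mortgagekit, everything else follows the
# assertions above):
#
#   analyzer = FinancialAnalyzer(currency='USD')
#   analyzer.set_purchase_price(Money(amount=250000, currency='USD'))
#   analyzer.set_inflation_rate(Decimal('0.025'))
#   analyzer.set_mortgage(
#       total_amount=Money(amount=250000, currency='USD'),
#       down_payment=Money(amount=50000, currency='USD'),
#       amortization_year=25,
#       annual_interest_rate=Decimal('0.04'),
#       payment_frequency=Decimal(12),   # assumed to mean monthly payments
#       compounding_period=Decimal(2),   # assumed to mean semi-annual compounding
#       first_payment_date=datetime(2018, 1, 1)
#   )
#   results = analyzer.perform_analysis()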
def add_rental_income(self, pk, annual_amount_per_unit, frequency, monthly_amount_per_unit, type_id, name_text, number_of_units):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert type(annual_amount_per_unit) is Money, "annual_amount_per_unit is not a Money class: %r" % annual_amount_per_unit
assert type(monthly_amount_per_unit) is Money, "monthly_amount_per_unit is not a Money class: %r" % monthly_amount_per_unit
assert type(frequency) is Decimal, "frequency is not a Decimal class: %r" % frequency
assert type(number_of_units) is Decimal, "monthly_amount_per_unit is not a Decimal class: %r" % number_of_units
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
assert isinstance(number_of_units, Decimal), 'number_of_units is not a Decimal class: %r' % number_of_units
self._rental_income_dict[pk] = {
'pk': pk,
'annual_amount_per_unit': annual_amount_per_unit,
'frequency': frequency,
'monthly_amount_per_unit': monthly_amount_per_unit,
'type_id': type_id,
'name_text': name_text,
'number_of_units': number_of_units
}
def remove_rental_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._rental_income_dict[pk]
except KeyError:
pass
def get_rental_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._rental_income_dict[pk]
except KeyError:
return None
def add_facility_income(self, pk, annual_amount, frequency, monthly_amount, type_id, name_text):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert type(annual_amount) is Money, "annual_amount is not a Money class: %r" % annual_amount
assert type(monthly_amount) is Money, "monthly_amount is not a Money class: %r" % monthly_amount
assert type(frequency) is Decimal, "frequency is not a Decimal class: %r" % frequency
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
self._facility_income_dict[pk] = {
'pk': pk,
'annual_amount': annual_amount,
'frequency': frequency,
'monthly_amount': monthly_amount,
'type_id': type_id,
'name_text': name_text,
}
def remove_facility_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._facility_income_dict[pk]
except KeyError:
pass
def get_facility_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._facility_income_dict[pk]
except KeyError:
return None
def add_expense(self, pk, annual_amount, frequency, monthly_amount, type_id, name_text):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert type(annual_amount) is Money, "annual_amount is not a Money class: %r" % annual_amount
assert type(monthly_amount) is Money, "monthly_amount is not a Money class: %r" % monthly_amount
assert type(frequency) is Decimal, "frequency is not a Decimal class: %r" % frequency
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
self._expense_dict[pk] = {
'pk': pk,
'annual_amount': annual_amount,
'frequency': frequency,
'monthly_amount': monthly_amount,
'type_id': type_id,
'name_text': name_text,
}
def remove_expense(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._expense_dict[pk]
except KeyError:
pass
def get_expense(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._expense_dict[pk]
except KeyError:
return None
def add_commercial_income(self, pk, annual_amount, frequency, monthly_amount, type_id, name_text):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert type(annual_amount) is Money, "annual_amount is not a Money class: %r" % annual_amount
assert type(monthly_amount) is Money, "monthly_amount is not a Money class: %r" % monthly_amount
assert type(frequency) is Decimal, "frequency is not a Decimal class: %r" % frequency
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
self._commercial_income_dict[pk] = {
'pk': pk,
'annual_amount': annual_amount,
'frequency': frequency,
'monthly_amount':monthly_amount,
'type_id': type_id,
'name_text': name_text,
}
def remove_commercial_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._commercial_income_dict[pk]
except KeyError:
pass
def get_commercial_income(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._commercial_income_dict[pk]
except KeyError:
return None
def add_purchase_fee(self, pk, name_text, amount):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
assert isinstance(amount, Money), "amount is not a Money class: %r" % amount
self._fee_dict[pk] = {
'pk': pk,
'name_text': name_text,
'amount': amount
}
def get_purchase_fee(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._fee_dict[pk]
except KeyError:
return None
def remove_purchase_fee(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._fee_dict[pk]
except KeyError:
pass
def add_capital_improvement(self, pk, name_text, amount):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
assert isinstance(name_text, str), 'name_text is not a String class: %r' % name_text
assert isinstance(amount, Money), "amount is not a Money class: %r" % amount
self._capital_improvements_dict[pk] = {
'pk': pk,
'name_text': name_text,
'amount': amount
}
def get_capital_improvement(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
return self._capital_improvements_dict[pk]
except KeyError:
return None
def remove_capital_improvement(self, pk):
assert isinstance(pk, int), 'pk is not a Integer class: %r' % pk
try:
del self._capital_improvements_dict[pk]
except KeyError:
pass
def perform_analysis(self):
# // Steps 1-3:
self.perform_computation_on_mortgage()
# // Step 4: Perform a summation/subtraction on all the information to get
# // aggregate data.
self.perform_computation_on_analysis()
        # // Step 5: Analyze various variables for the financial analysis
self.perform_computation_on_annual_projections()
# STEP 6: Return computations summary from our analysis.
return {
'purchase_price': self._purchase_price,
'inflation_rate': self._inflation_rate,
'selling_fee_rate': self._selling_fee_rate,
'buying_fee_rate': self._buying_fee_rate,
'rental_incomes': self._rental_income_dict,
'facility_incomes': self._facility_income_dict,
'expenses': self._expense_dict,
'commercial_incomes': self._commercial_income_dict,
'purchase_fees': self._fee_dict,
'capital_improvements': self._capital_improvements_dict,
'mortgage': {
'interest_rate_per_payment_frequency': self._mortgage_calculator.get_interest_rate_per_payment_frequency(),
'total_number_of_payments_per_frequency': self._mortgage_calculator.get_total_number_of_payments_per_frequency(),
'mortgage_payment_per_payment_frequency': self._mortgage_calculator.get_mortgage_payment_per_payment_frequency(),
'monthly_mortgage_payment': self._mortgage_calculator.get_monthly_mortgage_payment(),
'annual_mortgage_payment': self._mortgage_calculator.get_annual_mortgage_payment(),
'percent_of_loan_financed': self._mortgage_calculator.get_percent_of_loan_financed(),
'schedule': self._mortgage_payment_schedule
},
'analysis': {
'monthly_rental_income': self._monthly_rental_income,
'annual_rental_income': self._annual_rental_income,
'monthly_facility_income': self._monthly_facility_income,
'annual_facility_income': self._annual_facility_income,
'monthly_expense': self._monthly_expense,
'annual_expense': self._annual_expense,
'monthly_gross_income': self._monthly_gross_income,
'annual_gross_income': self._annual_gross_income,
'monthly_net_income': self._monthly_net_income,
'annual_net_income': self._annual_net_income,
'monthly_cash_flow': self._monthly_cash_flow,
'annual_cash_flow': self._annual_cash_flow,
'purchase_fees_amount': self._purchase_fees_amount,
'capital_improvements_amount': self._capital_improvements_amount,
'initial_investment_amount': self._initial_investment_amount,
'cap_rate_with_mortgage': self._cap_rate_with_mortgage,
'cap_rate_without_mortgage': self._cap_rate_without_mortgage
},
'annual_projections': self._annual_projections
}
#--------------------------------------------------------------------------#
# P R I V A T E F U N C T I O N S #
#--------------------------------------------------------------------------#
def get_total_rental_income_amount(self):
"""
Function sums "monthly_amount" and "annual_amount" in the
"Rental Income" objects of this analyzer.
"""
total_monthly_amount = Money(amount=0, currency=self._currency)
total_annual_amount = Money(amount=0, currency=self._currency)
keys = self._rental_income_dict.keys()
for key in keys:
# Get our object.
rental_income = self._rental_income_dict[key]
# Get the amounts.
monthly_amount_per_unit = rental_income['monthly_amount_per_unit']
annual_amount_per_unit = rental_income['annual_amount_per_unit']
number_of_units = rental_income['number_of_units']
# Sum
total_monthly_amount += monthly_amount_per_unit * number_of_units
total_annual_amount += annual_amount_per_unit * number_of_units
return {
'monthly': total_monthly_amount,
'annual': total_annual_amount
}
def get_total_facility_income_amount(self):
"""
Function sums "monthly_amount" and "annual_amount" in the
"Facility Income" objects of this analyzer.
"""
total_monthly_amount = Money(amount=0, currency=self._currency)
total_annual_amount = Money(amount=0, currency=self._currency)
keys = self._facility_income_dict.keys()
for key in keys:
# Get our object.
facility_income = self._facility_income_dict[key]
# Get the amounts & sum.
total_monthly_amount += facility_income['monthly_amount']
total_annual_amount += facility_income['annual_amount']
return {
'monthly': total_monthly_amount,
'annual': total_annual_amount
}
def get_total_expense_amount(self):
"""
Function sums "monthly_amount" and "annual_amount" in the
"Expense" objects of this analyzer.
"""
total_monthly_amount = Money(amount=0, currency=self._currency)
total_annual_amount = Money(amount=0, currency=self._currency)
keys = self._expense_dict.keys()
for key in keys:
# Get our object.
expense = self._expense_dict[key]
# Get the amounts & sum.
total_monthly_amount += expense['monthly_amount']
total_annual_amount += expense['annual_amount']
return {
'monthly': total_monthly_amount,
'annual': total_annual_amount
}
def get_total_commercial_income_amount(self):
"""
Function sums "monthly_amount" and "annual_amount" in the
"commercial Income" objects of this analyzer.
"""
total_monthly_amount = Money(amount=0, currency=self._currency)
total_annual_amount = Money(amount=0, currency=self._currency)
keys = self._commercial_income_dict.keys()
for key in keys:
# Get our object.
commercial_income = self._commercial_income_dict[key]
# Get the amounts & sum.
total_monthly_amount += commercial_income['monthly_amount']
total_annual_amount += commercial_income['annual_amount']
return {
'monthly': total_monthly_amount,
'annual': total_annual_amount
}
def get_total_gross_income_amount(self):
# Compute the individual totals.
commercial_income_total = self.get_total_commercial_income_amount()
rental_income_total = self.get_total_rental_income_amount()
facility_income_total = self.get_total_facility_income_amount()
# Compute the aggregate totals.
total_monthly_amount = commercial_income_total['monthly'] + rental_income_total['monthly'] + facility_income_total['monthly']
total_annual_amount = commercial_income_total['annual'] + rental_income_total['annual'] + facility_income_total['annual']
# Return results.
return {
'monthly': total_monthly_amount,
'annual': total_annual_amount
}
def get_total_purchase_fee_amount(self):
"""
        Function sums the "amount" field of each "Purchase Fee" object
        of this analyzer.
"""
total_amount = Money(amount=0, currency=self._currency)
keys = self._fee_dict.keys()
for key in keys:
# Get our object.
purchase_fee = self._fee_dict[key]
# Get the amounts & sum.
total_amount += purchase_fee['amount']
return total_amount
def get_net_income_without_mortgage(self):
gross_income_info = self.get_total_gross_income_amount()
expense_info = self.get_total_expense_amount()
return {
'monthly': gross_income_info['monthly'] - expense_info['monthly'],
'annual': gross_income_info['annual'] - expense_info['annual'],
}
def get_net_income_with_mortgage(self):
net_income_info = self.get_net_income_without_mortgage()
monthly_mortgage_payment = self._mortgage_calculator.get_monthly_mortgage_payment()
annual_mortgage_payment = self._mortgage_calculator.get_annual_mortgage_payment()
return {
'monthly': net_income_info['monthly'] - monthly_mortgage_payment,
'annual': net_income_info['annual'] - annual_mortgage_payment
}
def get_total_capital_improvements_amount(self):
total_amount = Money(amount=0, currency=self._currency)
keys = self._capital_improvements_dict.keys()
for key in keys:
# Get our object.
capital_improvement = self._capital_improvements_dict[key]
# Get the amounts & sum.
total_amount += capital_improvement['amount']
return total_amount
def get_total_initial_investment_amount(self):
total_purchase_fee = self.get_total_purchase_fee_amount()
total_capital_improvement = self.get_total_capital_improvements_amount()
return total_purchase_fee + total_capital_improvement
def get_cap_rate_with_mortgage_expense_included(self):
if self._purchase_price.amount == 0: # Defensive Code: Cannot divide by zero.
return Money(amount=0, currency=self._currency)
cash_flow_info = self.get_net_income_with_mortgage()
cap_rate = cash_flow_info['annual'].amount / self._purchase_price.amount
return Decimal(cap_rate * 100)
def get_cap_rate_with_mortgage_expense_excluded(self):
if self._purchase_price.amount == 0: # Defensive Code: Cannot divide by zero.
return Money(amount=0, currency=self._currency)
cash_flow_info = self.get_net_income_without_mortgage()
cap_rate = cash_flow_info['annual'].amount / self._purchase_price.amount
        return Decimal(cap_rate * 100)
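    # Worked example for the two cap-rate methods above (illustrative): an annual
    # net income of 12,000 against a 200,000 purchase price gives
    # 12,000 / 200,000 = 0.06, i.e. a cap rate of 6%.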
def perform_computation_on_mortgage(self):
self._mortgage_payment_schedule = self._mortgage_calculator.get_mortgage_payment_schedule()
def perform_computation_on_analysis(self):
total_amount = self.get_total_rental_income_amount()
self._monthly_rental_income = total_amount['monthly']
self._annual_rental_income = total_amount['annual']
total_amount = self.get_total_facility_income_amount()
self._monthly_facility_income = total_amount['monthly']
self._annual_facility_income = total_amount['annual']
total_amount = self.get_total_expense_amount()
self._monthly_expense = total_amount['monthly']
self._annual_expense = total_amount['annual']
total_amount = self.get_total_gross_income_amount()
self._monthly_gross_income = total_amount['monthly']
self._annual_gross_income = total_amount['annual']
total_amount = self.get_net_income_without_mortgage()
self._monthly_net_income = total_amount['monthly']
self._annual_net_income = total_amount['annual']
cash_flow_info = self.get_net_income_with_mortgage()
self._monthly_cash_flow = cash_flow_info['monthly']
self._annual_cash_flow = cash_flow_info['annual']
total_amount = self.get_total_purchase_fee_amount()
self._purchase_fees_amount = total_amount
total_amount = self.get_total_capital_improvements_amount()
self._capital_improvements_amount = total_amount
self._initial_investment_amount = self.get_total_initial_investment_amount()
self._cap_rate_with_mortgage = self.get_cap_rate_with_mortgage_expense_included()
self._cap_rate_without_mortgage = self.get_cap_rate_with_mortgage_expense_excluded()
def debt_remaining_at_eoy(self, year, payment_schedule, mortgage_calculator):
# Note: We need to get how many pay cycles there will be per year.
payment_frequency_integer = mortgage_calculator.get_payment_frequency()
# Calculate the index position of where the record we're searching for is located.
        index = year * payment_frequency_integer - 1
index = int(index)
if index >= len(payment_schedule):
return Money(amount=0, currency=self._currency)
# Get our record.
loan_balance = payment_schedule[index]['loan_balance']
return loan_balance
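    # Example for debt_remaining_at_eoy above (illustrative): with monthly payments
    # (payment frequency 12) and year=3, index = 3 * 12 - 1 = 35, i.e. the loan
    # balance recorded on the 36th (final) payment of year three.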
def perform_computation_on_annual_projections(self):
"""
Note: You need to run "perform_computation_on_mortgage" before running
this function.
"""
# Variable which will hold all our projections on a per year basis.
annual_projections = []
# Calculate and extract values we'll be using throughout our computation.
mortgage_payment_schedule = self._mortgage_payment_schedule
mortgage_calculator = self._mortgage_calculator
inflation_rate = self._inflation_rate
annual_net_income_with_mortgage_info = self.get_net_income_with_mortgage()
annual_net_income_without_mortgage_info = self.get_net_income_without_mortgage()
sales_price = self._purchase_price
selling_fee_rate = self._selling_fee_rate
initial_investment_amount = self.get_total_initial_investment_amount()
# To calculate "IRR", we will need to store the initial investment
        # (negative) and then add all the cash flows afterwards (positive).
cash_flow_array = []
negative_initial_investment_amount = initial_investment_amount * Decimal(-1)
cash_flow_array.append(negative_initial_investment_amount)
# Variable stores the previous years cash flow value.
previous_years_cash_flow = Money(amount=0, currency=self._currency)
for year in range_inclusive(1, MAX_YEAR):
# Generic Calculations
#------------------------------------------------------
# Calculate how much debt we have remaining to pay off.
loan_balance = self.debt_remaining_at_eoy(year, mortgage_payment_schedule, mortgage_calculator)
            # Defensive Coding: cannot have a negative remaining loan balance.
if loan_balance.amount < 0:
loan_balance = Money(amount=0, currency=self._currency) # Automatically set to zero if this happens.
# Calculate how much money we have coming in at the end of the year and
# apply appreciation to it.
cash_flow = Money(amount=0, currency=self._currency)
appreciated_cash_flow = Money(amount=0, currency=self._currency)
if loan_balance.amount > 0:
cash_flow = annual_net_income_with_mortgage_info['annual']
appreciated_cash_flow = appreciated_value(cash_flow, year, inflation_rate)
else:
cash_flow = annual_net_income_without_mortgage_info['annual']
appreciated_cash_flow = appreciated_value(cash_flow, year, inflation_rate)
# Calculate our new sales price
appreciated_sales_price = appreciated_value(sales_price, year, inflation_rate)
# Calculate legal & realty domain fees
fees = sales_price * selling_fee_rate
appreciated_fees = appreciated_value(fees, year, inflation_rate)
# Calculate the proceeds of sale
proceeds_of_sale = appreciated_sales_price - appreciated_fees
proceeds_of_sale = proceeds_of_sale - loan_balance
# Calculate the total return
total_return = proceeds_of_sale - appreciated_cash_flow
# Calculate the return on investment
roi_rate = return_on_investment(initial_investment_amount, total_return)
roi_percent = roi_rate * Decimal(100.0)
# Calculate Annualized Return on Investment (1 of 2)
#---------------------------------------------------
# STEP 1: The previous value needs to be the cash flow if it hasn't
# been already defined.
if previous_years_cash_flow == Money(amount=0, currency=self._currency):
previous_years_cash_flow = cash_flow
            # STEP 2: Calculate the 'net proceeds from sales' variable, which
            # is essentially: (previous year's cash flow + proceeds of sale).
net_processed_from_sales = previous_years_cash_flow + proceeds_of_sale
# STEP 3: Add the final object to our cash flow array of payments.
cash_flow_array.append(net_processed_from_sales) # IRR Code 1 of 2
# STEP 4: Calculate our IRR.
float_cash_flow_array = []
for value in cash_flow_array: # Convert from Decimal to Float.
float_cash_flow_array.append(value.amount)
# Use the python 'numpy' library to get the 'irr' functionality
# instead of implementing it ourselves.
irr_rate = np.irr(float_cash_flow_array)
irr_percent = irr_rate * 100
# Update the MODEL with the following values
#---------------------------------------------------
annual_projections.append({
'year': year,
'debt_remaining': loan_balance,
'sales_price': appreciated_sales_price,
'legal_fees': appreciated_fees,
'cash_flow': appreciated_cash_flow,
'initial_investment': initial_investment_amount,
'proceeds_of_sale': proceeds_of_sale,
'total_return': total_return,
'roi_rate': roi_rate,
'roi_percent': roi_percent,
'annualized_roi_rate': irr_rate,
'annualized_roi_percent': irr_percent
})
# Calculate Annualized Return on Investment (2 of 2)
#----------------------------------------------------
# Update the cashFlow
            # However, this is not the last year, so remove the total return
            # (the last object) and append this year's cash flow instead.
del cash_flow_array[-1] # IRR Code 2 of 2 - Remove last object.
            cash_flow_array.append(previous_years_cash_flow)
previous_years_cash_flow = appreciated_cash_flow
        # Return the annual projections we have computed in this function.
self._annual_projections = annual_projections
| [
"[email protected]"
]
| |
f19e8b39e47dd7234e6a7ee6fc09473d1b1e2a5a | edc13abeb401541cba52608b484fa8ba150d57d6 | /hwtLib/amba/axis_comp/frame_join/_join.py | d24a8793bf2f83aba1c0ed47214bd5bee600db63 | [
"MIT"
]
| permissive | mfkiwl/hwtLib | 2cbba0b1bc28119c3f0425af9a2f0d7b780fc9a3 | fe81cac7d7a117c18635a3d3b336508148282aa5 | refs/heads/master | 2023-03-10T12:06:12.135902 | 2021-02-24T08:04:11 | 2021-02-24T08:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,680 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import inf
from typing import Optional, List, Callable, Tuple
from hwt.code import If, Switch, SwitchLogic, Or, And
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.math import log2ceil
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.unit import Unit
from hwtLib.abstract.frame_utils.alignment_utils import FrameAlignmentUtils
from hwtLib.abstract.frame_utils.join.fsm import input_B_dst_to_fsm
from hwtLib.abstract.frame_utils.join.state_trans_item import StateTransItem
from hwtLib.amba.axis import AxiStream
from hwtLib.amba.axis_comp.frame_join.input_reg import FrameJoinInputReg
from pyMathBitPrecise.bit_utils import bit_list_to_int
class AxiS_FrameJoin(Unit):
"""
Join frames from multiple input streams and use keep signal
to remove invalid bytes from body of the final packet.
Can be also used to translate alignment of data.
:note: delay=0
:note: This component generates different frame joining logic
for each specific case of data alignment, cunk size, frame lens, etc.
which can happen based on configuration. This means that the implementation
can be just straight wire or very complicated pipelined shift logic.
:note: The figure is ilustrative
.. aafig::
+---+---+ +---+---+ +---+---+
| X | 1 | | 3 | 4 | | 1 | 2 |
+---+---+ + +---+---+ -> +---+---+
| 2 | X | | X | X | | 3 | 4 |
+---+---+ +---+---+ +---+---+
.. hwt-autodoc::
"""
def _config(self):
self.T = Param(HStruct(
(HStream(Bits(8), frame_len=(1, inf),
start_offsets=[0]), "f0"),
(HStream(Bits(16), frame_len=(1, 1)), "f1"),
))
AxiStream._config(self)
self.DATA_WIDTH = 16
self.USE_KEEP = True
self.OUT_OFFSET = Param(0)
def _declr(self):
assert self.USE_KEEP
t = self.T
assert isinstance(t, HStruct)
word_bytes = self.word_bytes = self.DATA_WIDTH // 8
input_cnt = self.input_cnt = len(t.fields)
streams = [f.dtype for f in t.fields]
fju = FrameAlignmentUtils(word_bytes, self.OUT_OFFSET)
input_B_dst = fju.resolve_input_bytes_destinations(
streams)
self.state_trans_table = input_B_dst_to_fsm(
word_bytes, input_cnt, input_B_dst)
addClkRstn(self)
with self._paramsShared():
self.dataOut = AxiStream()._m()
self.dataIn = HObjList(AxiStream() for _ in range(self.input_cnt))
def generate_input_register(self, input_i, reg_cnt):
in_reg = FrameJoinInputReg()
in_reg._updateParamsFrom(self)
in_reg.REG_CNT = reg_cnt
setattr(self, f"in_reg{input_i:d}", in_reg)
in_reg.dataIn(self.dataIn[input_i])
return in_reg.regs, in_reg.keep_masks, in_reg.ready
def generate_output_byte_mux(self, regs):
out_mux_values = [set() for _ in range(self.word_bytes)]
for st in self.state_trans_table.state_trans:
for stt in st:
for o_mux_val, out_mux_val_set in zip(stt.out_byte_mux_sel,
out_mux_values):
if o_mux_val is not None:
out_mux_val_set.add(o_mux_val)
out_mux_values = [sorted(x) for x in out_mux_values]
def index_byte(sig, byte_i):
return sig[(byte_i+1)*8:byte_i*8]
def get_in_byte(input_i, time_offset, byte_i):
return index_byte(regs[input_i][time_offset].data, byte_i)
def data_drive(out_B, out_strb_b, input_i, time_offset, byte_i):
res = [
out_B(get_in_byte(input_i, time_offset, byte_i))
]
if self.USE_STRB:
res.append(
out_strb_b(regs[input_i][time_offset].strb[byte_i])
)
return res
out_byte_sel = []
for out_B_i, out_byte_mux_vals in enumerate(out_mux_values):
# +1 because last value is used to invalidate data
sel_w = log2ceil(len(out_byte_mux_vals) + 1)
sel = self._sig(f"out_byte{out_B_i:d}_sel", Bits(sel_w))
out_byte_sel.append(sel)
out_B = self._sig(f"out_byte{out_B_i:d}", Bits(8))
index_byte(self.dataOut.data, out_B_i)(out_B)
if self.USE_STRB:
out_strb_b = self._sig(f"out_strb{out_B_i:d}")
self.dataOut.strb[out_B_i](out_strb_b)
else:
out_strb_b = None
sw = Switch(sel).add_cases(
(i, data_drive(out_B, out_strb_b, *val))
for i, val in enumerate(out_byte_mux_vals))
            # :note: the default case is there for the failure case where
            # sel has an unpredicted value
default_case = [out_B(None)]
if self.USE_STRB:
default_case.append(out_strb_b(0))
sw.Default(*default_case)
return out_byte_sel, out_mux_values
@staticmethod
def add_cond_bit(cond, bit, bit_val):
if bit_val is None:
return
if bit_val == 0:
bit = ~bit
cond.append(bit)
def state_trans_cond(self, sst: StateTransItem, input_regs):
cond = []
assert len(sst.input) == len(input_regs)
for in_metas, in_regs in zip(sst.input, input_regs):
assert len(in_metas) == len(in_regs)
for in_meta, in_reg in zip(in_metas, in_regs):
for k_i, k in enumerate(in_meta.keep):
self.add_cond_bit(cond, in_reg.keep[k_i], k)
self.add_cond_bit(cond, in_reg.relict, in_meta.relict)
self.add_cond_bit(cond, in_reg.last, in_meta.last)
return And(*cond)
def get_conds_for_unique_values(self, st_ts: List[StateTransItem],
input_regs,
key: Callable[[StateTransItem], None]):
# output value : List[RtlSignal]
value_conds = {}
for st_t in st_ts:
k = key(st_t)
cond_list = value_conds.setdefault(k, [])
cond = self.state_trans_cond(st_t, input_regs)
cond_list.append(cond)
return [(Or(*v), k) for k, v in value_conds.items()]
def _generate_driver_for_state_trans_dependent_out(
self, st_transs: List[StateTransItem],
input_regs,
value_getter: Callable[[StateTransItem], object],
connect_out_fn,
make_defult_case: Optional[Callable[[], object]]):
"""
specific variant of :func:`generate_driver_for_state_trans_dependent_out` for a single state
"""
cases = []
for cond, value in self.get_conds_for_unique_values(
st_transs, input_regs,
key=value_getter):
cases.append((cond, connect_out_fn(value)))
if make_defult_case is None:
return SwitchLogic(cases)
else:
return SwitchLogic(cases, default=make_defult_case())
def generate_driver_for_state_trans_dependent_out(
self, st_reg: Optional[RtlSignal],
state_trans: List[List[StateTransItem]],
input_regs,
value_getter: Callable[[StateTransItem], object],
connect_out_fn,
make_defult_case=None):
"""
Construct driver for output signal which is driven from input registers
and state transition logic
"""
cases = []
for st_i, st_transs in enumerate(state_trans):
case = self._generate_driver_for_state_trans_dependent_out(
st_transs, input_regs, value_getter, connect_out_fn,
make_defult_case)
if st_reg is None:
# single state variant without any st_reg
assert len(state_trans) == 1
return case
cases.append((st_i, case))
mux = Switch(st_reg).add_cases(cases)
if make_defult_case is not None:
mux.Default(make_defult_case())
return mux
def generate_fsm(self, input_regs, out_sel: List[RtlSignal],
out_mux_values: List[List[Tuple[int, int, int]]],
in_keep_masks: List[List[RtlSignal]],
ready: List[RtlSignal]):
state_trans = self.state_trans_table.state_trans
# next state logic (only i required)
st_cnt = len(state_trans)
assert st_cnt > 0
if st_cnt > 1:
state = self._reg("state", Bits(log2ceil(st_cnt)), def_val=0)
# state next logic
If(self.dataOut.ready,
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: stt.state_next,
lambda v: state(v))
)
else:
state = None
# out_sel driver
for out_B_i, (_out_mux_values, _out_sel) in enumerate(zip(out_mux_values, out_sel)):
def connect_out_sel(v):
if v is None:
v = len(_out_mux_values)
else:
v = _out_mux_values.index(v)
return _out_sel(v)
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: stt.out_byte_mux_sel[out_B_i],
connect_out_sel,
lambda: _out_sel(None),
)
# out.keep driver
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: tuple(stt.output_keep),
lambda v: self.dataOut.keep(bit_list_to_int(v)),
lambda: self.dataOut.keep(None)
)
# out.last driver
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: stt.last,
lambda v: self.dataOut.last(v),
lambda: self.dataOut.last(None)
)
# out.valid
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: any(o is not None for o in stt.out_byte_mux_sel),
lambda v: self.dataOut.valid(v),
lambda: self.dataOut.valid(0),
)
# in.ready driver
for in_i, _ready in enumerate(ready):
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: stt.input_rd[in_i],
lambda v: _ready(self.dataOut.ready & v),
lambda: _ready(0),
)
for in_i, in_keeps in enumerate(in_keep_masks):
for in_t, in_keep in enumerate(in_keeps):
self.generate_driver_for_state_trans_dependent_out(
state, state_trans, input_regs,
lambda stt: tuple(stt.input_keep_mask[in_i][in_t]),
lambda v: in_keep(bit_list_to_int(v)),
lambda: in_keep(0)
)
def _impl(self):
regs = []
keep_masks = []
ready = []
max_lookahead_for_input = self.state_trans_table.max_lookahead_for_input
        # lookahead specifies how many words from the inputs have to be loaded
        # in order to resolve the output word; it corresponds to the number of input registers - 1
for input_i, stage_cnt in enumerate(max_lookahead_for_input):
_regs, _keep_masks, _ready = self.generate_input_register(
input_i, stage_cnt + 1)
regs.append(_regs)
keep_masks.append(_keep_masks)
ready.append(_ready)
out_sel, out_mux_values = self.generate_output_byte_mux(regs)
self.generate_fsm(regs, out_sel, out_mux_values, keep_masks, ready)
propagateClkRstn(self)
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = AxiS_FrameJoin()
D_B = 4
u.DATA_WIDTH = 8 * D_B
u.USE_STRB = True
u.T = HStruct(
(HStream(Bits(8*1), (1, inf), [0, 1, 2, 3]), "frame0"),
(HStream(Bits(8*4), (1, 1), [0]), "frame1"),
)
print(to_rtl_str(u))
| [
"[email protected]"
]
| |
a77bf7cae50058808c751ebac3a9f47a4889aeef | c51c92dc1ba350b821899715cd16ba0b8d67653c | /dlmPython/dlm_mod.py | 35250f347c78335a3bad42287d6e1e41452c3dfd | [
"MIT"
]
| permissive | luiarthur/dlmPython | a0c2196d9753e010d4417afa6a9e439966c2bb8d | c3d6328b1260c795759637cd8d26d3f79febd950 | refs/heads/master | 2021-01-19T21:40:43.987505 | 2019-11-14T16:50:00 | 2019-11-14T16:50:00 | 88,685,692 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | import numpy as np
from .lego import E, J, join
from .dlm_uni import dlm_uni
def arma(ar=[], ma=[], tau2=1, V=None):
"""
ARMA component for DLM
- ar: list of ar coefficients
- ma: list of ma coefficients
- tau2: variance for evolution matrix
- V: variance for observations
Note that there is no discount option here because W is assumed to be
    a matrix of zeros but with W_{1,1} = tau2
see West & Prado (p.75)
"""
    assert not (len(ar) == 0 and len(ma) == 0), "You must specify at least one of 'ar' or 'ma'!"
p = len(ar)
q = len(ma)
m = max(p, q+1)
phi = join(np.array(ar), np.zeros(m-p))
rest = np.vstack( (np.eye(m-1), np.zeros(m-1)) )
G = np.column_stack( (phi, rest) )
psi = join(np.array(ma), np.zeros(m-1-q))
omega = np.asmatrix(join(1, psi)).transpose()
W = tau2 * omega * omega.transpose()
return dlm_uni(F=E(m), G=G, V=V, W=W)
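# Example for arma() above (illustrative): arma(ar=[a1, a2], ma=[b1]) gives p=2, q=1,
# so m = max(2, 2) = 2, F = E(2), G = [[a1, 1], [a2, 0]], omega = [1, b1]^T and
# W = tau2 * omega * omega^T.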
def poly(order=1, V=None, W=None, discount=None):
"""
Polynomial trend component for DLM
- order: order 0 polynomial => mean forecast function (random walk model)
order 1 polynomial => linear forecast function
order 2 polynomial => quadratic forecast function
"""
assert order >= 0, "order needs to be > 0"
p = order + 1
return dlm_uni(F=E(p), G=J(p), V=V, W=W, discount=discount)
def seasonal():
return NotImplemented
def reg():
"""
Creates dlm model for regression
"""
return NotImplemented
| [
"[email protected]"
]
| |
f07b402cb9a7603154f115821368167dcb9a18cc | 4472e40c53ca3e1df4f9e477a6268133309b7597 | /_unittests/ut_module/test_r.py | d4017e655d6fc616855c64c993b23fc27ae187d5 | [
"MIT"
]
| permissive | amoussoubaruch/ensae_teaching_cs | 289729742608da064f07a79b10cf6cce48de1b51 | 313a6ccb8756dbaa4c52724839b69af8a5f4476e | refs/heads/master | 2021-01-16T19:31:49.734583 | 2016-09-09T08:29:58 | 2016-09-09T08:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | """
@brief test log(time=0s)
"""
import sys
import os
import unittest
import warnings
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from src.ensae_teaching_cs.faq.faq_jupyter import r_and_notebook
class TestR (unittest.TestCase):
def test_r(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if "R_HOME" not in os.environ:
warnings.warn("R is not installed")
return
assert r_and_notebook()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
016650fdb1063f4afb72aea3f846462aea5c8cf0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2254/60636/258712.py | 5d40a74827de64da87dac87a1dc8edd1768d7ecf | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | f_r=input().split(" ")
f=int(f_r[0])
r=int(f_r[1])
sources=[]
for i in range(f):
source=[]
for j in range(f):
source.append("0")
sources.append(source)
for i in range(r):
x=input().split(" ")
sources[int(x[0])-1][int(x[1])-1]="1"
sources[int(x[1])-1][int(x[0])-1]="1"
count_1=0
count_odd=0
for i in range(len(sources)):
count=0
for j in sources[i]:
if j=="1":
count=count+1
if count==1:
count_1+=1
elif count%2==1:
count_odd+=1
if count_1!=0:
if(count_1%2==0):
print(int(count_1/2))
else:
if(count_odd%2==0):
print(int(count_1/2)+int((count_odd)/2)+1)
else:
print(int(count_1/2))
else:
print(int((count_odd+1)/2))
| [
"[email protected]"
]
| |
c8712f660e267eac3ed0a4a151298d3036d8637c | 696dec6a8d1eba189d36049afedec36da47c08f3 | /models/_model_core_utils/__init__.py | fe191e1f22cc8de28fd6c2603d2190a44aeef2ff | []
| no_license | JoelRaymann/polyp-segmentation | d99079f56bb3ae0886fb4c610c4abcc420137781 | 38da6c8bf47df2d2382d31f04faf63649b7d8ab0 | refs/heads/master | 2023-04-10T06:17:34.720237 | 2021-04-14T22:04:36 | 2021-04-14T22:04:36 | 358,053,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from ._unet import UNet
from ._unet_dice import UNetDice
from ._resunet import ResUNet
from ._deeplabv3 import Deeplabv3
from ._fcn import FCN8
from ._segnet import SegNet
from ._unet_attn import UNetAttn
from ._gar_net import GARNet
from ._resunet_plus_plus import ResUNetPlusPlus
from ._dilated_resfcn import DilatedResFCN
from ._se_unet import SEUNet
from ._dilated_unet import DilatedUNet
from ._gar_net_exp import GARNetExperimental
| [
"[email protected]"
]
| |
89339d45b4d3abc2eac1659c9dae145f5a8846f8 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/freeinteractive/freeinteractive 192.py | 67edeb4bbefbcf40a50db41edc107e2f2a0bb953 | []
| no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | >>> from domainlib import multi_probe
>>> names = 'python.org rust-lang.org golang.org nolang.invalid'.split()
>>> gen_found = (name async for name, found in multi_probe(names) if found)
>>> gen_found
<async_generator object <genexpr> at 0x10a8f9700>
>>> async for name in gen_found:
... print(name)
...
golang.org
python.org
rust-lang.org
| [
"[email protected]"
]
| |
6808c8c41ce560a5d7aabbbe7ab1d3bcd280de35 | da2d53e8021b539db006fa31f02d1c2ae46bed3b | /Test/test.py | af89a96f0b23218a82cfcd3e536f4901d5f99677 | []
| no_license | srajsonu/CodeChef | 0723ee4975808e2f4d101d2034771d868ae3b7f7 | a39cd5886a5f108dcd46f70922d5637dd29849ce | refs/heads/main | 2023-04-22T08:33:06.376698 | 2021-05-16T05:48:17 | 2021-05-16T05:48:17 | 327,030,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | """
1 : [2, 3]
7 <-> 6 (1)
6 <-> 5 (2)
2 <-> 8 (2)
0 <-> 1 (4)
2 <-> 5 (4)
6 <-> 8 (6)
2 <-> 3 (7)
7 <-> 8 (7)
3 <-> 4 (9)
5 <-> 4 (10)
1 <-> 7 (11)
3 <-> 5 (14)
7 -> 6 -> 5 -> 4
|
2 -> 8
"""
from collections import defaultdict
class Solution:
def find_root(self, A, parent):
if parent[A] == A:
return A
return self.find_root(parent[A], parent)
def union(self, A, B, height, parent):
C = self.find_root(A, parent)
D = self.find_root(B, parent)
if C == D:
return
if height[C] < height[D]:
parent[C] = D
elif height[C] > height[D]:
parent[D] = C
else:
parent[C] = D
height[C] += 1
    def mst(self, A):
        # Completed sketch (assumed intent, based on the notes at the top of the
        # file): Kruskal's MST using the find_root/union helpers defined above.
        edges = sorted(A, key=lambda e: e[2])
        n = max(max(i, j) for i, j, _ in A) + 1
        parent = list(range(n))
        height = [0] * n
        total = 0
        picked = []
        for i, j, w in edges:
            if self.find_root(i, parent) != self.find_root(j, parent):
                self.union(i, j, height, parent)
                total += w
                picked.append((i, j, w))
        return total, picked
if __name__ == '__main__':
A = [[0, 1, 4],
[0, 7, 8],
[1, 7, 11],
[1, 2, 8],
[2, 8, 2],
[2, 5, 4],
[3, 2, 7],
         [3, 5, 14]]
    S = Solution()
    print(S.mst(A))
"""
class Solution:
def dp(self, n, dp):
if dp[n]:
return dp[n]
if n <= 1:
return 1
count = 0
for i in range(1, n+1):
count += self.dp(i-1, dp) * self.dp(n-i, dp)
dp[n] = count
return count
def uniqueBST(self, A):
dp = [0 for _ in range(A+1)]
return self.dp(A, dp)
def solve(self, n):
dp = [0 for _ in range(n+1)]
dp[0] = 1
for i in range(1, n+1):
for j in range(1, i+1):
dp[i] += dp[j-1] * dp[i - j]
return dp[-1]
if __name__ == '__main__':
A = 9
B = Solution()
print(B.uniqueBST(A))
print(B.solve(A))
"""
class Solution:
def solve(self, arr):
m = len(arr)
n = len(arr[0])
for i in range(m):
for j in range(n):
if arr[i][j] == 0:
for k in range(m):
if arr[k][j] == 0:
continue
arr[k][j] = -1
for l in range(n):
if arr[i][l] == 0:
continue
arr[i][l] = -1
for i in range(m):
for j in range(n):
if arr[i][j] == -1:
arr[i][j] = 0
# while q:
# i, j = q.pop(0) #(1, 1)
#
# for k in range(n):
# arr[i][k] = 0
#
# for k in range(m):
# arr[k][j] = 0
return arr
if __name__ == '__main__':
arr = [[1, 1, 1],
[1, 0, 1],
[1, 1, 1]]
S = Solution()
print(S.solve(arr))
"""
1 0 1
0 0 0
1 0 1
"""
| [
"[email protected]"
]
| |
146a0a6e3d8f7b969b3b61b74b99b083cfd95fc1 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /examples/tutorial2.py | 3a952ba678b6f688c3b4fe342830ac8cf6ac0bc4 | [
"BSD-2-Clause-Views"
]
| permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,284 | py | ##############################################################################
#
# A simple program to write some data to an Excel file using the XlsxWriter
# Python module.
#
# This program is shown, with explanations, in Tutorial 2 of the XlsxWriter
# documentation.
#
# Copyright 2013-2019, John McNamara, [email protected]
#
import xlsxwriter
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('Expenses02.xlsx')
worksheet = workbook.add_worksheet()
# Add a bold format to use to highlight cells.
bold = workbook.add_format({'bold': True})
# Add a number format for cells with money.
money = workbook.add_format({'num_format': '$#,##0'})
# Write some data header.
worksheet.write('A1', 'Item', bold)
worksheet.write('B1', 'Cost', bold)
# Some data we want to write to the worksheet.
expenses = (
['Rent', 1000],
['Gas', 100],
['Food', 300],
['Gym', 50],
)
# Start from the first cell below the headers.
row = 1
col = 0
# Iterate over the data and write it out row by row.
for item, cost in (expenses):
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost, money)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, 'Total', bold)
worksheet.write(row, 1, '=SUM(B2:B5)', money)
workbook.close()
| [
"[email protected]"
]
| |
ee98f3883988b0a75df2d59495dab5b03aae7026 | b4afb834fc3a3e2c128b1bf825700031e3df519a | /examples/cobalt-preproc/Arctic/fix_pole.py | 7e8aedf62b4f44becced7e92fdac0761988e3965 | [
"BSD-3-Clause"
]
| permissive | ESMG/pyroms | e4e5e9d70d66907a992846b06d61db31afcd24f3 | 5ea501ef904b01036dd2a0909b7bdc61a56e7eff | refs/heads/python3 | 2023-03-19T11:11:09.143443 | 2023-03-10T00:22:13 | 2023-03-10T00:22:13 | 1,012,779 | 102 | 63 | NOASSERTION | 2023-03-10T00:23:20 | 2010-10-21T17:22:48 | Python | UTF-8 | Python | false | false | 1,787 | py | import subprocess
import os
import sys
import subprocess
import numpy as np
import netCDF4 as nc
dst_dir='./'
ic_file = dst_dir + 'ARCTIC4_ic_bio_GFDL-APR.nc'
fidic = nc.Dataset(ic_file,'a')
Cs_r = fidic.variables['Cs_r']
nz = Cs_r.shape[0]
# define all tracer stuff
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz','cased','chl','irr_mem','htotal','co3_ion']
print('\nFixing a north pole problem')
for tr in list_tracer:
print('for variable', tr)
tracer = fidic.variables[tr][:]
mysum = np.zeros((nz))
count = 0
for j in range(753,768):
for i in range(271,287):
if tracer[0,0,j,i] != 0:
count += 1
mysum += tracer[0,:,j,i]
print('count', count)
mysum = mysum/count
print('mysum', mysum)
for j in range(753,768):
for i in range(271,287):
if tracer[0,0,j,i] == 0:
tracer[0,:,j,i] = mysum
fidic.variables[tr][:] = tracer
# These two tracers contain zeros, leading to nans.
tracer = fidic.variables['cased'][:]
mysum = 0.25*(tracer[0,:,752,279] + tracer[0,:,768,279] + tracer[0,:,760,270] + tracer[0,:,602,287])
for j in range(753,768):
for i in range(271,287):
tracer[0,:,j,i] = mysum
fidic.variables['cased'][:] = tracer
tracer = fidic.variables['irr_mem'][:]
mysum = 0.25*(tracer[0,:,752,279] + tracer[0,:,768,279] + tracer[0,:,760,270] + tracer[0,:,602,287])
for j in range(753,768):
for i in range(271,287):
tracer[0,:,j,i] = mysum
fidic.variables['irr_mem'][:] = tracer
fidic.close()
| [
"[email protected]"
]
| |
6a8a172193504c436ca6a0ba35d33893b60e4742 | f55623e3aea5b8ae3d2c37bcfa8a853150a0b376 | /a10_horizon/dashboard/project/a10networks/analytics/panel.py | 7c0512bb617cfdd6658fe03e79198db64fe2795d | [
"Apache-2.0"
]
| permissive | isaaczurn/a10-horizon | 0fab288d321516de72ea276d54b415a1c2e4d7d6 | a96f9e95dfcda619f08a19a9057b061bdba12487 | refs/heads/development | 2020-12-26T04:49:33.131115 | 2016-08-11T16:47:09 | 2016-08-11T16:47:09 | 64,965,205 | 1 | 0 | null | 2016-08-04T20:31:28 | 2016-08-04T20:31:28 | null | UTF-8 | Python | false | false | 863 | py | # Copyright (C) 2014-2016, A10 Networks Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from a10_horizon.dashboard.panel_base import NeutronExtensionPanelBase
class AnalyticsPanel(NeutronExtensionPanelBase):
name = "Analytics"
slug = "a10analytics"
permissions = ('openstack.services.network',)
| [
"[email protected]"
]
| |
4357bfbaea73a6e8726ec1d1643b71f701b1c489 | 091e97bcfe5acc0635bd601aa8497e377b74d41a | /openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_health_checker/test/etcd_traffic_test.py | 583c4c8dd86b2cf5270083b912f639bb34d4f855 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | openshift/openshift-tools | d59b63778f25cb8fb3c7a0253afe22a173e72f9d | e342f6659a4ef1a188ff403e2fc6b06ac6d119c7 | refs/heads/prod | 2023-08-30T01:52:04.108978 | 2022-03-23T21:07:28 | 2022-03-23T21:07:28 | 36,827,699 | 170 | 254 | Apache-2.0 | 2022-06-16T12:11:51 | 2015-06-03T20:09:22 | Python | UTF-8 | Python | false | false | 2,219 | py | import pytest
from openshift_checks.etcd_traffic import EtcdTraffic
@pytest.mark.parametrize('group_names,version,is_active', [
(['oo_masters_to_config'], "3.5", False),
(['oo_masters_to_config'], "3.6", False),
(['oo_nodes_to_config'], "3.4", False),
(['oo_etcd_to_config'], "3.4", True),
(['oo_etcd_to_config'], "1.5", True),
(['oo_etcd_to_config'], "3.1", False),
(['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
(['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
([], "3.4", False),
])
def test_is_active(group_names, version, is_active):
task_vars = dict(
group_names=group_names,
openshift_image_tag=version,
)
assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
(["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
(["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
(["oo_etcd_to_config"], False, False, []),
])
def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
def execute_module(module_name, *_):
return {
"matched": matched,
"failed": failed,
}
task_vars = dict(
group_names=group_names,
openshift_is_containerized=False,
openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
for word in extra_words:
assert word in result.get("msg", "")
assert result.get("failed", False) == failed
@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
(False, "etcd"),
(True, "etcd_container"),
])
def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
task_vars = dict(
openshift_is_containerized=openshift_is_containerized
)
def execute_module(module_name, args, *_):
assert module_name == "search_journalctl"
matchers = args["log_matchers"]
for matcher in matchers:
assert matcher["unit"] == expected_unit_value
return {"failed": False}
EtcdTraffic(execute_module, task_vars).run()
| [
"[email protected]"
]
| |
8aa14bff98269133382561e641d8d76226b5b446 | 8383211ad5eb9bb91fef7642e43a8f148530b8fc | /iga/package.py | 80c06890c846b23037d75e3903d72737c83c77b2 | [
"MIT"
]
| permissive | clchiou/iga | 599f1ba653323a49d9a26f66c072a9b4a2f0dcd7 | 5958f77410d63b712d13db142bfd5ecfbf4ce821 | refs/heads/master | 2021-01-23T19:37:15.670514 | 2015-04-25T07:28:17 | 2015-04-25T07:28:17 | 31,150,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | __all__ = [
'get_outputs',
'get_rule',
]
import itertools
import logging
import iga.context
import iga.precond
from iga.build_rules import build_rules
from iga.core import WriteOnceDict
from iga.error import IgaError
from iga.path import Glob
from iga.rule import Rule
from iga.rule import RuleFunc
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
# Packages that have been loaded (no BUILD file should be executed twice).
_LOADED_PACKAGES = set()
# Map a rule's outputs to that rule.
_OUTPUT_TO_RULE = WriteOnceDict()
def get_outputs():
return frozenset(_OUTPUT_TO_RULE)
def get_rule(label, *, raises=False):
"""Return Rule object or raise IgaError (if required, else return
None) if label does not refer to a rule or an output file.
"""
if label.package not in _LOADED_PACKAGES:
_load_rules(label.package)
_LOADED_PACKAGES.add(label.package)
rule_label = _OUTPUT_TO_RULE.get(label, label)
try:
return Rule.get_object(rule_label)
except KeyError:
if raises:
raise IgaError('%s does not refer to a rule or an output file' %
(label,))
return None
def _load_rules(package):
"""Load rules from a BUILD file."""
buildfile_path = iga.context.current()['source'] / package / 'BUILD'
LOG.info('load %s', buildfile_path)
with buildfile_path.open() as buildfile:
code = buildfile.read()
code = compile(code, str(buildfile_path), 'exec')
rule_data = []
with iga.context.create() as cxt:
cxt['package'] = package
cxt['rule_data'] = rule_data
exec(code, _make_buildfile_globals())
for rule in build_rules(package, rule_data):
Rule.register(rule)
for output in itertools.chain.from_iterable(rule.outputs.values()):
_OUTPUT_TO_RULE[output] = rule.name
def _make_buildfile_globals():
varz = WriteOnceDict()
varz.update(
glob=glob,
package=_do_nothing('package'),
)
varz.update(RuleFunc.get_all_objects())
return dict(varz)
def glob(string):
iga.precond.check_type(string, str)
return Glob(string)
def _do_nothing(func_name):
def func(**kwargs):
if kwargs:
LOG.debug('%s() ignores %r', func_name, sorted(kwargs))
return func
| [
"[email protected]"
]
| |
7e9e65b6dd1786e6e0c861077cf5c2096ec0ec71 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/pytest_test_20201123175355.py | e93f7595e48e3c38b47a185afc239ff87f50977e | []
| no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import pytest
import yaml
class Test_A:
# @pytest.mark.parametrize('a,b',[(10,20),(5,5)])
    # Assumed completion: inline YAML standing in for the original (unknown) data source.
    @pytest.mark.parametrize('a,b', yaml.safe_load("- [10, 20]\n- [5, 5]"))
def test_data1(self,a,b):
print(a + b)
def test_data2(self):
a = 5
b = 5
print(a+b)
if __name__ == '__main__':
pytest.main(['pytest_test.py::Test_A::test_data2','-v'])
| [
"[email protected]"
]
| |
0d30703c7f61070278fc2ff6e705ad1ea92f81b0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_restrictions.py | c41ad3763392207c8a6b6d3285a039f0e0d179bf | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
from xai.brain.wordbase.nouns._restriction import _RESTRICTION
#calss header
class _RESTRICTIONS(_RESTRICTION, ):
def __init__(self,):
_RESTRICTION.__init__(self)
self.name = "RESTRICTIONS"
self.specie = 'nouns'
self.basic = "restriction"
self.jsondata = {}
| [
"[email protected]"
]
| |
7007e09c94950e2c259af5e00524b68cd8a2997b | 8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e | /hacker_rank/unique email addresses.py | bd763c3bf92789740bab9fbd0e863e314a5bfcfc | []
| no_license | MohammedAlewi/competitive-programming | 51514fa04ba03d14f8e00031ee413d6d74df971f | 960da78bfa956cb1cf79a0cd19553af97a2aa0f3 | refs/heads/master | 2023-02-08T20:25:58.279241 | 2023-02-02T00:11:23 | 2023-02-02T00:11:23 | 222,710,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | def format_email(email):
email=list(email)
remove=False
for i in range(len(email)):
if email[i]=='@':
break
if email[i]=='+':
remove=True
if email[i]=='.':
email[i]=''
if remove:
email[i]=''
return "".join(email)
def unique_email_address(emails):
    unique = set()
    for email in emails:
        em = format_email(email)
        unique.add(em)
    return len(unique)
print( unique_email_address(["[email protected]","[email protected]","[email protected]"]) ) | [
"[email protected]"
]
| |
5dc036ca5174ca5e7a8a2951bd156ef4e1efc144 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/lpj.py | ca38829b8abff06df97846545d31dac6c446f292 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'lPJ':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
6d83260ea939da933135941f5c115787ed5d1ba1 | 0f15175752b462b29725b459b46752facad35642 | /tests/test_inputunit.py | f591e232e28cc0c42f917d722cd371938446aea1 | [
"BSD-2-Clause-Views"
]
| permissive | Shadiesna/ooni-probe | f36d69fa2786e97df68cf0808da43190d4ab1daa | 01d80a2abc235fedbd2944500e259e537fd46c45 | refs/heads/master | 2021-01-16T20:03:18.727535 | 2012-11-22T22:23:07 | 2012-11-22T22:23:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import unittest
from ooni.inputunit import InputUnit, InputUnitFactory
class TestInputUnit(unittest.TestCase):
def test_input_unit_factory(self):
inputs = range(100)
inputUnit = InputUnitFactory(inputs)
for i in inputUnit:
self.assertEqual(len(list(i)), inputUnit.inputUnitSize)
def test_input_unit(self):
inputs = range(100)
inputUnit = InputUnit(inputs)
idx = 0
for i in inputUnit:
idx += 1
self.assertEqual(idx, 100)
| [
"[email protected]"
]
| |
11007dbad5d04425e7f5781917716b9c536e4900 | 26452a6f63cf22f938498799db9f8e1997641774 | /Flask/blog_flask/migrations/versions/c4a89bd2a384_.py | 78958337a2948528512fe9e84cfb519c9345e57e | []
| no_license | zarkaltair/Python-frameworks | 9829e4b5130dd67a513c9e1426775cd761b96258 | 632ee4da8e008a6b0c27198dc4722b5aa3c464b8 | refs/heads/master | 2021-06-14T14:59:23.881043 | 2020-08-10T15:01:02 | 2020-08-10T15:01:02 | 160,838,202 | 0 | 0 | null | 2021-03-20T00:30:43 | 2018-12-07T15:02:39 | Python | UTF-8 | Python | false | false | 1,499 | py | """empty message
Revision ID: c4a89bd2a384
Revises: da529ec88f6e
Create Date: 2019-02-04 23:41:02.786665
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c4a89bd2a384'
down_revision = 'da529ec88f6e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles_users')
op.drop_table('user')
op.drop_table('role')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
7aeb5411bc3db40e176c74f2f877e1c693ec71ba | fcdce57c1bd0cc4f52679fd0f3f82532550083fa | /214/countdown.py | aae0037e13d043346509cc187fbc2036bd61c9db | []
| no_license | nishanthegde/bitesofpy | a16a8b5fb99ab18dc1566e606170464a4df3ace0 | c28aa88e1366ab65f031695959d7cd0b3d08be6b | refs/heads/master | 2023-08-08T16:53:17.107905 | 2023-07-22T19:07:51 | 2023-07-22T19:07:51 | 183,959,400 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from itertools import islice
def countdown():
"""Write a generator that counts from 100 to 1"""
for i in range(100, 0, -1):
yield i
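# Usage sketch for countdown() (illustrative): list(islice(countdown(), 3)) == [100, 99, 98]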
def simple_generator_function():
yield 1
yield 2
yield 3
# def main():
# # print('thank you for my life...')
# our_generator = simple_generator_function()
# # print(next(our_generator))
# # print(next(our_generator))
# # print(next(our_generator))
# cd = countdown()
# for _ in range(101):
# print(next(cd))
# if __name__ == '__main__':
# main()
| [
"[email protected]"
]
| |
0a4805eb9f3b7986ac7670f7a74595f7d72db420 | a992d10d89a4aea6bc67cf36f2b4db18e542cf0c | /NineChapters/FindConnectedComponentInDirectedGraph.py | a13f4c45fb34cdb034e70798a1d08752a0161c01 | []
| no_license | KeleiAzz/LeetCode | 81f5ac3f0c722bbabbcce4f29809722c191a6850 | 3d82e6c402711057a95a6435fc29fbfcf2ee9c8f | refs/heads/master | 2021-01-17T10:34:42.443543 | 2016-04-05T18:17:54 | 2016-04-05T18:17:54 | 41,068,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
# @param {UndirectedGraphNode[]} nodes a array of undirected graph node
# @return {int[][]} a connected set of a undirected graph
def connectedSet(self, nodes):
# Write your code here
if not nodes:
return []
res = []
visited = set()
for node in nodes:
if node not in visited:
tmp = []
queue = [node]
visited.add(node)
while queue:
n = queue.pop(0)
tmp.append(n)
# visited.add(n)
for neighbor in n.neighbors:
if neighbor not in visited:
queue.append(neighbor)
visited.add(neighbor)
res.append(tmp)
for i in range(len(res)):
res[i] = [node.label for node in res[i]]
res[i].sort()
return res
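# Example (illustrative): with nodes 1 and 2 connected and node 3 isolated,
# connectedSet returns [[1, 2], [3]] -- one sorted list of labels per component.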
| [
"[email protected]"
]
| |
ff43e97dc0cf5eebf6a0eed0248721df81fea7ee | 4f4dc1e6235b068f3346a0df66740e216fd3a993 | /bs4/dammit.py | 567b73b4093f4d48e60eae824a592edfea298a00 | []
| no_license | intabeta/inta | 940dc94ecffde4b82dab87ffc5ca7bfcb1a391fa | 10d479630d8398dfbea34020584eeaaff14961fc | refs/heads/master | 2021-01-23T09:49:15.992045 | 2014-02-08T01:23:49 | 2014-02-08T01:23:49 | 9,431,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,522 | py | # -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This class forces XML data into a standard format (usually to UTF-8 or
Unicode). It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It does not rewrite the XML or HTML to reflect a new
encoding; that's the tree builder's job.
"""
import codecs
from htmlentitydefs import codepoint2name
import re
import warnings
# Autodetects character encodings. Very useful.
# Download from http://chardet.feedparser.org/
# or 'apt-get install python-chardet'
# or 'easy_install chardet'
try:
import chardet
#import chardet.constants
#chardet.constants._debug = 1
except ImportError:
chardet = None
# Available from http://cjkpython.i18n.org/.
try:
import iconv_codec
except ImportError:
pass
xml_encoding_re = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
"""Substitute XML or HTML entities for the corresponding characters."""
def _populate_class_variables():
lookup = {}
reverse_lookup = {}
characters = []
for codepoint, name in list(codepoint2name.items()):
if codepoint == 34:
# There's no point in turning the quotation mark into
# ", unless it happens within an attribute value, which
# is handled elsewhere.
continue
character = unichr(codepoint)
characters.append(character)
lookup[character] = name
reverse_lookup[name] = character
re_definition = "[%s]" % "".join(characters)
return lookup, reverse_lookup, re.compile(re_definition)
(CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
CHARACTER_TO_XML_ENTITY = {
"'": "apos",
'"': "quot",
"&": "amp",
"<": "lt",
">": "gt",
}
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
")")
@classmethod
def _substitute_html_entity(cls, matchobj):
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
return "&%s;" % entity
@classmethod
def _substitute_xml_entity(cls, matchobj):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
return "&%s;" % entity
@classmethod
def quoted_attribute_value(self, value):
"""Make a value into a quoted XML attribute, possibly escaping it.
Most strings will be quoted using double quotes.
Bob's Bar -> "Bob's Bar"
If a string contains double quotes, it will be quoted using
single quotes.
Welcome to "my bar" -> 'Welcome to "my bar"'
If a string contains both single and double quotes, the
double quotes will be escaped, and the string will be quoted
using double quotes.
        Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;
"""
quote_with = '"'
if '"' in value:
if "'" in value:
# The string contains both single and double
# quotes. Turn the double quotes into
# entities. We quote the double quotes rather than
                # the single quotes because the entity name is
                # "&quot;" whether this is HTML or XML. If we
                # quoted the single quotes, we'd have to decide
                # between &apos; and &squot;.
                replace_with = "&quot;"
value = value.replace('"', replace_with)
else:
# There are double quotes but no single quotes.
# We can use single quotes to quote the attribute.
quote_with = "'"
return quote_with + value + quote_with
@classmethod
def substitute_xml(cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
        :param value: A string to be substituted. The less-than sign will
          become &lt;, the greater-than sign will become &gt;, and any
          ampersands that are not part of an entity definition will
          become &amp;.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets, and ampersands that aren't part of
# entities.
value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_html(cls, s):
"""Replace certain Unicode characters with named HTML entities.
This differs from data.encode(encoding, 'xmlcharrefreplace')
in that the goal is to make the result more readable (to those
with ASCII displays) rather than to recover from
errors. There's absolutely nothing wrong with a UTF-8 string
        containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
        character with "&eacute;" will make it more readable to some
people.
"""
return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
cls._substitute_html_entity, s)
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = {"macintosh": "mac-roman",
"x-sjis": "shift-jis"}
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def __init__(self, markup, override_encodings=[],
smart_quotes_to=None, is_html=False):
self.declared_html_encoding = None
self.smart_quotes_to = smart_quotes_to
self.tried_encodings = []
self.contains_replacement_characters = False
if markup == '' or isinstance(markup, unicode):
self.markup = markup
self.unicode_markup = unicode(markup)
self.original_encoding = None
return
new_markup, document_encoding, sniffed_encoding = \
self._detectEncoding(markup, is_html)
self.markup = new_markup
u = None
if new_markup != markup:
# _detectEncoding modified the markup, then converted it to
# Unicode and then to UTF-8. So convert it from UTF-8.
u = self._convert_from("utf8")
self.original_encoding = sniffed_encoding
if not u:
for proposed_encoding in (
override_encodings + [document_encoding, sniffed_encoding]):
if proposed_encoding is not None:
u = self._convert_from(proposed_encoding)
if u:
break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convert_from(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convert_from(proposed_encoding)
if u:
break
# As an absolute last resort, try the encodings again with
# character replacement.
if not u:
for proposed_encoding in (
override_encodings + [
document_encoding, sniffed_encoding, "utf-8", "windows-1252"]):
if proposed_encoding != "ascii":
u = self._convert_from(proposed_encoding, "replace")
if u is not None:
warnings.warn(
UnicodeWarning(
"Some characters could not be decoded, and were "
"replaced with REPLACEMENT CHARACTER."))
self.contains_replacement_characters = True
break
# We could at this point force it to ASCII, but that would
# destroy so much data that I think giving up is better
self.unicode_markup = u
if not u:
self.original_encoding = None
def _sub_ms_char(self, match):
"""Changes a MS smart quote character to an XML or HTML
entity, or an ASCII character."""
orig = match.group(1)
if self.smart_quotes_to == 'ascii':
sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
else:
sub = self.MS_CHARS.get(orig)
if type(sub) == tuple:
if self.smart_quotes_to == 'xml':
sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
else:
sub = '&'.encode() + sub[0].encode() + ';'.encode()
else:
sub = sub.encode()
return sub
def _convert_from(self, proposed, errors="strict"):
proposed = self.find_codec(proposed)
if not proposed or (proposed, errors) in self.tried_encodings:
return None
self.tried_encodings.append((proposed, errors))
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if (self.smart_quotes_to is not None
and proposed.lower() in self.ENCODINGS_WITH_SMART_QUOTES):
smart_quotes_re = b"([\x80-\x9f])"
smart_quotes_compiled = re.compile(smart_quotes_re)
markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
try:
#print "Trying to convert document to %s (errors=%s)" % (
# proposed, errors)
u = self._to_unicode(markup, proposed, errors)
self.markup = u
self.original_encoding = proposed
except Exception as e:
#print "That didn't work!"
#print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _to_unicode(self, data, encoding, errors="strict"):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding, errors)
return newdata
def _detectEncoding(self, xml_data, is_html=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == b'\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == b'\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xfe\xff') \
and (xml_data[2:4] != b'\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == b'\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xff\xfe') and \
(xml_data[2:4] != b'\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == b'\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == b'\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == b'\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == b'\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == b'\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = xml_encoding_re.match(xml_data)
if not xml_encoding_match and is_html:
xml_encoding_match = html_meta_re.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].decode(
'ascii').lower()
if is_html:
self.declared_html_encoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset:
return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans(
''.join(map(chr, list(range(256)))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
# A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
MS_CHARS = {b'\x80': ('euro', '20AC'),
b'\x81': ' ',
b'\x82': ('sbquo', '201A'),
b'\x83': ('fnof', '192'),
b'\x84': ('bdquo', '201E'),
b'\x85': ('hellip', '2026'),
b'\x86': ('dagger', '2020'),
b'\x87': ('Dagger', '2021'),
b'\x88': ('circ', '2C6'),
b'\x89': ('permil', '2030'),
b'\x8A': ('Scaron', '160'),
b'\x8B': ('lsaquo', '2039'),
b'\x8C': ('OElig', '152'),
b'\x8D': '?',
b'\x8E': ('#x17D', '17D'),
b'\x8F': '?',
b'\x90': '?',
b'\x91': ('lsquo', '2018'),
b'\x92': ('rsquo', '2019'),
b'\x93': ('ldquo', '201C'),
b'\x94': ('rdquo', '201D'),
b'\x95': ('bull', '2022'),
b'\x96': ('ndash', '2013'),
b'\x97': ('mdash', '2014'),
b'\x98': ('tilde', '2DC'),
b'\x99': ('trade', '2122'),
b'\x9a': ('scaron', '161'),
b'\x9b': ('rsaquo', '203A'),
b'\x9c': ('oelig', '153'),
b'\x9d': '?',
b'\x9e': ('#x17E', '17E'),
b'\x9f': ('Yuml', ''),}
# A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
# horrors like stripping diacritical marks to turn á into a, but also
# contains non-horrors like turning “ into ".
MS_CHARS_TO_ASCII = {
b'\x80' : 'EUR',
b'\x81' : ' ',
b'\x82' : ',',
b'\x83' : 'f',
b'\x84' : ',,',
b'\x85' : '...',
b'\x86' : '+',
b'\x87' : '++',
b'\x88' : '^',
b'\x89' : '%',
b'\x8a' : 'S',
b'\x8b' : '<',
b'\x8c' : 'OE',
b'\x8d' : '?',
b'\x8e' : 'Z',
b'\x8f' : '?',
b'\x90' : '?',
b'\x91' : "'",
b'\x92' : "'",
b'\x93' : '"',
b'\x94' : '"',
b'\x95' : '*',
b'\x96' : '-',
b'\x97' : '--',
b'\x98' : '~',
b'\x99' : '(TM)',
b'\x9a' : 's',
b'\x9b' : '>',
b'\x9c' : 'oe',
b'\x9d' : '?',
b'\x9e' : 'z',
b'\x9f' : 'Y',
b'\xa0' : ' ',
b'\xa1' : '!',
b'\xa2' : 'c',
b'\xa3' : 'GBP',
b'\xa4' : '$', #This approximation is especially parochial--this is the
#generic currency symbol.
b'\xa5' : 'YEN',
b'\xa6' : '|',
b'\xa7' : 'S',
b'\xa8' : '..',
b'\xa9' : '',
b'\xaa' : '(th)',
b'\xab' : '<<',
b'\xac' : '!',
b'\xad' : ' ',
b'\xae' : '(R)',
b'\xaf' : '-',
b'\xb0' : 'o',
b'\xb1' : '+-',
b'\xb2' : '2',
b'\xb3' : '3',
b'\xb4' : ("'", 'acute'),
b'\xb5' : 'u',
b'\xb6' : 'P',
b'\xb7' : '*',
b'\xb8' : ',',
b'\xb9' : '1',
b'\xba' : '(th)',
b'\xbb' : '>>',
b'\xbc' : '1/4',
b'\xbd' : '1/2',
b'\xbe' : '3/4',
b'\xbf' : '?',
b'\xc0' : 'A',
b'\xc1' : 'A',
b'\xc2' : 'A',
b'\xc3' : 'A',
b'\xc4' : 'A',
b'\xc5' : 'A',
b'\xc6' : 'AE',
b'\xc7' : 'C',
b'\xc8' : 'E',
b'\xc9' : 'E',
b'\xca' : 'E',
b'\xcb' : 'E',
b'\xcc' : 'I',
b'\xcd' : 'I',
b'\xce' : 'I',
b'\xcf' : 'I',
b'\xd0' : 'D',
b'\xd1' : 'N',
b'\xd2' : 'O',
b'\xd3' : 'O',
b'\xd4' : 'O',
b'\xd5' : 'O',
b'\xd6' : 'O',
b'\xd7' : '*',
b'\xd8' : 'O',
b'\xd9' : 'U',
b'\xda' : 'U',
b'\xdb' : 'U',
b'\xdc' : 'U',
b'\xdd' : 'Y',
b'\xde' : 'b',
b'\xdf' : 'B',
b'\xe0' : 'a',
b'\xe1' : 'a',
b'\xe2' : 'a',
b'\xe3' : 'a',
b'\xe4' : 'a',
b'\xe5' : 'a',
b'\xe6' : 'ae',
b'\xe7' : 'c',
b'\xe8' : 'e',
b'\xe9' : 'e',
b'\xea' : 'e',
b'\xeb' : 'e',
b'\xec' : 'i',
b'\xed' : 'i',
b'\xee' : 'i',
b'\xef' : 'i',
b'\xf0' : 'o',
b'\xf1' : 'n',
b'\xf2' : 'o',
b'\xf3' : 'o',
b'\xf4' : 'o',
b'\xf5' : 'o',
b'\xf6' : 'o',
b'\xf7' : '/',
b'\xf8' : 'o',
b'\xf9' : 'u',
b'\xfa' : 'u',
b'\xfb' : 'u',
b'\xfc' : 'u',
b'\xfd' : 'y',
b'\xfe' : 'b',
b'\xff' : 'y',
}
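# Illustrative usage sketch (not part of the original module; assumes the rest
# of the bs4 package is importable alongside this file):
#   dammit = UnicodeDammit(b"Sacr\xe9 bleu!", ["latin-1", "iso-8859-1"])
#   dammit.unicode_markup     # -> u'Sacré bleu!' decoded to Unicode
#   dammit.original_encoding  # -> whichever proposed codec succeeded first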
| [
"[email protected]"
]
| |
dbe4d8c429ab336beb9508d0db161d9e3c716e94 | 13f7adf576114c51f9f806a6fc5797b276d93f97 | /build/mobileye_560_660_msgs/cmake/mobileye_560_660_msgs-genmsg-context.py | 8da19562fa70d9788fdfa2c792aa2684876e4fdb | []
| no_license | yunjeongkim/keti_ws | a72a5ebc367b208654bdffb5bb9e8372cd959d33 | aaac717c15a7be7431b22fb4ec7a96a734f2e03c | refs/heads/master | 2020-04-05T06:18:52.334522 | 2018-11-21T01:47:34 | 2018-11-21T01:47:34 | 156,633,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/FixedFoe.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/AwsDisplay.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/Tsr.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/TsrVisionOnly.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/Ahbc.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/AhbcGradual.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/Lane.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/ObstacleStatus.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/ObstacleData.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/AftermarketLane.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/LkaLane.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/LkaReferencePoints.msg;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg/LkaNumOfNextLaneMarkersReported.msg"
services_str = ""
pkg_name = "mobileye_560_660_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "mobileye_560_660_msgs;/home/dallddungi/keti_ws/src/mobileye_560_660_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
]
| |
15805fb731a2111511ca1d2ae1ca2afef7a76a63 | 81579ecd0678d652bbb57ff97529631fcfb74b12 | /custom/_legacy/pact/management/commands/constants.py | 506a7b19688871d88c33445f266abb17b40fede3 | [
"BSD-3-Clause"
]
| permissive | dungeonmaster51/commcare-hq | 64fece73671b03c1bca48cb9d1a58764d92796ea | 1c70ce416564efa496fb4ef6e9130c188aea0f40 | refs/heads/master | 2022-12-03T21:50:26.035495 | 2020-08-11T07:34:59 | 2020-08-11T07:34:59 | 279,546,551 | 1 | 0 | BSD-3-Clause | 2020-07-31T06:13:03 | 2020-07-14T09:51:32 | Python | UTF-8 | Python | false | false | 30 | py | RETRY_LIMIT = 5
POOL_SIZE = 8
| [
"[email protected]"
]
| |
552c7fe21a247d235ef5c79fbb2b8bebcd54438e | 54fa8e9d460e8aa0b64fe26056e2760d87e7bbcf | /baseball_utils/get_today.py | bbd8371d94130c19dd9e53c4cec17d0dc8abf2af | [
"MIT"
]
| permissive | mvwicky/baseball-utils | eb0050076d64e238353d2a86fdaa7c52bc861dbc | 124e315a8310e32acec716c25cb6615feac02b5c | refs/heads/master | 2020-03-22T22:19:47.907792 | 2018-07-13T17:36:58 | 2018-07-13T17:36:58 | 140,746,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from datetime import datetime
import attr
from requests import Session
from baseball_utils.gameday import GamedayData
from baseball_utils.savant import Savant
from baseball_utils.util import SESSION, default_attrs
def get_today():
td = Today(SESSION)
return td.probables()
@default_attrs()
class Today(object):
session: Session = attr.ib()
gd: GamedayData = attr.ib(init=False)
def __attrs_post_init__(self):
self.gd = GamedayData(self.session, Savant(self.session))
def probables(self):
pass
if __name__ == '__main__':
pass
| [
"[email protected]"
]
| |
af72de057706b0fbf303f859a7f5a8c9d41d9e01 | 2eff5a335be48682379a363a05441fed72d80dc5 | /vespa/pulse/auto_gui/panel_tab_transform.py | dd6bf9df2158ca4a1c8c7be3b6e4d63e88bfa3cf | [
"BSD-3-Clause"
]
| permissive | teddychao/vespa | 0d998b03b42daf95808d8115da1a2e2205629cd4 | 6d3e84a206ec427ac1304e70c7fadf817432956b | refs/heads/main | 2023-08-31T08:19:31.244055 | 2021-10-26T00:25:27 | 2021-10-26T00:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,261 | py | # -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.3 on Wed Sep 11 13:06:23 2019
#
import wx
from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN, FS_LEFT, FS_RIGHT, FS_CENTRE, FS_READONLY
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class PanelTabTransform(wx.Panel):
def __init__(self, *args, **kwds):
# begin wxGlade: PanelTabTransform.__init__
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.window_splitter = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_3D | wx.SP_BORDER)
self.PanelLeftSide = wx.Panel(self.window_splitter, wx.ID_ANY)
self.PanelTransformKernel = wx.Panel(self.PanelLeftSide, wx.ID_ANY)
self.LabelPlaceholder2 = wx.StaticText(self.PanelTransformKernel, wx.ID_ANY, "LabelPlaceholder2")
self.LabelPlaceholder3 = wx.StaticText(self.PanelTransformKernel, wx.ID_ANY, "LabelPlaceholder3")
self.ButtonRun = wx.Button(self.PanelLeftSide, wx.ID_ANY, "Run")
self.PanelControls1D = wx.Panel(self.PanelLeftSide, wx.ID_ANY)
self.FloatBlochRangeValue = FloatSpin(self.PanelControls1D, wx.ID_ANY, value=0.0, digits=3, min_val=0.0, max_val=100.0, increment=1.0, agwStyle=FS_LEFT, style=0)
self.ComboBlochRangeUnits = wx.ComboBox(self.PanelControls1D, wx.ID_ANY, choices=["[kHz]", "[cm]"], style=wx.CB_READONLY)
self.FloatBlochOffsetValue = FloatSpin(self.PanelControls1D, wx.ID_ANY, value=0.0, digits=1, min_val=0.0, max_val=100.0, increment=1.0, agwStyle=FS_LEFT, style=0)
self.CheckProfile = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "Freq Profile ")
self.CheckAbsolute = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "Abs Freq Profile ")
self.CheckGradient = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "Gradient ")
self.CheckProfileExtended = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "Ext Profile")
self.CheckAbsoluteExtended = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "Ext-Abs Profile ")
self.CheckWaveformMagn = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "RF [Magn]")
self.CheckWaveformPhase = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "RF [deg]")
self.CheckWaveform = wx.CheckBox(self.PanelControls1D, wx.ID_ANY, "RF [re/im]")
self.ComboUsageType = wx.ComboBox(self.PanelControls1D, wx.ID_ANY, choices=["Excite", "Inversion", "Spin-Echo Echo Echo", "Saturation"], style=wx.CB_DROPDOWN | wx.CB_READONLY | wx.CB_SIMPLE)
self.PanelGradRefocus = wx.Panel(self.PanelControls1D, wx.ID_ANY)
self.CheckGradRefocus = wx.CheckBox(self.PanelGradRefocus, wx.ID_ANY, "Grad Refoc Profile Grad Value:")
self.FloatGradRefocus = FloatSpin(self.PanelGradRefocus, wx.ID_ANY, value=0.5, digits=5, min_val=0.0, max_val=100.0, increment=1.0, agwStyle=FS_LEFT, style=0)
self.window_1_pane_results = wx.Panel(self.window_splitter, wx.ID_ANY)
self.PanelView2D = wx.Panel(self.window_1_pane_results, wx.ID_ANY)
self.PanelView1D = wx.Panel(self.window_1_pane_results, wx.ID_ANY)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_run, self.ButtonRun)
self.Bind( EVT_FLOATSPIN, self.on_bloch_range_value, self.FloatBlochRangeValue)
self.Bind(wx.EVT_COMBOBOX, self.on_bloch_range_units, self.ComboBlochRangeUnits)
self.Bind( EVT_FLOATSPIN, self.on_bloch_offset_value, self.FloatBlochOffsetValue)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckProfile)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckAbsolute)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckGradient)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckProfileExtended)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckAbsoluteExtended)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckWaveformMagn)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckWaveformPhase)
self.Bind(wx.EVT_CHECKBOX, self.on_check, self.CheckWaveform)
self.Bind(wx.EVT_COMBOBOX, self.on_usage, self.ComboUsageType)
self.Bind(wx.EVT_CHECKBOX, self.on_check_grad_refocus, self.CheckGradRefocus)
self.Bind( EVT_FLOATSPIN, self.on_float_grad_refocus, self.FloatGradRefocus)
# end wxGlade
def __set_properties(self):
# begin wxGlade: PanelTabTransform.__set_properties
self.ComboBlochRangeUnits.SetSelection(1)
self.ComboUsageType.SetMinSize((100, 21))
self.ComboUsageType.SetSelection(1)
self.PanelGradRefocus.Hide()
self.PanelView2D.Hide()
self.window_splitter.SetMinimumPaneSize(20)
# end wxGlade
def __do_layout(self):
# begin wxGlade: PanelTabTransform.__do_layout
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_12 = wx.BoxSizer(wx.VERTICAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_11 = wx.BoxSizer(wx.VERTICAL)
sizer_9 = wx.StaticBoxSizer(wx.StaticBox(self.PanelControls1D, wx.ID_ANY, "Pulse Plot Controls"), wx.VERTICAL)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_1 = wx.FlexGridSizer(4, 3, 2, 2)
sizer_2 = wx.StaticBoxSizer(wx.StaticBox(self.PanelControls1D, wx.ID_ANY, "Bloch Simulation Settings"), wx.VERTICAL)
grid_sizer_2 = wx.FlexGridSizer(2, 3, 4, 4)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
SizerTransformKernel = wx.BoxSizer(wx.VERTICAL)
GridUserParameters = wx.FlexGridSizer(1, 3, 4, 4)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_3.Add(self.LabelPlaceholder2, 0, wx.EXPAND, 0)
SizerTransformKernel.Add(sizer_3, 0, wx.EXPAND, 0)
GridUserParameters.Add(self.LabelPlaceholder3, 0, 0, 0)
GridUserParameters.AddGrowableCol(1)
SizerTransformKernel.Add(GridUserParameters, 1, wx.ALIGN_RIGHT | wx.BOTTOM | wx.EXPAND, 4)
self.PanelTransformKernel.SetSizer(SizerTransformKernel)
sizer_5.Add(self.PanelTransformKernel, 1, wx.ALL | wx.EXPAND, 4)
sizer_8.Add(self.ButtonRun, 0, wx.ALIGN_CENTER, 0)
sizer_5.Add(sizer_8, 0, wx.ALIGN_CENTER | wx.TOP, 4)
LabelBlochRange = wx.StaticText(self.PanelControls1D, wx.ID_ANY, "Profile Range +/- ", style=wx.ALIGN_RIGHT)
grid_sizer_2.Add(LabelBlochRange, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
grid_sizer_2.Add(self.FloatBlochRangeValue, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
grid_sizer_2.Add(self.ComboBlochRangeUnits, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_1 = wx.StaticText(self.PanelControls1D, wx.ID_ANY, "Frequency Offset [Hz] ")
grid_sizer_2.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
grid_sizer_2.Add(self.FloatBlochOffsetValue, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
grid_sizer_2.Add((20, 20), 0, 0, 0)
sizer_2.Add(grid_sizer_2, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
sizer_11.Add(sizer_2, 0, wx.ALL | wx.EXPAND, 6)
grid_sizer_1.Add(self.CheckProfile, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckAbsolute, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckGradient, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckProfileExtended, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckAbsoluteExtended, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add((20, 20), 0, 0, 0)
grid_sizer_1.Add(self.CheckWaveformMagn, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckWaveformPhase, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
grid_sizer_1.Add(self.CheckWaveform, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
label_2 = wx.StaticText(self.PanelControls1D, wx.ID_ANY, "Usage Type: ", style=wx.ALIGN_RIGHT)
grid_sizer_1.Add(label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.ALL, 4)
grid_sizer_1.Add(self.ComboUsageType, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 2)
grid_sizer_1.Add((20, 20), 0, 0, 0)
sizer_9.Add(grid_sizer_1, 1, wx.EXPAND, 0)
sizer_6.Add(self.CheckGradRefocus, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
sizer_6.Add(self.FloatGradRefocus, 0, wx.EXPAND, 0)
self.PanelGradRefocus.SetSizer(sizer_6)
sizer_9.Add(self.PanelGradRefocus, 0, wx.EXPAND, 0)
sizer_11.Add(sizer_9, 0, wx.ALL | wx.EXPAND, 6)
self.PanelControls1D.SetSizer(sizer_11)
sizer_5.Add(self.PanelControls1D, 0, wx.EXPAND, 0)
self.PanelLeftSide.SetSizer(sizer_5)
sizer_12.Add(self.PanelView2D, 1, wx.EXPAND, 0)
sizer_12.Add(self.PanelView1D, 1, wx.EXPAND, 0)
self.window_1_pane_results.SetSizer(sizer_12)
self.window_splitter.SplitVertically(self.PanelLeftSide, self.window_1_pane_results)
sizer_7.Add(self.window_splitter, 1, wx.EXPAND, 0)
self.SetSizer(sizer_7)
sizer_7.Fit(self)
self.Layout()
# end wxGlade
def on_run(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_run' not implemented!")
event.Skip()
def on_bloch_range_value(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_bloch_range_value' not implemented!")
event.Skip()
def on_bloch_range_units(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_bloch_range_units' not implemented!")
event.Skip()
def on_bloch_offset_value(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_bloch_offset_value' not implemented!")
event.Skip()
def on_check(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_check' not implemented!")
event.Skip()
def on_usage(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_usage' not implemented!")
event.Skip()
def on_check_grad_refocus(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_check_grad_refocus' not implemented!")
event.Skip()
def on_float_grad_refocus(self, event): # wxGlade: PanelTabTransform.<event_handler>
print("Event handler 'on_float_grad_refocus' not implemented!")
event.Skip()
# end of class PanelTabTransform
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.PanelTabTransform = PanelTabTransform(self, wx.ID_ANY)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("frame_1")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_4.Add(self.PanelTabTransform, 1, wx.EXPAND, 0)
self.SetSizer(sizer_4)
sizer_4.Fit(self)
self.Layout()
# end wxGlade
# end of class MyFrame
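# Illustrative bootstrap (not part of this generated excerpt; names follow the
# usual wxGlade pattern): create the wx app, show MyFrame, and run the loop.
#   app = wx.App(False)
#   frame = MyFrame(None, wx.ID_ANY, "")
#   frame.Show()
#   app.MainLoop()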
| [
"[email protected]"
]
| |
d7fd680264a075e0008e66128f731daba3352906 | c81d7dfef424b088bf2509a1baf406a80384ea5a | /venv/Lib/site-packages/twilio/twiml/fax_response.py | 86f0d47e9326dbf2c53d110fb4668e8cb531ea2c | []
| no_license | Goutham2591/OMK_PART2 | 111210d78fc4845481ed55c852b8f2f938918f4a | cb54fb21ebf472bffc6ee4f634bf1e68303e113d | refs/heads/master | 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 | Python | UTF-8 | Python | false | false | 956 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from twilio.twiml import (
TwiML,
format_language,
)
class FaxResponse(TwiML):
""" <Response> TwiML for Faxes """
def __init__(self, **kwargs):
super(FaxResponse, self).__init__(**kwargs)
self.name = 'Response'
def receive(self, action=None, method=None, **kwargs):
"""
Create a <Receive> element
:param action: Receive action URL
:param method: Receive action URL method
:param kwargs: additional attributes
:returns: <Receive> element
"""
return self.nest(Receive(action=action, method=method, **kwargs))
class Receive(TwiML):
""" <Receive> TwiML Verb """
def __init__(self, **kwargs):
super(Receive, self).__init__(**kwargs)
self.name = 'Receive'
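# Illustrative usage sketch (not part of the generated file): build a TwiML
# document that tells Twilio to receive an incoming fax.
#   response = FaxResponse()
#   response.receive(action='/fax/received', method='POST')
#   str(response)  # -> the rendered <Response><Receive .../></Response> TwiML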
| [
"[email protected]"
]
| |
45506a93ffee58f66e981c1a00e7ef24971fca43 | 8671856181ef218f147f23f367fd0b1dc7592e1a | /customers/admin.py | 243b18c20c73174bcd5b48544007e62989c33ddd | []
| no_license | Alishrf/Shop_Website | e4fef9618aec2db6f4a655ff643aa68cf42dbb68 | 971d4a2ff8b7a68a0157681ff26404fe403502e6 | refs/heads/master | 2020-08-11T06:03:47.642870 | 2019-10-14T14:29:30 | 2019-10-14T14:29:30 | 214,504,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from django.contrib import admin
from .models import Customer
class CustomerAdmin(admin.ModelAdmin):
list_display = ('user',)
admin.site.register(Customer,CustomerAdmin)
| [
"[email protected]"
]
| |
9c33d352506198e4532780e67484f4a8d6a6b723 | 129cf00f2d4f38ba53cb638b38c75a191402ac8d | /ctfcli/core/plugins.py | 4829493ac6977713f8ee265771ec8945f38bf7a2 | [
"Apache-2.0"
]
| permissive | CTFd/ctfcli | ab92dedf467e234f9e19daab2fd7853697809aa2 | 928966c0b360d7864f2e535d258569edb7f93f88 | refs/heads/master | 2023-09-04T06:25:36.438018 | 2023-08-31T15:40:08 | 2023-08-31T15:40:08 | 252,505,424 | 139 | 57 | Apache-2.0 | 2023-09-04T22:58:54 | 2020-04-02T16:14:57 | Python | UTF-8 | Python | false | false | 605 | py | import importlib
import logging
import sys
from typing import Dict
from ctfcli.core.config import Config
log = logging.getLogger("ctfcli.core.plugins")
def load_plugins(commands: Dict):
plugins_path = Config.get_plugins_path()
sys.path.insert(0, str(plugins_path.absolute()))
for plugin in sorted(plugins_path.iterdir()):
plugin_path = plugins_path / plugin / "__init__.py"
log.debug(f"Loading plugin '{plugin}' from '{plugin_path}'")
loaded = importlib.import_module(plugin.stem)
loaded.load(commands)
sys.path.remove(str(plugins_path.absolute()))
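# Note: load_plugins makes every directory under the configured plugins path
# importable, imports each one as a module, and calls its load(commands) hook
# so the plugin can register extra CLI commands; the sys.path tweak is then
# undone.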
| [
"[email protected]"
]
| |
524a89245ae2b00ec761491fdff18fee9d9f01df | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/training/rmsprop.pyi | 402add92a5c8c927c4a7d9283e05df91e4e0070f | []
| no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | pyi | # Stubs for tensorflow.python.training.rmsprop (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as array_ops, init_ops as init_ops, math_ops as math_ops
from tensorflow.python.training import optimizer as optimizer, training_ops as training_ops
from tensorflow.python.util.tf_export import tf_export as tf_export
from typing import Any as Any
class RMSPropOptimizer(optimizer.Optimizer):
def __init__(self, learning_rate: Any, decay: float = ..., momentum: float = ..., epsilon: float = ..., use_locking: bool = ..., centered: bool = ..., name: str = ...) -> None: ...
| [
"[email protected]"
]
| |
87dd0f2152017150f51e44d48c8c79da3578a5d3 | 80d596df8a8ce8f9c844b325d2df5a1185581595 | /clld/lib/rdf.py | 232543caf318debf194d1af0d52c9dbb675e2bf7 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause"
]
| permissive | FieldDB/clld | 780d2893d6fdc766f91df0886280c0ea02f640eb | 4738caf5125648dc952a97692c38f90ba13011b1 | refs/heads/master | 2021-01-15T15:15:55.131288 | 2014-02-17T20:25:10 | 2014-02-17T20:25:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | """
This module provides functionality for handling our data as rdf.
"""
from collections import namedtuple
from cStringIO import StringIO
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import (
Namespace, DC, DCTERMS, DOAP, FOAF, OWL, RDF, RDFS, SKOS, VOID, XMLNS,
)
# make flake8 happy, but still have the following importable from here:
assert DOAP
assert XMLNS
from clld.util import encoded
Notation = namedtuple('Notation', 'name extension mimetype uri')
FORMATS = dict((n.name, n) for n in [
Notation('xml', 'rdf', 'application/rdf+xml', 'http://www.w3.org/ns/formats/RDF_XML'),
Notation('n3', 'n3', 'text/n3', 'http://www.w3.org/ns/formats/N3'),
Notation('nt', 'nt', 'text/nt', 'http://www.w3.org/ns/formats/N-Triples'),
Notation('turtle', 'ttl', 'text/turtle', 'http://www.w3.org/ns/formats/Turtle')])
NAMESPACES = {
"rdf": RDF,
"void": VOID,
"foaf": FOAF,
"frbr": Namespace("http://purl.org/vocab/frbr/core#"),
"dcterms": DCTERMS,
"rdfs": RDFS,
"geo": Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#"),
"isbd": Namespace("http://iflastandards.info/ns/isbd/elements/"),
"skos": SKOS,
"dc": DC,
"gold": Namespace("http://purl.org/linguistics/gold/"),
"lexvo": Namespace("http://lexvo.org/ontology"),
"vcard": Namespace("http://www.w3.org/2001/vcard-rdf/3.0#"),
"bibo": Namespace("http://purl.org/ontology/bibo/"),
"owl": OWL,
}
class ClldGraph(Graph):
"""augment the standard rdflib.Graph by making sure our standard ns prefixes are
always bound.
"""
def __init__(self, *args, **kw):
super(ClldGraph, self).__init__(*args, **kw)
for prefix, ns in NAMESPACES.items():
self.bind(prefix, ns)
def properties_as_xml_snippet(subject, props):
"""somewhat ugly way to get at a snippet of an rdf-xml serialization of properties
of a subject.
"""
if isinstance(subject, basestring):
subject = URIRef(subject)
g = ClldGraph()
if props:
for p, o in props:
if ':' in p:
prefix, name = p.split(':')
p = getattr(NAMESPACES[prefix], name)
if isinstance(o, basestring):
if o.startswith('http://') or o.startswith('https://'):
o = URIRef(o)
else:
o = Literal(o)
g.add((subject, p, o))
res = []
in_desc = False
for line in g.serialize(format='xml').split('\n'):
if line.strip().startswith('</rdf:Description'):
break
if in_desc:
res.append(line)
if line.strip().startswith('<rdf:Description'):
in_desc = True
return '\n'.join(res)
def convert(string, from_, to_):
if from_ == to_:
return encoded(string)
assert from_ in FORMATS and to_ in FORMATS
g = Graph()
g.parse(StringIO(encoded(string)), format=from_)
out = StringIO()
g.serialize(out, format=to_)
out.seek(0)
return out.read()
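# Illustrative usage sketch (not part of the original module): the accepted
# format names are the keys of FORMATS above, so an RDF/XML string can be
# re-serialized as Turtle with, e.g.:
#   turtle_bytes = convert(rdf_xml_string, 'xml', 'turtle')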
| [
"[email protected]"
]
| |
74bb80ae19f6954190d3a560b8bea9a8bcb80441 | cfefcd99016a908df2584896845406942097671d | /python/test/test_portal_setting_value_list.py | 317ced9bb1079bacd0e94b55d01464c91a68e87f | []
| no_license | tomasgarzon/vigilant-guacamole | 982a8c7cb0a8193bb3409014b447ad8a70e6eb36 | bde73674cf0461e2fcdfce5074bf9d93a47227f7 | refs/heads/main | 2023-08-17T01:51:27.168440 | 2021-09-01T11:23:46 | 2021-09-01T11:23:46 | 398,827,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """
Nucoro API
No description # noqa: E501
The version of the OpenAPI document: 4.175.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.portal_setting_value_list import PortalSettingValueList
class TestPortalSettingValueList(unittest.TestCase):
"""PortalSettingValueList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPortalSettingValueList(self):
"""Test PortalSettingValueList"""
# FIXME: construct object with mandatory attributes with example values
# model = PortalSettingValueList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
a95e55e38b9d6a992da62ca6d9b84f7dfc0690cd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_countries.py | 0afbe19c74dda7dd31584a6a6e1c39ece912a31d | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
#class header
class _COUNTRIES():
def __init__(self,):
self.name = "COUNTRIES"
self.definitions = country
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['country']
| [
"[email protected]"
]
| |
7d5775a6b98d8aad1f6a14ac34b9357c5a103968 | 2b52e32f8ba65202078bde0173eb8e972434d3f8 | /Python_Algorithm/Baek/17072.py | 141252b6bae20376848af324ff10ec28a59c4398 | []
| no_license | HoeYeon/Algorithm | 7167c463922227c0bc82e43940f7290fc1fa16af | 0e5ce2a3347d733bbaa894391cbf344fcb5161d6 | refs/heads/master | 2020-09-08T17:27:56.654485 | 2020-08-02T08:23:46 | 2020-08-02T08:23:46 | 221,195,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | def i_f(li):
num = (2126*li[0] + 7152*li[1] + 722*li[2])
if 0 <= num < 510000:
return '#'
elif 510000 <= num < 1020000:
return 'o'
elif 1020000 <= num < 1530000:
return '+'
elif 1530000 <= num < 2040000:
return '-'
elif 2040000 <= num:
return '.'
n, m = map(int, input().split(' '))
pixel = [[] for i in range(n)]
result = [[] for i in range(n)]
for i in range(n):
li = list(map(int, input().split(' ')))
for j in range(m):
pixel[i].append([li[3*j], li[3*j+1], li[3*j+2]])
for i in range(len(pixel)):
result[i] = [i_f(j) for j in pixel[i]]
for i in result:
print(''.join(i))
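# The program above solves Baekjoon 17072: each RGB triple is reduced to a
# luminance value T = 2126*R + 7152*G + 722*B and i_f() buckets T into one of
# five ASCII shades ('#', 'o', '+', '-', '.') from darkest to brightest.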
| [
"[email protected]"
]
| |
b6b9f0446819b1f0796ee2f017b86c55a180a31c | dce4a52986ddccea91fbf937bd89e0ae00b9d046 | /jni-build/jni/include/tensorflow/contrib/graph_editor/tests/subgraph_test.py | b20632a6c212dfe946b0f0a96c26b327a97aebde | [
"MIT"
]
| permissive | Lab603/PicEncyclopedias | 54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6 | 6d39eeb66c63a6f0f7895befc588c9eb1dd105f9 | refs/heads/master | 2022-11-11T13:35:32.781340 | 2018-03-15T05:53:07 | 2018-03-15T05:53:07 | 103,941,664 | 6 | 3 | MIT | 2022-10-28T05:31:37 | 2017-09-18T13:20:47 | C++ | UTF-8 | Python | false | false | 3,145 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
class SubgraphTest(tf.test.TestCase):
def setUp(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.a = tf.constant([1., 1.], shape=[2], name="a")
with tf.name_scope("foo"):
self.b = tf.constant([2., 2.], shape=[2], name="b")
self.c = tf.add(self.a, self.b, name="c")
self.d = tf.constant([3., 3.], shape=[2], name="d")
with tf.name_scope("bar"):
self.e = tf.add(self.c, self.d, name="e")
self.f = tf.add(self.c, self.d, name="f")
self.g = tf.add(self.c, self.a, name="g")
with tf.control_dependencies([self.c.op]):
self.h = tf.add(self.f, self.g, name="h")
def test_subgraph(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(list(sgv.inputs), [])
self.assertEqual(len(sgv.ops), 8)
sgv = ge.sgv(self.f.op, self.g.op)
self.assertEqual(list(sgv.outputs), [self.f, self.g])
self.assertEqual(list(sgv.inputs), [self.c, self.d, self.a])
sgv = ge.sgv_scope("foo/bar", graph=self.graph)
self.assertEqual(list(sgv.ops),
[self.e.op, self.f.op, self.g.op, self.h.op])
def test_subgraph_remap(self):
sgv = ge.sgv(self.c.op)
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
sgv = sgv.remap_outputs_to_consumers()
self.assertEqual(list(sgv.outputs), [self.c, self.c, self.c])
sgv = sgv.remap_outputs_make_unique()
self.assertEqual(list(sgv.outputs), [self.c])
sgv = sgv.remap(new_input_indices=[], new_output_indices=[])
self.assertEqual(len(sgv.inputs), 0)
self.assertEqual(len(sgv.outputs), 0)
sgv = sgv.remap_default()
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
def test_remove_unused_ops(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(len(sgv.ops), 8)
sgv = sgv.remap_outputs(new_output_indices=[1]).remove_unused_ops()
self.assertEqual(list(sgv.outputs), [self.h])
self.assertEqual(len(sgv.ops), 7)
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
]
| |
8b44925e87277f4b637f2fb3403862d23eb35a82 | 62226afe584a0d7f8d52fc38ca416b19ffafcb7a | /hwtLib/amba/axi_comp/slave_timeout_test.py | bdcc4f60d7f43ba87be5625c2c8cd3564a3b0a8b | [
"MIT"
]
| permissive | Nic30/hwtLib | d08a08bdd0bf764971c4aa319ff03d4df8778395 | 4c1d54c7b15929032ad2ba984bf48b45f3549c49 | refs/heads/master | 2023-05-25T16:57:25.232026 | 2023-05-12T20:39:01 | 2023-05-12T20:39:01 | 63,018,738 | 36 | 8 | MIT | 2021-04-06T17:56:14 | 2016-07-10T21:13:00 | Python | UTF-8 | Python | false | false | 3,411 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.amba.axi4 import Axi4
from hwtLib.amba.axiLite_comp.sim.utils import axi_randomize_per_channel
from hwtLib.amba.axi_comp.slave_timeout import AxiSlaveTimeout
from hwtLib.amba.constants import RESP_SLVERR, RESP_OKAY
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.constants import CLK_PERIOD
class AxiSlaveTimeoutTC(SimTestCase):
@classmethod
def setUpClass(cls):
u = cls.u = AxiSlaveTimeout(Axi4)
u.TIMEOUT = 4
cls.compileSim(u)
def randomize_all(self):
u = self.u
for axi in [u.m, u.s]:
axi_randomize_per_channel(self, axi)
def test_nop(self):
u = self.u
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
ae(u.s.b._ag.data)
def test_read(self):
u = self.u
ar_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.ar._ag.data.append(ar_req)
r_trans = (1, 0x123, RESP_OKAY, 1)
u.m.r._ag.data.append(r_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
self.assertValSequenceEqual(u.m.ar._ag.data, [ar_req, ])
self.assertValSequenceEqual(u.s.r._ag.data, [r_trans, ])
ae(u.s.b._ag.data)
def test_read_timeout(self):
u = self.u
ar_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.ar._ag.data.append(ar_req)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
self.assertValSequenceEqual(u.m.ar._ag.data, [ar_req, ])
self.assertValSequenceEqual(u.s.r._ag.data, [(1, None, RESP_SLVERR, 1), ])
ae(u.s.b._ag.data)
def test_b_timeout(self):
u = self.u
aw_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.aw._ag.data.append(aw_req)
w_trans = (0x123, mask(u.m.DATA_WIDTH // 8), 1)
u.s.w._ag.data.append(w_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
self.assertValSequenceEqual(u.m.aw._ag.data, [aw_req, ])
self.assertValSequenceEqual(u.m.w._ag.data, [w_trans, ])
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
self.assertValSequenceEqual(u.s.b._ag.data, [((1, RESP_SLVERR))])
def test_write(self):
u = self.u
aw_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.aw._ag.data.append(aw_req)
w_trans = (0x123, mask(u.s.DATA_WIDTH // 8), 1)
u.s.w._ag.data.append(w_trans)
b_trans = (1, RESP_OKAY)
u.m.b._ag.data.append(b_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
self.assertValSequenceEqual(u.m.aw._ag.data, [aw_req, ])
self.assertValSequenceEqual(u.m.w._ag.data, [w_trans, ])
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
self.assertValSequenceEqual(u.s.b._ag.data, [b_trans, ])
if __name__ == "__main__":
import unittest
testLoader = unittest.TestLoader()
# suite = unittest.TestSuite([AxiSlaveTimeoutTC("test_singleLong")])
suite = testLoader.loadTestsFromTestCase(AxiSlaveTimeoutTC)
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite) | [
"[email protected]"
]
| |
b0efcdc71b8fae99512efb620fd54b98fb9200ca | 9a53024307d5b1706e53326d3eb4c1d77743a136 | /graphql_demo/graphql_demo/schema.py | efae7b2a504175d776ec42bb3a87e87a1255ba47 | []
| no_license | Triadai/django-graphql-demo | 4a995e8af1096965090378a00a0dd512ab05ecf7 | 1b1d40d7b09ade457d4252096bbfca0315557396 | refs/heads/master | 2021-05-06T15:14:26.514559 | 2017-06-19T06:42:05 | 2017-06-19T06:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import graphene
import simple_app.schema
class Mutation(simple_app.schema.Mutation, graphene.ObjectType):
pass
class Query(simple_app.schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
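# Note: Query and Mutation above just mix the simple_app.schema classes into
# graphene.ObjectType subclasses, so graphene.Schema exposes every query field
# and mutation that simple_app defines.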
| [
"[email protected]"
]
| |
cb871c3c36900c1f80f9553e2f068b11c83b60f9 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/142_v4/scores.py | 6b1a27c733f9f2bb71e25354899689e842889dad | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,364 | py | from collections import namedtuple
MIN_SCORE = 4
DICE_VALUES = range(1, 7)
Player = namedtuple('Player', 'name scores')
def calculate_score(scores):
"""Based on a list of score ints (dice roll), calculate the
total score only taking into account >= MIN_SCORE
(= eyes of the dice roll).
If one of the scores is not a valid dice roll (1-6)
raise a ValueError.
Returns int of the sum of the scores.
"""
if not all(s in range(1, 7) for s in scores):
raise ValueError
return sum(s for s in scores if s >= MIN_SCORE)
def get_winner(players):
"""Given a list of Player namedtuples return the player
with the highest score using calculate_score.
    If the lengths of the scores lists of the players passed in
    don't match up, raise a ValueError.
Returns a Player namedtuple of the winner.
You can assume there is only one winner.
For example - input:
Player(name='player 1', scores=[1, 3, 2, 5])
Player(name='player 2', scores=[1, 1, 1, 1])
Player(name='player 3', scores=[4, 5, 1, 2])
output:
Player(name='player 3', scores=[4, 5, 1, 2])
"""
if not all(len(x.scores) == len(players[0].scores) for x in players[1:]):
raise ValueError
return max(players, key=lambda x: calculate_score(x.scores))
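# Illustrative usage sketch (added, not part of the original module).
# It reuses the docstring example: only rolls >= MIN_SCORE count, so
# player 3 wins with 4 + 5 = 9.
if __name__ == '__main__':
    players = [
        Player(name='player 1', scores=[1, 3, 2, 5]),
        Player(name='player 2', scores=[1, 1, 1, 1]),
        Player(name='player 3', scores=[4, 5, 1, 2]),
    ]
    print(get_winner(players))  # Player(name='player 3', scores=[4, 5, 1, 2])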
| [
"[email protected]"
]
| |
868309f5fcb83db8ab2e52e6e7f2cf5f34c3e5f8 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /closure__examples/hello_world.py | 84781ecceeb45ead5394fcd637397dc296b928bd | [
"CC-BY-4.0"
]
| permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://ru.wikipedia.org/wiki/Замыкание_(программирование)
# multiplier возвращает функцию умножения на n
def multiplier(n):
def mul(k):
return n * k
return mul
# mul3 - функция, умножающая на 3
mul3 = multiplier(3)
print(mul3(3), mul3(5)) # 9 15
| [
"[email protected]"
]
| |
5628bf46e85c2384cc2ad20595d2815fff2243ba | 02442f7d3bd75da1b5b1bf6b981cc227906a058c | /rocon/build/rocon_msgs/concert_msgs/catkin_generated/pkg.installspace.context.pc.py | 723bbed31accdfcda71ffdeb61f1a0bac6159af2 | []
| no_license | facaisdu/RaspRobot | b4ff7cee05c70ef849ea4ee946b1995432a376b7 | e7dd2393cdabe60d08a202aa103f796ec5cd2158 | refs/heads/master | 2020-03-20T09:09:28.274814 | 2018-06-14T08:51:46 | 2018-06-14T08:51:46 | 137,329,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sclab_robot/turtlebot_ws/rocon/install/include".split(';') if "/home/sclab_robot/turtlebot_ws/rocon/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gateway_msgs;message_runtime;rocon_app_manager_msgs;rocon_std_msgs;std_msgs;uuid_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "concert_msgs"
PROJECT_SPACE_DIR = "/home/sclab_robot/turtlebot_ws/rocon/install"
PROJECT_VERSION = "0.9.0"
| [
"[email protected]"
]
| |
6ef04cf075ceac67095db1f0d24a78ffa5b3359b | b87ec5d1b11b3a517256365f07413cec56eff972 | /_cite/cite.py | 5f8d135a94904956eb712f797a50eef448847197 | [
"BSD-3-Clause"
]
| permissive | greenelab/lab-website-template | 53349ae75c50f573aaa275260839c89a2fef9d3e | 49149880097297f3f0bf90f6f3c2bc7856ca73de | refs/heads/main | 2023-08-22T21:18:18.716765 | 2023-05-19T18:59:15 | 2023-05-19T18:59:15 | 296,680,938 | 213 | 309 | BSD-3-Clause | 2023-05-19T18:59:16 | 2020-09-18T16:57:31 | HTML | UTF-8 | Python | false | false | 4,916 | py | """
cite process to convert sources and metasources into full citations
"""
import traceback
from importlib import import_module
from pathlib import Path
from dotenv import load_dotenv
from util import *
# load environment variables
load_dotenv()
# error flag
error = False
# output citations file
output_file = "_data/citations.yaml"
log()
log("Compiling sources")
# compiled list of sources
sources = []
# in-order list of plugins to run
plugins = ["google-scholar", "pubmed", "orcid", "sources"]
# loop through plugins
for plugin in plugins:
# convert into path object
plugin = Path(f"plugins/{plugin}.py")
log(f"Running {plugin.stem} plugin")
# get all data files to process with current plugin
files = Path.cwd().glob(f"_data/{plugin.stem}*.*")
files = list(filter(lambda p: p.suffix in [".yaml", ".yml", ".json"], files))
log(f"Found {len(files)} {plugin.stem}* data file(s)", 1)
# loop through data files
for file in files:
log(f"Processing data file {file.name}", 1)
# load data from file
try:
data = load_data(file)
# check if file in correct format
if not list_of_dicts(data):
raise Exception("File not a list of dicts")
except Exception as e:
log(e, 2, "ERROR")
error = True
continue
# loop through data entries
for index, entry in enumerate(data):
log(f"Processing entry {index + 1} of {len(data)}, {label(entry)}", 2)
# run plugin on data entry to expand into multiple sources
try:
expanded = import_module(f"plugins.{plugin.stem}").main(entry)
# check that plugin returned correct format
if not list_of_dicts(expanded):
raise Exception("Plugin didn't return list of dicts")
# catch any plugin error
except Exception as e:
# log detailed pre-formatted/colored trace
print(traceback.format_exc())
# log high-level error
log(e, 3, "ERROR")
error = True
continue
# loop through sources
for source in expanded:
if plugin.stem != "sources":
log(label(source), 3)
# include meta info about source
source["plugin"] = plugin.name
source["file"] = file.name
# add source to compiled list
sources.append(source)
if plugin.stem != "sources":
log(f"{len(expanded)} source(s)", 3)
log("Merging sources by id")
# merge sources with matching (non-blank) ids
for a in range(0, len(sources)):
a_id = get_safe(sources, f"{a}.id", "")
if not a_id:
continue
for b in range(a + 1, len(sources)):
b_id = get_safe(sources, f"{b}.id", "")
if b_id == a_id:
log(f"Found duplicate {b_id}", 2)
sources[a].update(sources[b])
sources[b] = {}
sources = [entry for entry in sources if entry]
log(f"{len(sources)} total source(s) to cite")
log()
log("Generating citations")
# list of new citations
citations = []
# loop through compiled sources
for index, source in enumerate(sources):
log(f"Processing source {index + 1} of {len(sources)}, {label(source)}")
# new citation data for source
citation = {}
# source id
_id = get_safe(source, "id", "").strip()
# Manubot doesn't work without an id
if _id:
log("Using Manubot to generate citation", 1)
try:
# run Manubot and set citation
citation = cite_with_manubot(_id)
# if Manubot cannot cite source
except Exception as e:
# if regular source (id entered by user), throw error
if get_safe(source, "plugin", "") == "sources.py":
log(e, 3, "ERROR")
error = True
# otherwise, if from metasource (id retrieved from some third-party API), just warn
else:
log(e, 3, "WARNING")
# discard source from citations
# continue
# preserve fields from input source, overriding existing fields
citation.update(source)
# ensure date in proper format for correct date sorting
if get_safe(citation, "date", ""):
citation["date"] = format_date(get_safe(citation, "date", ""))
# add new citation to list
citations.append(citation)
log()
log("Saving updated citations")
# save new citations
try:
save_data(output_file, citations)
except Exception as e:
log(e, level="ERROR")
error = True
# exit at end, so user can see all errors in one run
if error:
log("Error(s) occurred above", level="ERROR")
exit(1)
else:
log("All done!", level="SUCCESS")
log("\n")
| [
"[email protected]"
]
| |
7573f09ccd7f7eba9450179f4e9d1fc18c3b3ceb | a961aaa37bde0c8217453631809da11203a145c3 | /clients/python/SolviceRoutingClient/api/pvrp_api.py | 3abc55442e8d069ced6cea4212ecef7e9841172b | [
"MIT"
]
| permissive | solvice/solvice-routing-client | 0f3fdb69244e1d3db92159db4be651adcdf23eb1 | cdaedaf47d202965549c9e2b7d9102d292c91d5b | refs/heads/master | 2020-06-22T16:56:52.871358 | 2019-09-25T14:46:54 | 2019-09-25T14:46:54 | 197,749,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,201 | py | # coding: utf-8
"""
OnRoute API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from SolviceRoutingClient.api_client import ApiClient
class PVRPApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def solve_pvrp(self, **kwargs): # noqa: E501
"""Solve a PVRP problem # noqa: E501
Periodic vehicle routing problems # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.solve_pvrp(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PVRP body: PVRP problem solve request
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.solve_pvrp_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.solve_pvrp_with_http_info(**kwargs) # noqa: E501
return data
def solve_pvrp_with_http_info(self, **kwargs): # noqa: E501
"""Solve a PVRP problem # noqa: E501
Periodic vehicle routing problems # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.solve_pvrp_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PVRP body: PVRP problem solve request
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method solve_pvrp" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/solve#PVRP', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
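# Hedged usage sketch (added; not part of the generated client). Exact import
# paths and authentication setup are assumptions - only PVRPApi.solve_pvrp(body=...)
# is taken from the code above:
#
# api = PVRPApi()  # uses a default ApiClient; basicAuth credentials must be configured
# job = api.solve_pvrp(body=pvrp_problem)  # pvrp_problem: an instance of the PVRP model
# print(job)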
| [
"[email protected]"
]
| |
620c4eaa8063ccdd8693983d8e97949ba279c96c | 64c05e8c346ced131f65770db7c8ebe6c9e29c12 | /tests/test_model/test_backbone/test_shufflenetv1_backbone.py | c5da09d306f1d1822c45a72528ea2ec7933a6fa7 | [
"Apache-2.0"
]
| permissive | lilujunai/ZCls | bcf6c5ceae8ce7282e77678d63c6aa2daa4feda4 | 14a272540b6114fb80cd314066ff6721bcf85231 | refs/heads/master | 2023-02-25T05:46:07.794013 | 2021-02-04T07:34:39 | 2021-02-04T07:34:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | # -*- coding: utf-8 -*-
"""
@date: 2020/11/21 下午7:24
@file: test_resnet_backbone.py
@author: zj
@description:
"""
import torch
from zcls.model.backbones.shufflenetv1_unit import ShuffleNetV1Unit
from zcls.model.backbones.shufflenetv1_backbone import ShuffleNetV1Backbone
def test_shufflenet_v1_backbone():
# g=1
model = ShuffleNetV1Backbone(
groups=1,
layer_planes=(144, 288, 576),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 576, 7, 7)
# g=2
model = ShuffleNetV1Backbone(
groups=2,
layer_planes=(200, 400, 800),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 800, 7, 7)
# g=3
model = ShuffleNetV1Backbone(
groups=3,
layer_planes=(240, 480, 960),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 960, 7, 7)
# g=4
model = ShuffleNetV1Backbone(
groups=4,
layer_planes=(272, 544, 1088),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 1088, 7, 7)
# g=8
model = ShuffleNetV1Backbone(
groups=8,
layer_planes=(384, 768, 1536),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 1536, 7, 7)
if __name__ == '__main__':
test_shufflenet_v1_backbone()
| [
"[email protected]"
]
| |
2b867933f646511764508dfeb84aecc84822e99f | ce6d74994bce49411f00f5053f56fb3b7c30bd50 | /interview/interview16.py | f7620da21a3f9c3559924734d76be04bb6091369 | []
| no_license | zhengjiani/pyAlgorithm | 9397906f3c85221e64f0415abfbb64d03eb1c51e | dbd04a17cf61bac37531e3337ba197c4af19489e | refs/heads/master | 2021-07-11T19:07:26.480403 | 2020-07-16T00:25:24 | 2020-07-16T00:25:24 | 179,308,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# @Time : 2019/9/9 11:13
# @Author : zhengjiani
# @Software: PyCharm
# @Blog :https://zhengjiani.github.io/
"""
Find the depth of a binary tree.
Each path from the root node to a leaf node (including both the root and the leaf) forms one path of the tree; the length of the longest such path is the depth of the tree.
"""
class TreeNode:
def __init__(self,x):
self.val = x
self.left = None
self.right = None
class Solution:
    def TreeDepth(self, pRoot):
        if pRoot is None:
            return 0
        ldepth = self.TreeDepth(pRoot.left)
        rdepth = self.TreeDepth(pRoot.right)
        return max(ldepth, rdepth) + 1
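# Minimal usage sketch (added for illustration; not part of the original
# solution): the longest root-to-leaf path below has 3 nodes.
if __name__ == '__main__':
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    print(Solution().TreeDepth(root))  # 3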
| [
"[email protected]"
]
| |
e0ee1b4a0bc7857aa5e825511b29c181f4916630 | de15d37591450a23e1ca753e8ac345bfc19c0aba | /PythonEnv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py | b3d32f6688e911c16cd37a527b5a04db8625a145 | [
"MIT",
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | MagnavoxTG/Dionysos | e0da00c8e83ad8603f087da83cda85862faecfd7 | 002791be6ad7955fa0cbf3a25136940a29c194e8 | refs/heads/master | 2021-05-24T09:21:37.217367 | 2020-04-05T17:12:43 | 2020-04-05T17:12:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,281 | py | from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions._zip_descr
zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions(object):
# Misc tests
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_zip_descr(self):
# Test zip_descr
(w, x, y, z) = self.data
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
# Test drop_fields
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
# All the nested sub-field from a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# dropping all fields results in an array with no fields
test = drop_fields(a, ['a', 'b'])
control = np.array([(), ()], dtype=[])
assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ())))
ndtype = np.dtype([])
test = get_names(ndtype)
assert_equal(test, ())
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b'))
ndtype = np.dtype([])
test = get_names_flat(ndtype)
assert_equal(test, ())
def test_get_fieldstructure(self):
# Test get_fieldstructure
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': []})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
# 0 fields
ndtype = np.dtype([])
test = get_fieldstructure(ndtype)
assert_equal(test, {})
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
dt = np.dtype('u1,f4,i8', align=True)
a = np.zeros(2, dtype=dt)
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
assert_equal(repack_fields(a).itemsize, 13)
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
# make sure type is preserved
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
def test_structured_to_unstructured(self):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4, 5), dtype='f8'))
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
assert_equal(out, np.array([3., 5.5, 9., 11.]))
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
assert_equal(out, np.array([1., 4., 7., 10.]))
c = np.arange(20).reshape((4, 5))
out = unstructured_to_structured(c, a.dtype)
want = np.array([(0, (1., 2), [3., 4.]),
(5, (6., 7), [8., 9.]),
(10, (11., 12), [13., 14.]),
(15, (16., 17), [18., 19.])],
dtype=[('a', 'i4'),
('b', [('f0', 'f4'), ('f1', 'u2')]),
('c', 'f4', (2,))])
assert_equal(out, want)
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
assert_equal(apply_along_fields(np.mean, d),
np.array([8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
np.array([3., 5.5, 9., 11.]))
# check that for uniform field dtypes we get a view, not a copy:
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[4, 5], [6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
arr = np.zeros(10, triangle)
res = structured_to_unstructured(arr, dtype=int)
assert_equal(res, np.zeros((10, 6), dtype=int))
# test nested combinations of subarrays and structured arrays, gh-13333
def subarray(dt, shape):
return np.dtype((dt, shape))
def structured(*dts):
return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
def inspect(dt, dtype=None):
arr = np.zeros((), dt)
ret = structured_to_unstructured(arr, dtype=dtype)
backarr = unstructured_to_structured(ret, dt)
return ret.shape, ret.dtype, backarr.dtype
dt = structured(subarray(structured(np.int32, np.int32), 3))
assert_equal(inspect(dt), ((6,), np.int32, dt))
dt = structured(subarray(subarray(np.int32, 2), 2))
assert_equal(inspect(dt), ((4,), np.int32, dt))
dt = structured(np.int32)
assert_equal(inspect(dt), ((1,), np.int32, dt))
dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
assert_equal(inspect(dt), ((5,), np.int32, dt))
dt = structured()
assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
# these currently don't work, but we may make it work in the future
assert_raises(NotImplementedError, structured_to_unstructured,
np.zeros(3, dt), dtype=np.int32)
assert_raises(NotImplementedError, unstructured_to_structured,
np.zeros((3, 0), dtype=np.int32))
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([(1, 2), (3, 4)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype))
# test nested fields
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
newdt = [('a', [('c', 'u1')])]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([((2,),), ((3,),)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype))
# test unstructured code path for 0d arrays
a, b = np.array(3), np.array(0)
assign_fields_by_name(b, a)
assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
def test_masked_flexible(self):
# Test recursive_fill_fields on masked flexible-array
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
class TestMergeArrays(object):
# Test merge_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
[(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
self.data = (w, x, y, z)
def test_solo(self):
# Test merge_arrays on a single array.
(_, x, _, z) = self.data
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
def test_solo_w_flatten(self):
# Test merge_arrays on a single array w & w/o flattening
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
def test_standard(self):
# Test standard & standard
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_flatten(self):
# Test standard & flexible
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
def test_flatten_wflexible(self):
# Test flatten standard & nested
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int), ('bc', [])])])]
control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
dtype=controldtype)
assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert_(isinstance(test, MaskedRecords))
def test_w_singlefield(self):
# Test single field
test = merge_arrays((np.array([1, 2]).view([('a', int)]),
np.array([10., 20., 30.])), )
control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('a', int), ('f1', float)])
assert_equal(test, control)
def test_w_shorter_flex(self):
# Test merge_arrays w/ a shorter flexndarray.
z = self.data[-1]
# Fixme, this test looks incomplete and broken
# test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
# control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
# dtype=[('A', '|S3'), ('B', float), ('C', int)])
# assert_equal(test, control)
# Hack to avoid pyflakes warnings about unused variables
merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
dtype=[('A', '|S3'), ('B', float), ('C', int)])
def test_singlerecord(self):
(_, x, y, z) = self.data
test = merge_arrays((x[0], y[0], z[0]), usemask=False)
control = np.array([(1, 10, ('A', 1))],
dtype=[('f0', int),
('f1', int),
('f2', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
class TestAppendFields(object):
# Test append_fields
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_append_single(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)], )
assert_equal(test, control)
def test_append_double(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)], )
assert_equal(test, control)
def test_append_on_flex(self):
# Test append_fields on flexible type arrays
z = self.data[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)], )
assert_equal(test, control)
def test_append_on_nested(self):
# Test append_fields on nested fields
w = self.data[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(
0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)], )
assert_equal(test, control)
class TestStackArrays(object):
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test stack_arrays on single arrays
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
(_, x, y, _) = self.data
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
def test_unnamed_and_named_fields(self):
# Test combination of arrays w/ & w/o named fields
(_, x, _, z) = self.data
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
def test_matching_named_fields(self):
# Test combination of arrays w/ matching field names
(_, x, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
# Test defaults: no exception raised if keys of defaults are not fields.
(_, _, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
# Tests autoconversion
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
with assert_raises(TypeError):
stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_subdtype(self):
z = np.array([
('A', 1), ('B', 2)
], dtype=[('A', '|S3'), ('B', float, (1,))])
zz = np.array([
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
res = stack_arrays((z, zz))
expected = ma.array(
data=[
(b'A', [1.0], 0),
(b'B', [2.0], 0),
(b'a', [10.0], 100.0),
(b'b', [20.0], 200.0),
(b'c', [30.0], 300.0)],
mask=[
(False, [False], True),
(False, [False], True),
(False, [False], False),
(False, [False], False),
(False, [False], False)
],
dtype=zz.dtype
)
assert_equal(res.dtype, expected.dtype)
assert_equal(res, expected)
assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_inner_join(self):
# Basic test of join_by
a, b = self.a, self.b
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
# Fixme, this test is broken
# test = join_by(('a', 'b'), a, b)
# control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
# (7, 57, 107, 102), (8, 58, 108, 103),
# (9, 59, 109, 104)],
# dtype=[('a', int), ('b', int),
# ('c', int), ('d', int)])
# assert_equal(test, control)
# Hack to avoid pyflakes unused variable warnings
join_by(('a', 'b'), a, b)
np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1, 2, 3]))],
dtype=[('key', int), ('value', 'uint16', 3)])
res = join_by('key', foo, bar)
assert_equal(res, bar.view(ma.MaskedArray))
def test_outer_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
def test_different_field_order(self):
# gh-8940
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
# this should not give a FutureWarning:
j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
def test_duplicate_keys(self):
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
@pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
expected_dtype = np.dtype([
('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_same_name_different_dtypes(self):
# gh-9338
a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
expected_dtype = np.dtype([
('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_subarray_key(self):
a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
res = join_by('pos', a, b)
assert_equal(res.dtype, expected_dtype)
assert_equal(res, expected)
def test_padded_dtype(self):
dt = np.dtype('i1,f4', align=True)
dt.names = ('k', 'v')
assert_(len(dt.descr), 3) # padding field is inserted
a = np.array([(1, 3), (3, 2)], dt)
b = np.array([(1, 1), (2, 2)], dt)
res = join_by('k', a, b)
# no padding fields remain
expected_dtype = np.dtype([
('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
])
assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_no_r1postfix(self):
# Basic test of join_by no_r1postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_no_postfix(self):
assert_raises(ValueError, join_by, 'a', self.a, self.b,
r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b1', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_two_keys_two_vars(self):
a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(50, 60), np.arange(10, 20))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(65, 75), np.arange(0, 10))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
(10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
(10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
(10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
(10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
dtype=[('k', int), ('a', int), ('b1', int),
('b2', int), ('c1', int), ('c2', int)])
test = join_by(
['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
| [
"="
]
| = |
cdd6c6b8c2a31c356719506c8ce4dcdc08202c50 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4458807.3.spec | 1772215d94ba44826928d25e2e4d7a241f93051f | []
| no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,306 | spec | {
"id": "mgm4458807.3",
"metadata": {
"mgm4458807.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 180884,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 4286,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1597,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 109975,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 465,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 7160,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 260640,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 73857,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 17968,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 11448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 126968,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 5988,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5417,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 8697,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 13951,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2350100,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 94,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 414,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 28,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1461,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1842,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 673,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 196,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23218,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 5179,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458807.3/file/999.done.species.stats"
}
},
"id": "mgm4458807.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458807.3"
}
},
"raw": {
"mgm4458807.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458807.3"
}
}
} | [
"[email protected]"
]
| |
6d2f796a571b224f6eafbc086a95247149bcaffc | cca8b92a8fb2e79d46a10e105cd6a98c22564383 | /kuterless/public_fulfillment/serializers.py | 1eaa29bb65dde90cda74fc6472c190c24bcf3ca7 | []
| no_license | gl1000007/NeuroNet | 2fc0b57a1147546b926e3d9e3d2c4a00589d5f1c | 222e8058a78ffe89442310c5650a2edb3eb8260c | refs/heads/master | 2021-01-17T12:02:12.658928 | 2016-06-06T19:56:37 | 2016-06-06T19:56:37 | 72,467,802 | 1 | 0 | null | 2016-10-31T18:49:41 | 2016-10-31T18:49:41 | null | UTF-8 | Python | false | false | 7,528 | py | # -*- coding: utf-8 -*-
from coplay import models
from coplay.models import Discussion, Feedback, LikeLevel, Decision, Task, \
Viewer, FollowRelation, UserUpdate, Vote, Glimpse, AnonymousVisitor, \
AnonymousVisitorViewer, UserProfile, MAX_TEXT
from django.contrib.auth.models import User
from rest_framework import serializers
class DiscussionSerializer(serializers.ModelSerializer):
class Meta:
model = Discussion
fields = ('id',
'owner',
'title',
'description',
'created_at',
'updated_at',
'locked_at',
'is_restricted',
'is_viewing_require_login'
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id',
'username',
'first_name',
'last_name'
)
class FeedbackSerializer(serializers.ModelSerializer):
class Meta:
model = Feedback
fields = ( 'id',
'discussion',
'user',
'feedbabk_type',
'content',
'created_at',
'updated_at'
)
class DecisionSerializer(serializers.ModelSerializer):
class Meta:
model = Decision
fields = ( 'id',
'parent',
'content',
'created_at',
'updated_at',
'value'
)
class VoteSerializer(serializers.ModelSerializer):
class Meta:
model = Vote
fields = ( 'id',
'voater',
'decision',
'created_at',
'updated_at',
'value',
)
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ( 'id',
'parent',
'responsible',
'goal_description',
'target_date',
'closed_at',
'closed_by',
'status_description',
'status',
'created_at',
'updated_at',
'final_state'
)
class ViewerSerializer(serializers.ModelSerializer):
class Meta:
model = Viewer
fields = ( 'id',
'user',
'discussion',
'created_at',
'updated_at',
'views_counter',
'views_counter_updated_at',
'discussion_updated_at_on_last_view',
'is_a_follower',
'is_invited'
)
class GlimpseSerializer(serializers.ModelSerializer):
class Meta:
model = Glimpse
fields = ( 'id',
'viewer',
'anonymous_visitor_viewer',
'created_at',
'updated_at'
)
class AnonymousVisitorSerializer(serializers.ModelSerializer):
class Meta:
model = AnonymousVisitor
fields = ( 'id',
'user',
'created_at',
'updated_at'
)
class AnonymousVisitorViewerSerializer(serializers.ModelSerializer):
class Meta:
model = AnonymousVisitorViewer
fields = ( 'id',
'anonymous_visitor',
'discussion',
'created_at',
'updated_at',
'views_counter',
'views_counter_updated_at',
'discussion_updated_at_on_last_view'
)
class FollowRelationSerializer(serializers.ModelSerializer):
class Meta:
model = FollowRelation
fields = ( 'id',
'follower_user',
'following_user',
'created_at',
'updated_at'
)
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ( 'id',
'user',
'created_at',
'updated_at',
'segment',
'recieve_notifications',
'recieve_updates',
'can_limit_discussion_access',
'can_limit_discussion_to_login_users_only',
'a_player'
)
class UserUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = UserUpdate
fields = ( 'id',
'recipient',
'discussion',
'sender',
'header',
'content',
'details_url',
'created_at',
'updated_at',
'already_read'
)
class DecisionWholeSerializer(serializers.ModelSerializer):
vote_set = VoteSerializer(many=True)
class Meta:
model = Decision
fields = ( 'id',
'parent',
'content',
'created_at',
'updated_at',
'value',
'vote_set'
)
class DiscussionWholeSerializer(serializers.ModelSerializer):
feedback_set = FeedbackSerializer(many=True)
task_set = TaskSerializer(many=True)
decision_set = DecisionWholeSerializer( many = True)
viewer_set = ViewerSerializer(many = True)
class Meta:
model = Discussion
fields = ('id',
'owner',
'title',
'description',
'created_at',
'updated_at',
'locked_at',
'is_restricted',
'is_viewing_require_login',
'feedback_set',
'task_set',
'decision_set',
'viewer_set'
)
class CreateFeedback(object):
ENCOURAGE = 1
COOPERATION = 2
INTUITION = 3
ADVICE = 4
FEEDBACK_TYPES = (
(ENCOURAGE, 'encourage'),
        (COOPERATION, 'cooperation'),
(INTUITION, 'intuition'),
(ADVICE, 'advice'),
)
def __init__(self, feedback_type, content):
self.feedback_type = feedback_type
self.content = content
class AddFeedBackSerializer(serializers.Serializer):
feedback_type = serializers.ChoiceField(choices=CreateFeedback.FEEDBACK_TYPES)
content = serializers.CharField(max_length=MAX_TEXT, min_length=None)
def restore_object(self, attrs, instance=None):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
if instance is not None:
instance.feedback_type = attrs.get('feedback_type', instance.feedback_type)
instance.content = attrs.get('content', instance.content)
return instance
return CreateFeedback(**attrs)
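# Example usage (sketch; assumes the old-style DRF serializer API implied by
# restore_object(), where the restored object is exposed as serializer.object):
#
#   serializer = AddFeedBackSerializer(data={'feedback_type': CreateFeedback.ADVICE,
#                                            'content': u'Great initiative!'})
#   if serializer.is_valid():
#       feedback = serializer.object   # a CreateFeedback instance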
| [
"[email protected]"
]
| |
e3f0533447afd1c99e2a5e345b86874bafc397a4 | 5132c0de8eb07fe0548442ad605852137899f2cd | /covsirphy/regression/rate_elastic_net.py | d6cbec5d776eaae99e3b734a9bb1bdf7e42291dc | [
"Apache-2.0"
]
| permissive | SelengeMGL/covid19-sir | c12d58f21de7c3c63e87fc3b55c9dda966653c17 | 64d50b79cc9d3e26019a230f3c373a2755451495 | refs/heads/master | 2023-05-06T18:41:21.901437 | 2021-05-23T12:52:11 | 2021-05-23T12:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import log10, floor
import numpy as np
import pandas as pd
from covsirphy.regression.param_elastic_net import _ParamElasticNetRegressor
class _RateElasticNetRegressor(_ParamElasticNetRegressor):
"""
Predict parameter values of ODE models with Elastic Net regression
and Indicators(n)/Indicators(n-1) -> Parameters(n)/Parameters(n-1) approach.
Args:
X (pandas.DataFrame):
Index
Date (pandas.Timestamp): observation date
Columns
(int/float): indicators
y (pandas.DataFrame):
Index
Date (pandas.Timestamp): observation date
Columns
(int/float) target values
delay (int): delay period [days]
kwargs: keyword arguments of sklearn.model_selection.train_test_split(test_size=0.2, random_state=0)
Note:
If @seed is included in kwargs, this will be converted to @random_state.
"""
# Description of regressor
DESC = "Indicators(n)/Indicators(n-1) -> Parameters(n)/Parameters(n-1) with Elastic Net"
def __init__(self, X, y, delay, **kwargs):
# Remember the last value of y (= the previous value of target y)
self._last_param_df = y.tail(1)
# Calculate X(n) / X(n-1) and replace inf/NA with 0
X_div = X.div(X.shift(1)).replace(np.inf, 0).fillna(0)
# Calculate y(n) / y(n-1) and replace inf with NAs (NAs will be removed in ._split())
y_div = y.div(y.shift(1)).replace(np.inf, np.nan)
super().__init__(X_div, y_div, delay, **kwargs)
def predict(self):
"""
Predict parameter values (via y) with self._regressor and X_target.
Returns:
pandas.DataFrame:
Index
Date (pandas.Timestamp): future dates
Columns
(float): parameter values (4 digits)
"""
# Predict parameter values
predicted = self._regressor.predict(self._X_target)
df = pd.DataFrame(predicted, index=self._X_target.index, columns=self._y_train.columns)
# Calculate y(n) values with y(0) and y(n) / y(n-1)
df = pd.concat([self._last_param_df, df], axis=0, sort=True)
df = df.cumprod().iloc[1:]
# parameter values: 4 digits
return df.applymap(lambda x: np.around(x, 4 - int(floor(log10(abs(x)))) - 1))
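# Minimal usage sketch (hypothetical data; X and y are DataFrames sharing a
# DatetimeIndex, following the constructor docstring above):
#
#   regressor = _RateElasticNetRegressor(X, y, delay=7)
#   future_parameters = regressor.predict()
#
# Internally the model is fit on day-over-day ratios, and predict() rebuilds
# absolute parameter values with a cumulative product that starts from the last
# observed row of y.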
| [
"[email protected]"
]
| |
1b3127988778826a12ef4eeeea33c86953e8b0c0 | bbc2e379e1e2e9b573d455c1e604636a11e2e6f6 | /config/settings/local.py | 1adbaa040927159bfcd47d28686a87a91d768a14 | [
"MIT"
]
| permissive | MikaelSantilio/uprevendas-api | cc7c21bc496194c6b4c9ba77205ed54b23a7f82a | f04312988ffe3231f68ae0ebeaed9eaf0a7914b0 | refs/heads/master | 2023-05-25T18:41:45.584569 | 2021-06-09T23:24:47 | 2021-06-09T23:24:47 | 330,134,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="b1obk3iNk5rk63pkwV9XW0BasqxxNcoaWC4avtUhCZQ56rmesMezMbqoJ82PnxD2",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
]
| |
dce3c9dab31479292158a01d39e4e2914dbbb66e | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/DiMuonOppositeSignP3GeVMinMaxMassDocaHighPtProd.py | 3ade6833c82d734ce4f0b64853268845d8508007 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | from Configurables import Generation, RepeatDecay, Inclusive, DiLeptonInAcceptance
from GaudiKernel.SystemOfUnits import GeV, MeV, mm
Generation().SampleGenerationTool = "RepeatDecay"
Generation().addTool( RepeatDecay )
Generation().RepeatDecay.NRedecay = 100
Generation().RepeatDecay.addTool( Inclusive )
Generation().RepeatDecay.Inclusive.ProductionTool = "PythiaProduction"
Generation().FullGenEventCutTool = "DiLeptonInAcceptance"
Generation().addTool( DiLeptonInAcceptance )
Generation().DiLeptonInAcceptance.RequireOppositeSign = True
Generation().DiLeptonInAcceptance.RequireSameSign = False
Generation().DiLeptonInAcceptance.LeptonOnePMin = 3*GeV
Generation().DiLeptonInAcceptance.LeptonTwoPMin = 3*GeV
Generation().DiLeptonInAcceptance.MinMass = 4700*MeV
Generation().DiLeptonInAcceptance.MaxMass = 6000*MeV
Generation().DiLeptonInAcceptance.PreselDoca = True
Generation().DiLeptonInAcceptance.DocaCut = 0.4*mm
Generation().DiLeptonInAcceptance.PreselPtProd = True
Generation().DiLeptonInAcceptance.PtProdMinCut = 4*GeV*4*GeV
Generation().DiLeptonInAcceptance.PtProdMaxCut = 1000*GeV*1000*GeV
| [
"[email protected]"
]
| |
22d444f0ffb8fb15ff5058d56f1660adc9d0469a | ca7162adc548c5937ebedd6234b40de7294e2da1 | /19-Pillow图像处理/20将图片中黄色修改为红色.py | e7c2ffe94958924e92ac02d3486ca223d47d5643 | []
| no_license | meloLeeAnthony/PythonLearn | 03c259d745b1ccdc039e9999889ab54be14ae020 | 9915ec5bb7048712a97539a9c5bce8743567b22a | refs/heads/master | 2023-07-10T21:12:54.027143 | 2021-08-19T12:43:19 | 2021-08-19T12:43:19 | 289,487,502 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from PIL import ImageDraw, Image
img = Image.open('images/bjsxt.png')
draw_obj = ImageDraw.Draw(img)
width, height = img.size
# Change yellow pixels to red
def get_color(oldColor):
    '''
    If a pixel is yellow (255,255,0), convert it to red by setting the green
    channel to 0. You can inspect the yellow colour range with the palette of
    the Windows Paint application.
    '''
    # print(oldColor)
    # Check the channel values; pure yellow is (255,255,0)
    if oldColor[0] > 60 and oldColor[1] > 60:
        return (oldColor[0], 0, oldColor[2])  # return red
    else:
        return oldColor
for x in range(width):
for y in range(height):
oldColor = img.getpixel((x, y))
draw_obj.point((x, y), fill=get_color(oldColor))
# img.show()
img.save('images/bjsxt_red.jpg')
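# A vectorised sketch of the same idea (an alternative approach, not part of the
# original exercise; assumes NumPy is installed):
#
#   import numpy as np
#   arr = np.array(Image.open('images/bjsxt.png'))
#   yellow = (arr[..., 0] > 60) & (arr[..., 1] > 60)
#   arr[yellow, 1] = 0                      # zero the green channel
#   Image.fromarray(arr).save('images/bjsxt_red_fast.png')
#
# For large images this avoids the slow per-pixel Python loop used above.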
| [
"[email protected]"
]
| |
e0fefb1fccf976cc448cb2a66ea9adab80e6d73f | ab8a5876c12d42db3a61a1560c774e118da5605e | /MDShop/service.py | dccd47d0261d2f84b66739d94e39569f047d4b25 | []
| no_license | mustavfaa/django_16.09 | 6e19d75dc1bcd2536e3d10d854989370607c0518 | b88ebe6f87d2facc51fee86dd18eb4cadaba0e14 | refs/heads/main | 2023-08-10T18:23:39.892882 | 2021-09-17T06:17:15 | 2021-09-17T06:17:15 | 406,880,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from django_filters import rest_framework as filters
from .models import smartphone
class CharFilterInFilter(filters.BaseInFilter, filters.CharFilter):
pass
class ShoppFilter(filters.FilterSet):
genres = CharFilterInFilter(field_name='category__name', lookup_expr='in')
price = filters.RangeFilter()
class Meta:
model = smartphone
fields = ['genres', 'price']
| [
"[email protected]"
]
| |
b3061ff7daa2a9bc88afc9974b5be35abd3df341 | e3ec5f1898ae491fa0afcdcc154fb306fd694f83 | /src/components/outputOpController/outputOpController.py | 24557b2dbd08cef6d8cd72a110151d21fd212f31 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | phoebezhung/raytk | 42397559a76a9ba39308ac03344b4446f64ea04d | b91483ce88b2956d7b23717b11e223d332ca8395 | refs/heads/master | 2023-08-27T05:20:38.062360 | 2021-10-21T04:33:18 | 2021-10-21T04:33:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | from typing import List
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from _typeAliases import *
class _Par:
Hostop: 'OPParamT'
Opdef: 'OPParamT'
Rendertop: 'OPParamT'
Shaderbuilder: 'OPParamT'
Fixedtexinputs: 'StrParamT'
Texselectors: 'StrParamT'
class _COMP(COMP):
par: _Par
class OutputOp:
def __init__(self, ownerComp: '_COMP'):
self.ownerComp = ownerComp
def _host(self) -> 'Optional[COMP]':
return self.ownerComp.par.Hostop.eval()
def _opDef(self) -> 'Optional[COMP]':
return self.ownerComp.par.Opdef.eval()
def _renderTop(self) -> 'Optional[glslmultiTOP]':
return self.ownerComp.par.Rendertop.eval()
def onInit(self):
self.updateTextureInputs()
self.resetInfoParams()
def resetInfoParams(self):
host = self._host()
if not host:
return
for par in host.customPars:
if par.page == 'Info' and not par.readOnly and not par:
par.val = par.default
def updateTextureInputs(self):
renderTop = self._renderTop()
if not renderTop:
return
for conn in renderTop.inputConnectors:
while conn.connections:
conn.disconnect()
fixedInputs = self.ownerComp.par.Fixedtexinputs.evalOPs() # type: List[TOP]
if fixedInputs:
for inputTop in fixedInputs:
if inputTop:
inputTop.outputConnectors[0].connect(renderTop)
host = self._host()
host.clearScriptErrors(error='texerr*')
texSources = self.ownerComp.op('textureSources') # type: DAT
selectors = self.ownerComp.par.Texselectors.evalOPs() # type: List[TOP]
for i in range(texSources.numRows):
if i >= len(selectors):
host.addScriptError(f'texerr: Too many texture sources (failed on #{i})')
return
select = selectors[i]
while select.outputConnectors[0].connections:
select.outputConnectors[0].disconnect()
select.outputConnectors[0].connect(renderTop)
| [
"[email protected]"
]
| |
e2b8ce4a88472ff1350842e57f2585a6f482b607 | 0a5eedbd7d6c844dfb557aa57e88f1b9e0527665 | /data/io/read_tfrecord.py | ac58ea1c0f82bfd71692a17dc1da4a31f7aa5dec | [
"MIT"
]
| permissive | chizhanyuefeng/R2CNN-Plus-Plus_Tensorflow | f63ad3a6e535d59528f6a06b7a9f877ec4607c7d | adec3de17db1d07eaf5d7bc1b1dc387934985b79 | refs/heads/master | 2020-09-22T21:24:28.999333 | 2019-12-03T02:25:08 | 2019-12-03T02:25:08 | 225,324,251 | 0 | 0 | MIT | 2019-12-02T08:32:34 | 2019-12-02T08:32:34 | null | UTF-8 | Python | false | false | 5,416 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import tensorflow as tf
import os
from data.io import image_preprocess
from libs.configs import cfgs
def read_single_example_and_decode(filename_queue):
# tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
# reader = tf.TFRecordReader(options=tfrecord_options)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized=serialized_example,
features={
'img_name': tf.FixedLenFeature([], tf.string),
'img_height': tf.FixedLenFeature([], tf.int64),
'img_width': tf.FixedLenFeature([], tf.int64),
'img': tf.FixedLenFeature([], tf.string),
'gtboxes_and_label': tf.FixedLenFeature([], tf.string),
'num_objects': tf.FixedLenFeature([], tf.int64)
}
)
img_name = features['img_name']
img_height = tf.cast(features['img_height'], tf.int32)
img_width = tf.cast(features['img_width'], tf.int32)
img = tf.decode_raw(features['img'], tf.uint8)
img = tf.reshape(img, shape=[img_height, img_width, 3])
# DOTA dataset need exchange img_width and img_height
# img = tf.reshape(img, shape=[img_width, img_height, 3])
gtboxes_and_label = tf.decode_raw(features['gtboxes_and_label'], tf.int32)
gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])
num_objects = tf.cast(features['num_objects'], tf.int32)
return img_name, img, gtboxes_and_label, num_objects
def read_and_prepocess_single_img(filename_queue, shortside_len, is_training):
img_name, img, gtboxes_and_label, num_objects = read_single_example_and_decode(filename_queue)
img = tf.cast(img, tf.float32)
img = img - tf.constant(cfgs.PIXEL_MEAN)
if is_training:
img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, gtboxes_and_label=gtboxes_and_label,
target_shortside_len=shortside_len)
img, gtboxes_and_label = image_preprocess.random_flip_left_right(img_tensor=img,
gtboxes_and_label=gtboxes_and_label)
else:
img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, gtboxes_and_label=gtboxes_and_label,
target_shortside_len=shortside_len)
return img_name, img, gtboxes_and_label, num_objects
def next_batch(dataset_name, batch_size, shortside_len, is_training):
'''
:return:
img_name_batch: shape(1, 1)
img_batch: shape:(1, new_imgH, new_imgW, C)
    gtboxes_and_label_batch: shape (1, Num_Of_objects, 9); each row is [x1, y1, x2, y2, x3, y3, x4, y4, label]
    '''
    assert batch_size == 1, "we only support a batch_size of 1. We may support a larger batch_size in the future"
    if dataset_name not in ['jyzdata', 'DOTA', 'ship', 'ICDAR2015', 'pascal', 'coco', 'DOTA_TOTAL', 'WIDER']:
        raise ValueError("dataset_name must be one of 'jyzdata', 'DOTA', 'ship', 'ICDAR2015', "
                         "'pascal', 'coco', 'DOTA_TOTAL' or 'WIDER'")
if is_training:
pattern = os.path.join('../data/tfrecord', dataset_name + '_train*')
else:
pattern = os.path.join('../data/tfrecord', dataset_name + '_test*')
print('tfrecord path is -->', os.path.abspath(pattern))
filename_tensorlist = tf.train.match_filenames_once(pattern)
filename_queue = tf.train.string_input_producer(filename_tensorlist)
shortside_len = tf.constant(shortside_len)
shortside_len = tf.random_shuffle(shortside_len)[0]
img_name, img, gtboxes_and_label, num_obs = read_and_prepocess_single_img(filename_queue, shortside_len,
is_training=is_training)
img_name_batch, img_batch, gtboxes_and_label_batch , num_obs_batch = \
tf.train.batch(
[img_name, img, gtboxes_and_label, num_obs],
batch_size=batch_size,
capacity=1,
num_threads=1,
dynamic_pad=True)
return img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
next_batch(dataset_name=cfgs.DATASET_NAME, # 'pascal', 'coco'
batch_size=cfgs.BATCH_SIZE,
shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
is_training=True)
gtboxes_and_label = tf.reshape(gtboxes_and_label_batch, [-1, 9])
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
img_name_batch_, img_batch_, gtboxes_and_label_batch_, num_objects_batch_ \
= sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch])
print('debug')
coord.request_stop()
coord.join(threads) | [
"[email protected]"
]
| |
99670667c78f1a507843a9f1ace224929d1a1a68 | 4ec709b16e366c60a9c7f2f7696608b036825140 | /stanislaus/_parameters/network_PH_Cost.py | 4ac6c1bd28f1a7f60b97bff44547c4d3f60eaf4d | []
| no_license | alanccai/sierra-pywr | 19c7efc4485879a4ca35677fdb14b3c795829e02 | 4447c6247af5159030b3025f14c2397283c4fcd0 | refs/heads/master | 2020-08-19T10:17:30.590861 | 2019-10-15T20:33:45 | 2019-10-15T20:33:45 | 215,909,820 | 0 | 0 | null | 2019-11-15T17:54:23 | 2019-10-18T00:31:58 | null | UTF-8 | Python | false | false | 1,848 | py | from parameters import WaterLPParameter
class network_PH_Cost(WaterLPParameter):
""""""
# path = "s3_imports/energy_netDemand.csv"
baseline_median_daily_energy_demand = 768 # 768 GWh is median daily energy demand for 2009
def _value(self, timestep, scenario_index, mode='scheduling'):
totDemandP = self.model.parameters["Total Net Energy Demand"]
maxDemandP = self.model.parameters["Max Net Energy Demand"]
minDemandP = self.model.parameters["Min Net Energy Demand"]
days_in_period = 1
if self.mode == 'scheduling':
totDemand = totDemandP.value(timestep, scenario_index)
            minDemand = minDemandP.value(timestep, scenario_index)
maxDemand = maxDemandP.value(timestep, scenario_index)
else:
planning_dates = self.dates_in_planning_month(timestep, month_offset=self.month_offset)
days_in_period = len(planning_dates)
totDemand = totDemandP.dataframe[planning_dates].sum()
minDemand = minDemandP.dataframe[planning_dates].min()
maxDemand = maxDemandP.dataframe[planning_dates].max()
minVal = self.model.parameters[self.demand_constant_param].value(timestep, scenario_index) \
* (totDemand / (self.baseline_median_daily_energy_demand * days_in_period))
maxVal = minVal * (maxDemand / minDemand)
d = maxVal - minVal
nblocks = self.model.parameters['Blocks'].value(timestep, scenario_index)
return -(maxVal - ((self.block * 2 - 1) * d / 2) / nblocks)
def value(self, timestep, scenario_index):
return self._value(timestep, scenario_index, mode=self.mode)
@classmethod
def load(cls, model, data):
return cls(model, **data)
network_PH_Cost.register()
print(" [*] PH_Cost successfully registered")
| [
"[email protected]"
]
| |
ffdf035d8454aa99dc436f4467f389c084b23666 | 002c14cd622b4890cce1c243065cebe39e2302ec | /LeetCode/13-Roman-to-Integer/Roman-to-Integer.py | d340a9596c72981fd2f85799d215d961b5f90c82 | [
"MIT"
]
| permissive | hscspring/The-DataStructure-and-Algorithms | 6200eba031eac51b13e320e1fc9f204644933e00 | e704a92e091f2fdf5f27ec433e0e516ccc787ebb | refs/heads/master | 2022-08-29T18:47:52.378884 | 2022-08-25T16:22:44 | 2022-08-25T16:22:44 | 201,743,910 | 11 | 3 | MIT | 2021-04-20T18:28:47 | 2019-08-11T09:26:34 | Python | UTF-8 | Python | false | false | 890 | py | class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
romanDict = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000,
}
        res = 0
        i = 0
        # Scan the numeral left to right. A smaller symbol immediately before a
        # larger one (e.g. "IV", "CM") is a subtractive pair and is consumed as one unit.
        while i < len(s) - 1:
            if romanDict[s[i+1]] > romanDict[s[i]]:
                res += romanDict[s[i+1]] - romanDict[s[i]]
                i += 2
            else:
                res += romanDict[s[i]]
                i += 1
        # Add a single trailing symbol, if one is left over.
        if i < len(s):
            res += romanDict[s[i]]
        return res
if __name__ == '__main__':
so = Solution()
assert so.romanToInt("III") == 3
assert so.romanToInt("IV") == 4
assert so.romanToInt("IX") == 9
assert so.romanToInt("LVIII") == 58
assert so.romanToInt("MCMXCIV") == 1994 | [
"[email protected]"
]
| |
8e9a7fa16f4019455c5bd5558201c626dc070351 | 0849923ebcde8f56a6e8550ae4f3c5ee3e2e0846 | /apps/search/src/search/decorators.py | ab684bbf9ba17e89cf01ce65ffd14cbcfb1e7154 | [
"Apache-2.0"
]
| permissive | thinker0/hue | 511a5796cdfe45e0b27f1d3309557ca60ce8b13b | ee5aecc3db442e962584d3151c0f2eab397d6707 | refs/heads/master | 2022-07-10T02:37:23.591348 | 2014-03-27T20:05:00 | 2014-03-27T20:05:00 | 12,731,435 | 0 | 0 | Apache-2.0 | 2022-07-01T17:44:37 | 2013-09-10T14:13:42 | Python | UTF-8 | Python | false | false | 1,304 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
def allow_admin_only(view_func):
def decorate(request, *args, **kwargs):
if not request.user.is_superuser:
message = _("Permission denied. You are not an Administrator.")
raise PopupException(message)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
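# Example usage (sketch): guard any Django view so that only superusers may call it.
# The view name below is illustrative only.
#
#   @allow_admin_only
#   def update_collection(request, collection_id):
#     ...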
| [
"[email protected]"
]
| |
76967bed7d5d25cae9b9fed322cae51c0c6b38b6 | 5f3fb04f8e04f8aa9d15bb9cded75b98fa53422f | /fofo_lazada/wizard/import_customer_payment.py | bb5400658e8c71eb8bbba8ad1ec853d2576ef6ef | []
| no_license | kittiu/fofo | 49a5b9110814bc8512e22fd101e821e6820b2f0a | b73e2009e220fd843e91d9ea414f514ae113b76c | refs/heads/master | 2020-04-16T20:12:44.309677 | 2018-08-03T14:53:12 | 2018-08-03T14:53:12 | 63,481,415 | 0 | 1 | null | 2018-08-03T14:53:13 | 2016-07-16T12:13:16 | Python | UTF-8 | Python | false | false | 1,245 | py | from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from datetime import datetime
import csv
import StringIO
import base64
import xlrd
from openerp import tools
from _abcoll import ItemsView
class import_customer_payment(models.TransientModel):
_name = 'import.customer.payment'
input_file = fields.Binary('Input File')
@api.multi
def import_payments(self):
for line in self:
lines = xlrd.open_workbook(file_contents=base64.decodestring(self.input_file))
print "---------lines",lines
for sheet_name in lines.sheet_names():
sheet = lines.sheet_by_name(sheet_name)
rows = sheet.nrows
columns = sheet.ncols
print "-rows--columns------",rows,columns
print "---------==sheet.row_values(0)==",sheet.row_values(0)
seller_sku = sheet.row_values(0).index('Seller SKU')
created_at = sheet.row_values(0).index('Created at')
order_number = sheet.row_values(0).index('Order Number')
unit_price = sheet.row_values(0).index('Unit Price')
status = sheet.row_values(0).index('Status') | [
"[email protected]"
]
| |
076656d8a2ada109350a4f62dbd2e457331d513d | 6512f283dbde46ec31f985889166798c5f943484 | /utils/prepare_docs.py | 04cb182e21ba277bb033f7ebf3408dcee748bd23 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | greenelab/iscb-diversity | d5db60693e2c2038c1033cb10f6c3a57c33a581d | f4f5d013a5263cb4a591a2cd0841f43fd082c7e3 | refs/heads/master | 2023-08-01T00:11:46.070563 | 2021-09-09T19:38:31 | 2021-09-09T19:38:31 | 207,814,106 | 7 | 1 | NOASSERTION | 2021-06-10T15:50:41 | 2019-09-11T13:03:02 | Jupyter Notebook | UTF-8 | Python | false | false | 2,914 | py | """
Generate docs directory for GitHub Pages.
"""
import argparse
import pathlib
import subprocess
import sys
root_dir = pathlib.Path(__file__).parent.parent
docs_dir = root_dir.joinpath("docs")
readme_template = """\
# Project Webpage for ISCB Diversity Analysis
<!-- make sure to edit this content in utils/prepare_docs.py and not docs/readme.md -->
More information at <https://github.com/greenelab/iscb-diversity>.
See also the study corresponding to this analysis at <https://greenelab.github.io/iscb-diversity-manuscript/>.
## Notebooks
See the following rendered notebooks:
{notebook_list_md}
"""
def parse_args():
parser = argparse.ArgumentParser(
description="Generate docs directory for GitHub Pages."
)
parser.add_argument(
"--nbconvert", action="store_true", help="Convert .ipynb files to docs/*.html"
)
parser.add_argument(
"--nbviewer", action="store_true", help="Use links to https://nbviewer.jupyter.org/ for Jupyter notebooks"
)
parser.add_argument(
"--repo",
default="greenelab/iscb-diversity",
help="GitHub repository to use for hyperlinks",
)
parser.add_argument(
"--readme",
action="store_true",
help="Regenerate docs/readme.md (the GitHub Pages homepage)",
)
args = parser.parse_args()
if len(sys.argv) == 1:
# print help when no arguments are specified
parser.print_help()
return args
def get_ipynb_paths():
ipynb_paths = sorted(root_dir.glob("*.ipynb"))
ipynb_paths = [path.relative_to(root_dir) for path in ipynb_paths]
return ipynb_paths
def render_jupyter_notebooks():
ipynb_paths = get_ipynb_paths()
args = [
"jupyter",
"nbconvert",
"--output-dir=docs",
*ipynb_paths,
]
subprocess.run(args, cwd=root_dir)
def get_nbviewer_md_list(args):
nbviewer_md_list = []
if not args.nbviewer:
return nbviewer_md_list
ipynb_paths = get_ipynb_paths()
for path in ipynb_paths:
nbviewer_url = f"https://nbviewer.jupyter.org/github/{args.repo}/blob/master/{path}"
md = f"- [{path.stem}]({nbviewer_url})"
nbviewer_md_list.append(md)
return nbviewer_md_list
def get_notebook_list_md(args):
notebook_md_list = get_nbviewer_md_list(args)
html_paths = sorted(docs_dir.glob("**/*.html"))
for path in html_paths:
path = path.relative_to(docs_dir)
notebook_md_list.append(f"- [{path.stem}]({path})")
notebook_md_list.sort()
return "\n".join(notebook_md_list)
if __name__ == "__main__":
args = parse_args()
assert docs_dir.is_dir()
if args.nbconvert:
render_jupyter_notebooks()
if args.readme:
notebook_list_md = get_notebook_list_md(args)
readme = readme_template.format(notebook_list_md=notebook_list_md)
docs_dir.joinpath("readme.md").write_text(readme)
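# Example invocation (from the repository root, using only the flags defined above):
#   python utils/prepare_docs.py --nbconvert --readme
# renders every top-level *.ipynb into docs/ and regenerates docs/readme.md.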
| [
"[email protected]"
]
| |
4b154b3ded63895ffebd249d37efbf7f792b89b5 | 3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72 | /month05/day04/03_weekday.py | d301021c4a68b80eb21452bfaee535f7a7743f07 | []
| no_license | leinian85/year2019 | 30d66b1b209915301273f3c367bea224b1f449a4 | 2f573fa1c410e9db692bce65d445d0543fe39503 | refs/heads/master | 2020-06-21T20:06:34.220046 | 2019-11-04T06:37:02 | 2019-11-04T06:37:02 | 197,541,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import numpy as np
ary = np.arange(1,37).reshape(6,6)
def apply(data):
return data.mean()
print(ary)
r = np.apply_along_axis(apply,1,ary)
print(r)
print(apply(ary[0,:])) | [
"[email protected]"
]
| |
7fa486dfb3dd91ddb4381bfd2d02fa65696c93d1 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-smarthosting/aliyunsdksmarthosting/request/v20200801/UpdateManagedHostAttributesRequest.py | 6903b8467168f5b270b89a199b450c06c42e6c34 | [
"Apache-2.0"
]
| permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 2,619 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmarthosting.endpoint import endpoint_data
class UpdateManagedHostAttributesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'SmartHosting', '2020-08-01', 'UpdateManagedHostAttributes','SmartHosting')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Mode(self):
return self.get_query_params().get('Mode')
def set_Mode(self,Mode):
self.add_query_param('Mode',Mode)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ManagedHostId(self):
return self.get_query_params().get('ManagedHostId')
def set_ManagedHostId(self,ManagedHostId):
self.add_query_param('ManagedHostId',ManagedHostId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ManagedHostName(self):
return self.get_query_params().get('ManagedHostName')
def set_ManagedHostName(self,ManagedHostName):
self.add_query_param('ManagedHostName',ManagedHostName) | [
"[email protected]"
]
| |
97a3e24436a577e4b1e73d9a2f7511d8325217ef | 5672737d1ff34bebfeb408426e52ed49df8be3bb | /graphgallery/attack/targeted/common/rand.py | a95829950f927f663815568b32c3a8170abb5757 | [
"MIT"
]
| permissive | sailfish009/GraphGallery | 5063ee43340a6ca8da9f2d7fb3c44349e80321b2 | 4eec9c5136bda14809bd22584b26cc346cdb633b | refs/heads/master | 2023-08-24T19:19:59.714411 | 2021-10-16T10:10:40 | 2021-10-16T10:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | import random
import numpy as np
from graphgallery.utils import tqdm
from graphgallery.attack.targeted import Common
from ..targeted_attacker import TargetedAttacker
@Common.register()
class RAND(TargetedAttacker):
def reset(self):
super().reset()
self.modified_degree = self.degree.copy()
return self
def process(self, reset=True):
self.nodes_set = set(range(self.num_nodes))
if reset:
self.reset()
return self
def attack(self,
target,
num_budgets=None,
threshold=0.5,
direct_attack=True,
structure_attack=True,
feature_attack=False,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
if direct_attack:
influence_nodes = [target]
else:
# influence_nodes = list(self.graph.neighbors(target))
influence_nodes = self.graph.adj_matrix[target].indices.tolist()
chosen = 0
adj_flips = self.adj_flips
with tqdm(total=self.num_budgets,
                  desc='Perturbing Graph',
disable=disable) as pbar:
while chosen < self.num_budgets:
# randomly choose to add or remove edges
if np.random.rand() <= threshold:
delta = 1.0
edge = self.add_edge(influence_nodes)
else:
delta = -1.0
edge = self.del_edge(influence_nodes)
if edge is not None:
adj_flips[edge] = chosen
chosen += 1
u, v = edge
self.modified_degree[u] += delta
self.modified_degree[v] += delta
pbar.update(1)
return self
def add_edge(self, influence_nodes):
u = random.choice(influence_nodes)
neighbors = self.graph.adj_matrix[u].indices.tolist()
potential_nodes = list(self.nodes_set - set(neighbors) -
set([self.target, u]))
if len(potential_nodes) == 0:
return None
v = random.choice(potential_nodes)
if not self.is_modified(u, v):
return (u, v)
else:
return None
def del_edge(self, influence_nodes):
u = random.choice(influence_nodes)
neighbors = self.graph.adj_matrix[u].indices.tolist()
potential_nodes = list(set(neighbors) - set([self.target, u]))
if len(potential_nodes) == 0:
return None
v = random.choice(potential_nodes)
if not self.allow_singleton and (self.modified_degree[u] <= 1
or self.modified_degree[v] <= 1):
return None
if not self.is_modified(u, v):
return (u, v)
else:
return None
| [
"[email protected]"
]
| |
b97a00b7ad30a23398328b91d48b220d5c4802bc | b8911bd330c08c32a205751cf2f7538494729c16 | /examples/plotting/plot_2_parallel_coordinates.py | 55d66289653a05f16f742f4eb9bcd21740f11a3f | [
"BSD-3-Clause"
]
| permissive | mirkobronzi/orion | 3c0bb6258392729b91617997eebcf1e8897795aa | ad8f69afabf3faab557a82ef9409fabd63495ea8 | refs/heads/master | 2022-05-02T05:45:54.425146 | 2022-03-08T00:37:08 | 2022-03-08T00:37:08 | 195,877,506 | 0 | 0 | NOASSERTION | 2022-03-08T00:38:23 | 2019-07-08T19:55:26 | Python | UTF-8 | Python | false | false | 5,258 | py | """
====================
Parallel Coordinates
====================
.. hint::
Conveys a dense overview of the trial objectives in a multi-dimensional space.
    Helps identify trends of best or worst hyperparameter values.
The parallel coordinates plot decomposes a search space of `n` dimensions into `n`
axes so that the entire space can be visualized simultaneously. Each dimension
is represented as a vertical axis and trials are represented as lines crossing each
axis at the corresponding value of the hyperparameters. There is no obvious optimal ordering
for the vertical axes, and you will often find that changing the order helps you better
understand the data. Additionally, the lines are plotted with graded colors based on the
objective. The gradation is shown in a color bar on the right of the plot. Note that the
objectives are added as the last axis in the plot as well.
.. autofunction:: orion.plotting.base.parallel_coordinates
:noindex:
The parallel coordinates plot can be executed directly from the ``experiment`` with
``plot.parallel_coordinates()`` as shown in the example below.
"""
from orion.client import get_experiment
# Specify the database where the experiments are stored. We use a local PickleDB here.
storage = dict(type="legacy", database=dict(type="pickleddb", host="../db.pkl"))
# Load the data for the specified experiment
experiment = get_experiment("2-dim-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# In this basic example the parallel coordinates plot is marginally useful as there are only
# 2 dimensions. It is possible however to identify the best performing values of ``dropout`` and
# ``learning_rate``. The GIF below demonstrates how to select subsets of the
# axes to highlight the trials that correspond to the best objectives.
#
# .. image:: ../_static/parallel_coordinates_select.gif
# :width: 600
# :align: center
#
# .. note::
#
# Hover is not supported by plotly at the moment.
# Feature request can be tracked `here <https://github.com/plotly/plotly.js/issues/3012>`_.
#%%
# Let's now load the results from the tutorial
# :ref:`sphx_glr_auto_tutorials_code_2_hyperband_checkpoint.py` for an example with a larger search
# space.
# Load the data for the specified experiment
experiment = get_experiment("hyperband-cifar10", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# As you can see, the large number of trials that only trained for a few epochs clutters the entire
# plot. You can first select the trials with 120 epochs to declutter the plot. Once that is done,
# we can see that gamma and momentum had limited influence. Good trials can be found
# for almost any values of gamma and momentum. On the other hand, learning rate
# and weight decay clearly favor lower values. You can try re-ordering the columns as
# shown in the animation below to see the connections between one hyperparameter and the objective.
#%%
# .. image:: ../_static/parallel_coordinates_reorder.gif
# :width: 600
# :align: center
#
#%%
# We can also select a subset of hyperparameters to help with the visualization.
# Load the data for the specified experiment
fig = experiment.plot.parallel_coordinates(
order=["epochs", "learning_rate", "weight_decay"]
)
fig
#%%
#
# Special cases
# -------------
#
# Logarithmic scale
# ~~~~~~~~~~~~~~~~~
#
# .. note::
#
# Logarithmic scales are not supported yet. Contributions are welcome. :)
# See `issue <https://github.com/Epistimio/orion/issues/555>`_.
#
# Dimension with shape
# ~~~~~~~~~~~~~~~~~~~~
#
# If some dimensions have a :ref:`search-space-shape` larger than 1, they will be flattened so that
# each subdimension can be represented in the parallel coordinates plot.
# Load the data for the specified experiment
experiment = get_experiment("2-dim-shape-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# In the example above, the dimension ``learning_rate~loguniform(1e-5, 1e-2, shape=3)``
# is flattened and represented with ``learning_rate[i]``. If the shape had two or more dimensions
# (ex: ``(3, 2)``), the indices would be ``learning_rate[i,j]`` with i=0..2 and j=0..1.
#%%
# The flattened hyperparameters can be fully selected with ``order=['<name>']``.
#
experiment.plot.parallel_coordinates(order=["/learning_rate"])
#%%
# Or a subset of the flattened hyperparameters can be selected with ``order=['<name>[index]']``.
#
experiment.plot.parallel_coordinates(order=["/learning_rate[0]", "/learning_rate[1]"])
#%%
# Categorical dimension
# ~~~~~~~~~~~~~~~~~~~~~
#
# Parallel coordinates plots can also render categorical dimensions, in which case the
# categories are shown in an arbitrary order on the axis.
# Load the data for the specified experiment
experiment = get_experiment("3-dim-cat-shape-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# Finally we save the image to serve as a thumbnail for this example. See
# the guide
# :ref:`How to save <sphx_glr_auto_examples_how-tos_code_2_how_to_save.py>`
# for more information on image saving.
fig.write_image("../../docs/src/_static/pcp_thumbnail.png")
# sphinx_gallery_thumbnail_path = '_static/pcp_thumbnail.png'
| [
"[email protected]"
]
| |
7e7b508b865d113b3866dcafbdbe625da5fa268a | 3431aa0a966505c601171393e9edcd2345813268 | /analysis/lssxcmb/scripts/run_cell_sv3.py | 67f67d8953fff82b82dfa3a773b4140d43be8baf | [
"MIT"
]
| permissive | mehdirezaie/LSSutils | 34eeeb6efbf2b5c4c0c336c1f9f2fabc821def39 | af4697d9f4b4fc4dbab44787108d7aa538ca846d | refs/heads/master | 2023-08-17T17:58:03.310073 | 2023-08-16T11:29:23 | 2023-08-16T11:29:23 | 200,747,239 | 3 | 0 | MIT | 2021-11-09T15:12:30 | 2019-08-06T00:25:07 | Jupyter Notebook | UTF-8 | Python | false | false | 2,653 | py |
import os
import healpy as hp
from lssutils import setup_logging, CurrentMPIComm
from lssutils.lab import get_cl
from lssutils.utils import npix2nside, make_hp
from lssutils.utils import maps_dr9 as columns
import fitsio as ft
import numpy as np
@CurrentMPIComm.enable
def main(args, comm=None):
if comm.rank == 0:
# --- only rank 0
# read data, randoms, and templates
data = ft.read(args.data_path)
nside = 1024
ngal = make_hp(nside, data['hpix'], data['label'])
nran = make_hp(nside, data['hpix'], data['fracgood'])
mask = make_hp(nside, data['hpix'], 1.0) > 0.5
sysm = np.zeros((12*nside*nside, data['features'].shape[1]))
print(sysm.shape)
sysm[data['hpix'], :] = data['features']
if args.selection is not None:
#s_ = ft.read(args.selection)
#selection_fn = make_hp(nside, s_['hpix'], np.median(s_['weight'], axis=1))#.mean(axis=1))
selection_fn = hp.read_map(args.selection, verbose=False)
print(np.percentile(selection_fn[mask], [0, 1, 99, 100]))
else:
selection_fn = None
else:
ngal = None
nran = None
mask = None
sysm = None
selection_fn = None
ngal = comm.bcast(ngal, root=0)
nran = comm.bcast(nran, root=0)
mask = comm.bcast(mask, root=0)
sysm = comm.bcast(sysm, root=0)
selection_fn = comm.bcast(selection_fn, root=0)
cls_list = get_cl(ngal, nran, mask, selection_fn=selection_fn,
systematics=sysm, njack=0)
if comm.rank == 0:
output_dir = os.path.dirname(args.output_path)
if not os.path.exists(output_dir):
print(f'creating {output_dir}')
os.makedirs(output_dir)
np.save(args.output_path, cls_list)
if __name__ == '__main__':
setup_logging("info") # turn on logging to screen
comm = CurrentMPIComm.get()
    if comm.rank == 0:
        print(f'hi from {comm.rank}')
        from argparse import ArgumentParser
        ap = ArgumentParser(description='Angular Clustering')
        ap.add_argument('-d', '--data_path', required=True)
        ap.add_argument('-o', '--output_path', required=True)
        ap.add_argument('-s', '--selection', default=None)
        ns = ap.parse_args()
        for (key, value) in ns.__dict__.items():
            print(f'{key:15s} : {value}')
    else:
        # non-root ranks do not parse arguments; main() only reads them on rank 0
        ns = None
        print(f'hey from {comm.rank}')
    main(ns)
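# Example invocation (sketch, under MPI, using the flags defined above; file names
# are illustrative only):
#   mpirun -np 4 python run_cell_sv3.py -d prepared_data.fits -o output/cls_sv3.npy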
| [
"[email protected]"
]
| |
c23e11afccd6e32919ce7dad4448f78f03f7ee83 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /3_Python_Data_Science_Toolbox_Part1/3_Lambda_functions_and_error-handling/errorHandlingWithTryExcept.py | 88c5557c80a12220a1241c670005099db365c6d2 | []
| no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # Error handling with try-except
# A good practice in writing your own functions is also anticipating the ways in which other people (or yourself, if you accidentally misuse your own function) might use the function you defined.
# As in the previous exercise, you saw that the len() function is able to handle input arguments such as strings, lists, and tuples, but not int type ones and raises an appropriate error and error message when it encounters invalid input arguments. One way of doing this is through exception handling with the try-except block.
# In this exercise, you will define a function as well as use a try-except block for handling cases when incorrect input arguments are passed to the function.
# Recall the shout_echo() function you defined in previous exercises; parts of the function definition are provided in the sample code. Your goal is to complete the exception handling code in the function definition and provide an appropriate error message when raising an error.
# Instructions
# 100 XP
# Initialize the variables echo_word and shout_words to empty strings.
# Add the keywords try and except in the appropriate locations for the exception handling block.
# Use the * operator to concatenate echo copies of word1. Assign the result to echo_word.
# Concatenate the string '!!!' to echo_word. Assign the result to shout_words.
# Define shout_echo
def shout_echo(word1, echo=1):
"""Concatenate echo copies of word1 and three
exclamation marks at the end of the string."""
# Initialize empty strings: echo_word, shout_words
echo_word = ""
shout_words = ""
# Add exception handling with try-except
try:
# Concatenate echo copies of word1 using *: echo_word
echo_word = word1 * echo
# Concatenate '!!!' to echo_word: shout_words
shout_words = echo_word + '!!!'
except:
# Print error message
print("word1 must be a string and echo must be an integer.")
# Return shout_words
return shout_words
# Call shout_echo
shout_echo("particle", echo="accelerator")
| [
"[email protected]"
]
| |
5db8d660382ca50bec0fc0ab03dc620098282966 | 9ca6885d197aaf6869e2080901b361b034e4cc37 | /TauAnalysis/MCEmbeddingTools/test/runGenMuonRadCorrAnalyzer_cfg.py | 978d9416623f149ba528ac6ef22a4fbb44f182ec | []
| no_license | ktf/cmssw-migration | 153ff14346b20086f908a370029aa96575a2c51a | 583340dd03481dff673a52a2075c8bb46fa22ac6 | refs/heads/master | 2020-07-25T15:37:45.528173 | 2013-07-11T04:54:56 | 2013-07-11T04:54:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,082 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("runGenMuonRadCorrAnalyzer")
import os
import re
import TauAnalysis.Configuration.tools.castor as castor
import TauAnalysis.Configuration.tools.eos as eos
# import of standard configurations for RECOnstruction
# of electrons, muons and tau-jets with non-standard isolation cones
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load('Configuration/Geometry/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = cms.string('START53_V7A::All')
#--------------------------------------------------------------------------------
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
##'file:/data1/veelken/CMSSW_5_3_x/skims/ZmumuTF_RECO_2012Oct03.root'
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_205_1_XhE.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_206_1_OHz.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_207_1_bgM.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_208_1_szL.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_209_1_Jqv.root'
),
skipEvents = cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# set input files
inputFilePath = '/store/user/veelken/CMSSW_5_3_x/skims/GoldenZmumu/2012Oct09/'
inputFile_regex = r"[a-zA-Z0-9_/:.]*goldenZmumuEvents_ZplusJets_madgraph_2012Oct09_AOD_(?P<gridJob>\d*)(_(?P<gridTry>\d*))*_(?P<hash>[a-zA-Z0-9]*).root"
# check if name of inputFile matches regular expression
inputFileNames = []
files = None
if inputFilePath.startswith('/castor/'):
files = [ "".join([ "rfio:", file_info['path'] ]) for file_info in castor.nslsl(inputFilePath) ]
elif inputFilePath.startswith('/store/'):
files = [ file_info['path'] for file_info in eos.lsl(inputFilePath) ]
else:
files = [ "".join([ "file:", inputFilePath, file ]) for file in os.listdir(inputFilePath) ]
for file in files:
#print "file = %s" % file
inputFile_matcher = re.compile(inputFile_regex)
if inputFile_matcher.match(file):
inputFileNames.append(file)
#print "inputFileNames = %s" % inputFileNames
process.source.fileNames = cms.untracked.vstring(inputFileNames)
#--------------------------------------------------------------------------------
process.load("TauAnalysis/MCEmbeddingTools/ZmumuStandaloneSelection_cff")
process.goldenZmumuFilter.src = cms.InputTag('goldenZmumuCandidatesGe0IsoMuons')
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer = cms.PSet(
initialSeed = cms.untracked.uint32(12345),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzerPYTHIA = process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer.clone()
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzerPHOTOS = process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer.clone()
process.load("TauAnalysis/MCEmbeddingTools/genMuonRadCorrAnalyzer_cfi")
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
process.genMuonRadCorrAnalyzerPYTHIA.PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
particleGunParameters = cms.vstring(
'MSTP(41) = 0 ! Disable parton Showers',
'MSTP(61) = 0 ! Disable initial state radiation',
'MSTP(71) = 1 ! Enable final state radiation'
),
parameterSets = cms.vstring(
'pythiaUESettings',
'particleGunParameters'
)
)
process.genMuonRadCorrAnalyzerPHOTOS.PhotosOptions = cms.PSet()
process.genMuonRadCorrAnalyzerSequence = cms.Sequence(
process.genMuonRadCorrAnalyzer
+ process.genMuonRadCorrAnalyzerPYTHIA
+ process.genMuonRadCorrAnalyzerPHOTOS
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('/data1/veelken/tmp/runGenMuonRadCorrAnalyzer_2013Jan28.root')
)
process.analysisSequence = cms.Sequence(
process.goldenZmumuSelectionSequence
+ process.goldenZmumuFilter
+ process.genMuonRadCorrAnalyzerSequence
)
#--------------------------------------------------------------------------------
process.p = cms.Path(process.analysisSequence)
processDumpFile = open('runGenMuonRadCorrAnalyzer.dump' , 'w')
print >> processDumpFile, process.dumpPython()
| [
"[email protected]"
]
| |
008458b3a86ae5e14cc07957c22da7431650271c | d4c9979ebf5224e79c7bff38931657e0b3420b86 | /quizzes/Quiz35.py | 60e585f8af44430796aab5112454e757f5ddc3cc | []
| no_license | imjeee/cs373 | 60c021ec372a59dab8edf32a1f2f833857bbf5f8 | 9c331c9eb9c4c43f8a2ee03ee5770791f2ead225 | refs/heads/master | 2020-12-25T02:20:25.894496 | 2012-04-26T02:56:03 | 2012-04-26T02:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/env python
"""
CS373: Quiz #35 (5 pts)
"""
""" ----------------------------------------------------------------------
1. What is the output of the following program?
(4 pts)
1
A.A()
A.A()
False
A.A()
A.f()
2
A.A()
3
True
A.f()
4
5
True
A.f()
6
"""
def Decorator (c) :
x = c()
return lambda : x
class A (object) :
def __init__ (self) :
print "A.A()"
def f (self) :
print "A.f()"
print "1"
print A() is A()
A().f()
print "2"
A = Decorator(A)
print "3"
print A() is A()
A().f()
print "4"
A = Decorator(A)
print "5"
print A() is A()
A().f()
print "6"
| [
"[email protected]"
]
| |
2c69e19bcc517dcd10c542ec855f0c2019875f31 | e254a9e46750549f742b30fc5e930f1bddf78091 | /plots/python/cutsJetClean1.py | 6e43f909f17bbc85a9c4d9976b4df2e93a358a30 | []
| no_license | schoef/TTGammaEFT | 13618c07457a0557c2d62642205b10100f8b4e79 | af9a5f8c9d87835c26d7ebecdccf865a95a84ba2 | refs/heads/master | 2020-04-17T05:01:28.910645 | 2019-01-17T16:29:27 | 2019-01-17T16:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,657 | py | #!/usr/bin/env python
''' Define list of plots for plot script
'''
# Standard Imports
from math import pi
# RootTools
from RootTools.core.standard import *
# TTGammaEFT
from TTGammaEFT.Tools.constants import defaultValue
# plotList
cutsJetClean1 = []
cutsJetClean1.append( Plot(
name = 'cleanJet1_nConstituents',
texX = 'nConstituents(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_nConstituents[1] if event.nJetClean > 1 else defaultValue,
binning = [ 5, 0, 5 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_chEmHEF',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF_detailed',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF_detailed',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_chEmHEF_detailed',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF_tight',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF_tight',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_chEmHEF_tight',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_chHEF',
texX = 'chHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_ID',
texX = 'ID(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_JetCleanId[1] if event.nJetClean > 1 else defaultValue,
binning = [ 4, 0, 4 ],
))
| [
"[email protected]"
]
| |
63ce385bb0e3331d222c53d2420877eedc853169 | f2aec3224fb3e1a6c780f82def626be3565a0e8e | /examples/Carleman/config.py | 9b7c950382f999c814723a8eb8af048d3e92b9b3 | []
| no_license | Orcuslc/MultiScale-PINN | a000ce5afcb7d91ccdd535cc45bd2000c917463f | 32f1de843ec96231d6d9859815ad1b08cef718d2 | refs/heads/master | 2022-12-13T13:07:59.881197 | 2020-09-15T22:46:45 | 2020-09-15T22:46:45 | 291,526,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | import jax
import jax.numpy as jnp
from jax import random
from jax.experimental import optimizers
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from jaxmeta.loss import *
# name of job
NAME = "1"
# random key
key = random.PRNGKey(1)
# network config
layers = [2] + [32]*4 + [2]
c0 = 1.0
w0 = jnp.array([[1.0, 1.0]]).T
# network training
metaloss = mae
optimizer = optimizers.adam
lr = 1e-3
weights = {
"c1": 1.0,
"c2": 10.0,
"d1": 10.0,
"d2": 10.0,
"l1": 1e-8,
"l2": 1e-8,
}
batch_size = {
"dirichlet": 300,
"collocation": 20100,
}
iterations = 200000
print_every = 1000
save_every = 10000
loss_names = ["Loss", "c1", "c2", "d1", "d2", "l1_reg", "l2_reg"]
log_file = None
# data
n_data = {
"i": 100,
"b": 100,
"cx": 201,
"ct": 100,
} | [
"[email protected]"
]
| |
60c072d32340dc8922ab7bd5a5643dcda4ae5b74 | 342a1ec794df5424bfc4f6af2cb8de415068201b | /sandbox/urls.py | cda98e24d4af79bb880bea6d3d80987d07cf9d1c | []
| no_license | penta-srl/django-oscar-promotions | c5d0b159950189f23852665ce7e3b3a2fe248bd5 | 65bdf39b48409311e7284fc0a12e8b2e17f176dd | refs/heads/master | 2020-07-06T23:48:45.660316 | 2019-07-08T19:23:15 | 2019-07-08T19:23:15 | 203,176,440 | 0 | 0 | null | 2019-08-19T13:16:55 | 2019-08-19T13:16:55 | null | UTF-8 | Python | false | false | 433 | py | from django.conf import settings
from django.conf.urls import i18n
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from app import application
urlpatterns = [
path('admin/', admin.site.urls),
path('i18n/', include(i18n)),
path('', application.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
3dfd2531ae73ff4591e432a193134bba76c6d163 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/sg_update_taxcode/__manifest__.py | 023ccc393334b7e8d2ceb2bde2b89125f66c2737 | []
| no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | {
"name": "SG Tax Update",
"version": "1.1.1",
"depends": ['account'],
"author" :"MPTechnolabs(Chankya)",
"website" : "http://www.serpentcs.com",
"category": "Accounting and Financial Management",
"description": """
        Adds the default Singapore taxes for Sales and Purchase.
============================
""",
'data':[
"data/tax_data.xml",
],
'installable': True,
'auto_install':False,
'application':False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
16e5e62ad99b2ca24f2c0903c0f54ac800dbc519 | 5142e81b50d15202ff79a34c9b888f18d2baec27 | /plotnine/geoms/geom_polygon.py | 3f792d36c5eb28b5b5766db68616763254679505 | [
"MIT"
]
| permissive | has2k1/plotnine | 03c0e979b6b05b5e92cb869cca903cfce20988dc | ef5650c4aabb29dcfe810043fb0fc8a4ea83f14b | refs/heads/main | 2023-08-30T22:17:07.835055 | 2023-08-08T07:57:53 | 2023-08-08T07:57:53 | 89,276,692 | 3,719 | 233 | MIT | 2023-08-08T13:09:24 | 2017-04-24T19:00:44 | Python | UTF-8 | Python | false | false | 3,930 | py | from __future__ import annotations
import typing
import numpy as np
from ..doctools import document
from ..utils import SIZE_FACTOR, to_rgba
from .geom import geom
if typing.TYPE_CHECKING:
from typing import Any
import pandas as pd
from plotnine.iapi import panel_view
from plotnine.typing import Axes, Coord, DrawingArea, Layer
@document
class geom_polygon(geom):
"""
Polygon, a filled path
{usage}
Parameters
----------
{common_parameters}
Notes
-----
All paths in the same ``group`` aesthetic value make up a polygon.
"""
DEFAULT_AES = {
"alpha": 1,
"color": None,
"fill": "#333333",
"linetype": "solid",
"size": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
REQUIRED_AES = {"x", "y"}
def handle_na(self, data: pd.DataFrame) -> pd.DataFrame:
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: Coord,
ax: Axes,
**params: Any,
):
"""
Plot all groups
"""
self.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: Coord,
ax: Axes,
**params: Any,
):
from matplotlib.collections import PolyCollection
data = coord.transform(data, panel_params, munch=True)
data["size"] *= SIZE_FACTOR
# Each group is a polygon with a single facecolor
# with potentially an edgecolor for every edge.
verts = []
facecolor = []
edgecolor = []
linestyle = []
linewidth = []
# Some stats may order the data in ways that prevent
# objects from occluding other objects. We do not want
# to undo that order.
grouper = data.groupby("group", sort=False)
for group, df in grouper:
fill = to_rgba(df["fill"].iloc[0], df["alpha"].iloc[0])
verts.append(tuple(zip(df["x"], df["y"])))
facecolor.append("none" if fill is None else fill)
edgecolor.append(df["color"].iloc[0] or "none")
linestyle.append(df["linetype"].iloc[0])
linewidth.append(df["size"].iloc[0])
col = PolyCollection(
verts,
facecolors=facecolor,
edgecolors=edgecolor,
linestyles=linestyle,
linewidths=linewidth,
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(col)
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: Layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
from matplotlib.patches import Rectangle
data["size"] *= SIZE_FACTOR
# We take into account that the linewidth
# bestrides the boundary of the rectangle
linewidth = np.min([data["size"], da.width / 4, da.height / 4])
if data["color"] is None:
linewidth = 0
facecolor = to_rgba(data["fill"], data["alpha"])
if facecolor is None:
facecolor = "none"
rect = Rectangle(
(0 + linewidth / 2, 0 + linewidth / 2),
width=da.width - linewidth,
height=da.height - linewidth,
linewidth=linewidth,
linestyle=data["linetype"],
facecolor=facecolor,
edgecolor=data["color"],
capstyle="projecting",
)
da.add_artist(rect)
return da
| [
"[email protected]"
]
| |
87fdd739df1b7fb5761a475d96ca9d5090f05545 | f81099738d3ab7d4a4773a04ed9e36e493632590 | /tools/__init__.py | 25656f148c2d467f8c89a6f10feea6f9a961d8f5 | [
"MIT"
]
| permissive | kristoffer-paulsson/angelos | eff35753e4d7e4465d2aadac39265f206b09fcf9 | d789f47766fe3a63a6752b92e4ea955f420dbaf9 | refs/heads/master | 2022-05-05T15:16:59.340527 | 2022-03-27T16:05:51 | 2022-03-27T16:05:51 | 142,691,235 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #
# Copyright (c) 2021 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Tools package to help out administrating the project and namespace packages."""
| [
"[email protected]"
]
| |
f430a0b423f886e43adc90f779d2941422feaf60 | 6f99c51a186227a127ce8ca2cf3231b15519b70c | /apps/users/__init__.py | 1646949ec2996b661435d74a3f67fcba7fb2d7e4 | []
| no_license | nicolefengs/mxonline | ce0a9748e64b7ecead53bc3c2aefea6269a0a4f9 | 67452439b7e073afaf25621d00085e2de05ffad9 | refs/heads/master | 2020-03-14T22:40:41.053409 | 2018-05-02T09:12:15 | 2018-05-02T09:12:15 | 131,824,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | #coding=utf-8
# Configure Chinese text for the admin backend
default_app_config="users.apps.UsersConfig" | [
"[email protected]"
]
| |
827109911306284e65d62529b7a0acc378edb770 | 43dbf438287d1ea426d1cc0e201d4a56bfd20e21 | /ABC/084/C.py | cef2926d97def3d85bbeacf0da42b1556a9743e9 | []
| no_license | hatopoppoK3/AtCoder-Practice | c98e5b0377c46b440a79dcc0bd1790b508555672 | c7385b0444baf18b7a7dc8e3003cc2074bc9b4ab | refs/heads/master | 2023-02-23T03:45:45.787964 | 2023-02-07T12:59:51 | 2023-02-07T12:59:51 | 184,423,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | N = int(input())
CSF = []
for i in range(0, N-1):
CSF.append(list(map(int, input().split())))
for i in range(0, N):
ans = 0
for j in range(i, N-1):
ans = max(CSF[j][1], ans)
if ans % CSF[j][2] != 0:
ans += (CSF[j][2]-ans % CSF[j][2])
ans += CSF[j][0]
print(ans)
| [
"[email protected]"
]
| |
213890a8d69a2c805267746ad519c8098d213948 | 4e8a55ff85a2aa26fbbf05bc9350028226dfde37 | /DProject/Manager/LoginManager.py | 2c0f7a5f5aa87f813dbd2c29683edac4b8132d44 | []
| no_license | DWaze/DisabledProject | dbb572fa47063011abc4f13e95c9e44ab24a5c55 | 6c16269c7722503226ba500d0216dc373ffad867 | refs/heads/master | 2020-03-17T09:02:28.913809 | 2018-06-22T10:07:55 | 2018-06-22T10:07:55 | 133,458,886 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | from DProject.Manager.MainManager import createSession
from DProject.DAO.AccountDAO import AccountDAO
from DProject.DAO.UserDAO import UserDAO
import datetime
import uuid
from datetime import timedelta
from DProject.Models.Empty import Empty
from DProject.Models.Account import Account
from DProject.Models.User import User
import abc
import DProject.Models.base
import DProject.Models.Account
import DProject.Models.Action
import DProject.Models.ActionHistory
import DProject.Models.Agent
import DProject.Models.Building
import DProject.Models.Object
import DProject.Models.GPIOPins
import DProject.Models.Scenario
import DProject.Models.StateHistory
import DProject.Models.Area
import DProject.Models.User
from sqlalchemy import create_engine, DateTime
from sqlalchemy.orm import sessionmaker
class LoginManager(object):
DBSession = None
def __init__(self):
self.DBSession = createSession()
def normalize(self,account):
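        # Copy the Account ORM fields onto a plain Empty object, formatting the date
        # fields as strings, so the result can be returned to callers safely.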
accountObject = Empty()
accountObject.id = account.id
accountObject.userName = account.userName
accountObject.password = account.password
accountObject.email = account.email
accountObject.type = account.type
accountObject.token = account.token
accountObject.tokenEndDate = account.tokenEndDate.strftime('%m/%d/%Y')
accountObject.enabled = account.enabled
accountObject.lastLogin = account.lastLogin.strftime('%m/%d/%Y')
accountObject.firstAccess = account.firstAccess.strftime('%m/%d/%Y')
accountObject.lastAccess = account.lastAccess.strftime('%m/%d/%Y')
return accountObject
def GetLoginInfo(self, username, password):
accountDAO = AccountDAO(self.DBSession)
# accountTest = Account("redha","1321","[email protected]")
# accountDAO.create(accountTest)
#
# userDAO = UserDAO(self.DBSession)
# userTest = User("Abbassen","Mohamed Redha",22,datetime.datetime.now(),"0665528461","cite 1000 logts","Constantine","Countery")
# userTest.addAccount(accountTest)
# userDAO.create(userTest)
account = accountDAO.findLogin(username,password)
dNow = datetime.datetime.now()
if hasattr(account, 'userName'):
dToken = account.tokenEndDate
if (dNow > dToken or account.token is None):
new_token = uuid.uuid4().hex
account.token = new_token
account.tokenEndDate = dNow + timedelta(days=15)
accountDAO.update(account)
accountObject = self.normalize(account)
return accountObject
else:
accountObject = self.normalize(account)
return accountObject
else:
return []
def check_token(self,token):
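        # Look the account up by token and return it only while the stored token is
        # present and unexpired; otherwise the caller gets False (or None if no match).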
accountDAO = AccountDAO(self.DBSession)
account = accountDAO.findByToken(token)
dNow = datetime.datetime.now()
if hasattr(account, 'userName'):
dToken = account.tokenEndDate
if (dNow > dToken or account.token is None):
return False
else:
return account | [
"[email protected]"
]
| |
2fda900f5cc51238ea3c6228d0468d9999170f77 | 088e000eb5f16e6d0d56c19833b37de4e67d1097 | /model-optimizer/extensions/back/ReduceToPooling_test.py | 8861dab03c1313e4cf085504177fe531882a1b14 | [
"Apache-2.0"
]
| permissive | projectceladon/dldt | 614ba719a428cbb46d64ab8d1e845ac25e85a53e | ba6e22b1b5ee4cbefcc30e8d9493cddb0bb3dfdf | refs/heads/2019 | 2022-11-24T10:22:34.693033 | 2019-08-09T16:02:42 | 2019-08-09T16:02:42 | 204,383,002 | 1 | 1 | Apache-2.0 | 2022-11-22T04:06:09 | 2019-08-26T02:48:52 | C++ | UTF-8 | Python | false | false | 24,146 | py | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from extensions.back.ReduceToPooling import ReduceReplacer
from mo.front.common.partial_infer.utils import int64_array
from mo.middle.passes.eliminate import shape_inference
from mo.middle.passes.eliminate_test import build_graph
from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
# The dictionary with node attributes used to build various graphs. A key is the name of the node and the value is the
# dictionary with node attributes.
nodes_attributes = {
# Placeholder layers
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Reduce layers
'const': {'type': 'Const', 'value': None, 'kind': 'op'},
'const_data': {'kind': 'data', 'value': None, 'shape': None},
'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},
'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Reshape layers
'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'reshape_1_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'reshape_1_const_data': {'kind': 'data', 'value': None, 'shape': None},
'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'reshape_2_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'reshape_2_const_data': {'kind': 'data', 'value': None, 'shape': None},
# Pooling
'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},
'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},
# Power
'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},
'power_data': {'value': None, 'shape': None, 'kind': 'data'},
# Concat
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
}
class ReduceReplacerTest(unittest.TestCase):
def test1(self):
# Original graph
# data(1,64,1)-->Reduce(axis=1,keep_dims=True)-->data(1,1,1)
#
# Reference graph
# data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': int64_array([1, 64, 1])},
'reduce_1': {'keep_dims': True, 'type': 'ReduceMean'},
'const_data': {'value': int64_array([1])},
'reduce_1_data': {'shape': int64_array([1, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': int64_array([1, 64, 1])},
'reshape_1_const': {'value': int64_array([1, 1, 64, 1]), 'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([1, 1, 64, 1]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([1, 1, 64, 1])},
'pooling': {'window': int64_array([1, 1, 64, 1])},
'pooling_data': {'shape': int64_array([1, 1, 1, 1])},
'reshape_2_const': {'value': int64_array([1, 1, 1]), 'shape': int64_array([3])},
'reshape_2_const_data': {'value': int64_array([1, 1, 1]), 'shape': int64_array([3])},
'reshape_2_data': {'shape': int64_array([1, 1, 1])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test2(self):
# Original graph
# data(1,3,64,64)-->Reduce(axis=2,keep_dims=True)-->data(1,3,1,64)
#
# Reference graph
# data(1,3,64,64)->Reshape->Pool(1,3,1,64)->Reshape(1,3,1,64)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([1, 3, 64, 64])},
'reduce_1': {'keep_dims': True, 'type': 'ReduceMean'},
'const_data': {'value': int64_array([2])},
'reduce_1_data': {'shape': int64_array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([1, 3, 64, 64])},
'reshape_1_const': {'value': int64_array([1, 3, 64, 64]), 'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([1, 3, 64, 64]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([1, 3, 64, 64])},
'pooling': {'window': int64_array([1, 1, 64, 1])},
'pooling_data': {'shape': int64_array([1, 3, 1, 64])},
'reshape_2_const': {'value': int64_array([1, 3, 1, 64]), 'shape': int64_array([4])},
'reshape_2_const_data': {'value': int64_array([1, 3, 1, 64]),
'shape': int64_array([4])},
'reshape_2_data': {'shape': int64_array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test3(self):
# Original graph
# data(1,3,64,64)-->Reduce(axis=[2,3],keep_dims=True)-->data(1,3,1,1)
#
# Reference graph
# data(1,3,64,64)->Reshape->Pool(1,3,1,1)->Reshape(1,3,1,1)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([1, 3, 64, 64])},
'reduce_1': {'keep_dims': True, 'type': 'ReduceMean'},
'const_data': {'value': int64_array([2, 3])},
'reduce_1_data': {'shape': int64_array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([1, 3, 64, 64])},
'reshape_1_const': {'value': int64_array([1, 3, 64 * 64, 1]),
'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([1, 3, 64 * 64, 1]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([1, 3, 64 * 64, 1])},
'pooling': {'window': int64_array([1, 1, 64 * 64, 1])},
'pooling_data': {'shape': int64_array([1, 3, 1, 1])},
'reshape_2_const': {'value': int64_array([1, 3, 1, 1]), 'shape': int64_array([4])},
'reshape_2_const_data': {'value': int64_array([1, 3, 1, 1]),
'shape': int64_array([4])},
'reshape_2_data': {'shape': int64_array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test4(self):
# Original graph
# data(2,3,64,64)-->Reduce(axis=[1,2,3],keep_dims=False)-->data(2)
#
# Reference graph
# data(2,3,64,64)->Reshape(2,1,3*64*64,1)->Pool(2,1,1,1)->Reshape(2)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([2, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([2, 3, 64, 64])},
'reduce_1': {'keep_dims': False, 'type': 'ReduceMean'},
'const_data': {'value': int64_array([1, 2, 3])},
'reduce_1_data': {'shape': int64_array([2])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([2, 3, 64, 64])},
'placeholder_1_data': {'shape': int64_array([2, 3, 64, 64])},
'reshape_1_const': {'value': int64_array([2, 1, 3 * 64 * 64, 1]),
'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([2, 1, 3 * 64 * 64, 1]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([2, 1, 3 * 64 * 64, 1])},
'pooling': {'window': int64_array([1, 1, 3 * 64 * 64, 1])},
'pooling_data': {'shape': int64_array([2, 1, 1, 1])},
'reshape_2_const': {'value': int64_array([2]), 'shape': int64_array([1])},
'reshape_2_const_data': {'value': int64_array([2]), 'shape': int64_array([1])},
'reshape_2_data': {'shape': int64_array([2])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test5(self):
# Original graph
# data(1, 16, 64, 64, 64, 4)-->Reduce(axis=[5],keep_dims=False)-->data(1, 16, 64, 64, 64)
#
# Reference graph
# data(1, 16, 64, 64, 64, 4)->Reshape(1*16*64*64, 64, 4, 1)->Pool(1, 1, 4, 1)->Reshape(1, 16, 64, 64, 64)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 16, 64, 64, 64, 4])},
'placeholder_1_data': {'shape': int64_array([1, 16, 64, 64, 64, 4])},
'reduce_1': {'keep_dims': False, 'type': 'ReduceMax'},
'const_data': {'value': int64_array([5])},
'reduce_1_data': {'shape': int64_array([1, 16, 64, 64, 64])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 16, 64, 64, 64, 4])},
'placeholder_1_data': {'shape': int64_array([1, 16, 64, 64, 64, 4])},
'reshape_1_const': {'value': int64_array([65536, 64, 4, 1]),
'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([65536, 64, 4, 1]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([65536, 64, 4, 1])},
'pooling': {'window': int64_array([1, 1, 4, 1])},
'pooling_data': {'shape': int64_array([65536, 64, 1, 1])},
'reshape_2_const': {'value': int64_array([1, 16, 64, 64, 64]),
'shape': int64_array([5])},
'reshape_2_const_data': {'value': int64_array([1, 16, 64, 64, 64]),
'shape': int64_array([5])},
'reshape_2_data': {'shape': int64_array([1, 16, 64, 64, 64])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test6(self):
# Original graph
# data(1,64,1)-->Reduce(axis=-2,keep_dims=True, reduce_type=Sum)-->data(1,1,1)
#
# Reference graph
# data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)->Power(scale=64)
#
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reduce_1'),
('const', 'const_data'),
('const_data', 'reduce_1', {'in': 1}),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 64, 1])},
'placeholder_1_data': {'shape': int64_array([1, 64, 1])},
'reduce_1': {'keep_dims': True, 'type': 'ReduceSum'},
'const_data': {'value': int64_array([-2])},
'reduce_1_data': {'shape': int64_array([1, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1_const', 'reshape_1_const_data'),
('reshape_1_const_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2_const', 'reshape_2_const_data'),
('reshape_2_const_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'power'),
('power', 'power_data'),
('power_data', 'concat'),
],
{'placeholder_1': {'shape': int64_array([1, 64, 1])},
'placeholder_1_data': {'shape': int64_array([1, 64, 1])},
'reshape_1_const': {'value': int64_array([1, 1, 64, 1]), 'shape': int64_array([4])},
'reshape_1_const_data': {'value': int64_array([1, 1, 64, 1]),
'shape': int64_array([4])},
'reshape_1_data': {'shape': int64_array([1, 1, 64, 1])},
'pooling': {'window': int64_array([1, 1, 64, 1])},
'pooling_data': {'shape': int64_array([1, 1, 1, 1])},
'reshape_2_const': {'value': int64_array([1, 1, 1]), 'shape': int64_array([3])},
'reshape_2_const_data': {'value': int64_array([1, 1, 1]), 'shape': int64_array([3])},
'reshape_2_data': {'shape': int64_array([1, 1, 1])},
'power': {'scale': 64.0},
'power_data': {'shape': int64_array([1, 1, 1])},
}, nodes_with_edges_only=True)
ReduceReplacer().find_and_replace_pattern(graph)
shape_inference(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
| [
"[email protected]"
]
| |
d427c13fa9959a66bb1f5351a06da9b53d663630 | 5f64d91dc45e58c8e73a52985c6db45d340d09cc | /Pibow_Zero_1.2/moving_vertical_rainbow_2.py | 940cbb7a0b456992dae98966351278b6229504f9 | []
| no_license | Breakfast-for-Pigeons/Unicorn-PHAT | e0343eb9a46c4b7be11d5028be07ea6b0f071efd | 6e70302eac995cd11821ecf2ee363a1b926df2ce | refs/heads/master | 2023-01-01T18:05:27.436081 | 2020-10-23T22:18:13 | 2020-10-23T22:18:13 | 74,320,010 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | #!/usr/bin/python3
"""
Moving Vertical Rainbow 2 - Pibow Zero 1.2
Retrieves the rainbows and sends them to the move function.
With the GPIO pins at the top of the Raspberry Pi, the rainbows move
from the bottom to the top.
....................
Functions:
- moving_vertical_rainbow_2: Retrieves the rainbows and sends them to
the move function.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from bfp_unicornphat import print_header
from bfp_unicornphat import stop
from bfp_unicornphat import get_vertical_rainbow_00
from bfp_unicornphat import get_vertical_rainbows
from bfp_unicornphat import move_vertically
########################################################################
# Functions #
########################################################################
def moving_vertical_rainbow_2():
"""
Retrieves the rainbows, assigns them in reverse order, and then
sends them as an argument to the move function.
"""
rainbow00 = get_vertical_rainbow_00()
rainbow03, rainbow02, rainbow01 = get_vertical_rainbows()
mv_rainbows_2 = (rainbow00, rainbow01, rainbow02, rainbow03)
move_vertically(mv_rainbows_2)
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
moving_vertical_rainbow_2()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
| [
"[email protected]"
]
| |
8e835c33a5fb8ddde4f544c04d02d8452b9410d3 | 5f1dee64fd88bd2237ef95ee894a37d8ddcdeb4e | /python/tools/slice_data.py | 56c0634900cb623ac83454b120888c0a466b13cb | []
| no_license | constantinpape/stuff_master | c893ae0c763801210db06bad548784c4b20d6680 | 68d80bd45a140cb0d30abf32a04365ca37c9694c | refs/heads/master | 2021-01-22T09:50:38.795259 | 2017-03-07T18:25:45 | 2017-03-07T18:25:45 | 33,936,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | import sys , h5py
import vigra
import numpy as np
if __name__ == '__main__':
#path_in = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/data_sub.h5"
#path_out = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/data_sub_sliced.h5"
#key = "data"
#path_in = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/superpixel/watershed_voxel.h5"
#path_out = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/superpixel/watershed_voxel_sliced.h5"
#key = "superpixel"
#path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
#path_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_slicedsliced.h5"
#key = "data"
path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/superpixel/watershed_vigra2.h5"
path_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/superpixel/watershed_vigra2_sliced.h5"
key = "superpixel"
data_in = vigra.readHDF5(path_in,key)
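    # Copy a 512x512x29 subvolume of the input stack into a new chunked HDF5 dataset.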
f_out = h5py.File(path_out,"w")
dset = f_out.create_dataset(key, (512,512,29), dtype = 'f', chunks = True )
dset[:,:,:] = data_in[0:512,0:512,0:29]
| [
"[email protected]"
]
| |
c163125bb4a4bc3c9b37983dcb3043063551b6de | 3e534ac0d2053e72e8d6b67f96b42cf56464b5fd | /setup.py | 3f133a6855b66b3d43c23bc909e5b20a81fb79f4 | []
| no_license | marcoceppi/dorthy | 466b89574a940991ca86be752b36c876964df699 | 781bd2b60fa8551671cdb2fd681012dad7e24490 | refs/heads/master | 2020-05-05T06:43:58.330792 | 2014-05-17T03:20:33 | 2014-05-17T03:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from setuptools import setup
install_requires = [
'tornado',
'PyYAML'
]
setup(
name='dorthy',
version='0.0.1',
description='A more express.js like framework built on top of Tornado',
install_requires=install_requires,
author='Marco Ceppi',
author_email='[email protected]',
url="https://github.com/marcoceppi/dorthy",
packages=['dorthy']
)
| [
"[email protected]"
]
| |
8f48b70fe931ef6ad194e43e39d05d0c1d2b58b9 | ff70ce8c57354ca54d193913ecb9d95555ac77fb | /app.py | 98097584f6c13838834d03556e5f3bf2aaf2dc0b | []
| no_license | hopetambala/SI507_F18_HW12_Flask_Guestbook | b1a3bf34dbcfa92b5fcd6f98e779ac9948a76148 | cc05f5c3648604726e61a5c27f51042f9447d4fa | refs/heads/master | 2020-04-09T21:10:38.823927 | 2018-12-06T01:27:10 | 2018-12-06T01:27:10 | 160,594,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | from flask import Flask, render_template, request, redirect
import model
app = Flask(__name__)
@app.route("/")
def index():
## print the guestbook
return render_template("index.html", entries=model.get_entries())
@app.route("/add")
def addentry():
## add a guestbook entry
return render_template("addentry.html")
@app.route("/admin")
def admin():
    ## show the admin page listing all guestbook entries
return render_template("admin.html", entries=model.get_entries())
@app.route("/postentry", methods=["POST"])
def postentry():
name = request.form["name"]
message = request.form["message"]
model.add_entry(name, message)
return redirect("/")
@app.route("/delete", methods=["POST"])
def delete():
id = request.form["id"]
model.delete_entry(id)
return redirect("/admin")
if __name__=="__main__":
model.init()
app.run(debug=True) | [
"[email protected]"
]
| |
c009c177e08c531d50eccbce517bd0d11cc63325 | 3f4464c932403615c1fbbaf82eaec096426b1ef5 | /StartOutPy4/CH8 Strings/count_Ts.py | c423d22ade2f77b8b3be5beced086e81104c62ed | []
| no_license | arcstarusa/prime | 99af6e3fed275982bf11ada7bf1297294d527e91 | 5f1102aa7b6eaba18f97eb388525d48ab4cac563 | refs/heads/master | 2020-03-22T14:07:08.079963 | 2019-05-09T11:45:21 | 2019-05-09T11:45:21 | 140,154,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # This program counts the number of times
# the letter T (uppercase or lowercase)
# appears in a string.
def main():
# Create a variable to use to hold the count.
# The variable must start with 0.
count = 0
# Get a string from the user.
my_string = input('Enter a sentence: ')
# Count the Ts.
for ch in my_string:
if ch == 'T' or ch == 't':
count += 1
# Print the result.
print('The letter T appears', count, 'times.')
# Call the main function.
main() | [
"[email protected]"
]
| |
a6773e605bf4c54aaed2d80bf7035835a7dc232a | 2c95e0f7bb3f977306f479d5c99601ab1d5c61f2 | /olive/cmds/configure.py | defbdae6415842945c53e5dddcbf1a609cc704ab | [
"Apache-2.0"
]
| permissive | Olive-blockchain/Olive-blockchain-CLI | d62444f8456467f8105531178d2ae53d6e92087d | 8c4a9a382d68fc1d71c5b6c1da858922a8bb8808 | refs/heads/main | 2023-07-19T03:51:08.700834 | 2021-09-19T16:05:10 | 2021-09-19T16:05:10 | 406,045,499 | 0 | 0 | Apache-2.0 | 2021-09-19T16:05:10 | 2021-09-13T16:20:38 | Python | UTF-8 | Python | false | false | 8,679 | py | from pathlib import Path
from typing import Dict
import click
from olive.util.config import load_config, save_config, str2bool
from olive.util.default_root import DEFAULT_ROOT_PATH
def configure(
root_path: Path,
set_farmer_peer: str,
set_node_introducer: str,
set_fullnode_port: str,
set_harvester_port: str,
set_log_level: str,
enable_upnp: str,
set_outbound_peer_count: str,
set_peer_count: str,
testnet: str,
):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
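    # Each CLI option below, when supplied, patches the loaded config in place and sets
    # change_made so the YAML file is only rewritten if something actually changed.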
if set_node_introducer:
try:
if set_node_introducer.index(":"):
host, port = (
":".join(set_node_introducer.split(":")[:-1]),
set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if set_farmer_peer:
try:
if set_farmer_peer.index(":"):
host, port = (
":".join(set_farmer_peer.split(":")[:-1]),
set_farmer_peer.split(":")[-1],
)
config["full_node"]["farmer_peer"]["host"] = host
config["full_node"]["farmer_peer"]["port"] = int(port)
config["harvester"]["farmer_peer"]["host"] = host
config["harvester"]["farmer_peer"]["port"] = int(port)
print("Farmer peer updated, make sure your harvester has the proper cert installed")
change_made = True
except ValueError:
print("Farmer address must be in format [IP:Port]")
if set_fullnode_port:
config["full_node"]["port"] = int(set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["introducer"]["port"] = int(set_fullnode_port)
print("Default full node port updated")
change_made = True
if set_harvester_port:
config["harvester"]["port"] = int(set_harvester_port)
config["farmer"]["harvester_peer"]["port"] = int(set_harvester_port)
print("Default harvester port updated")
change_made = True
if set_log_level:
levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
if set_log_level in levels:
config["logging"]["log_level"] = set_log_level
print(f"Logging level updated. Check {DEFAULT_ROOT_PATH}/log/debug.log")
change_made = True
else:
print(f"Logging level not updated. Use one of: {levels}")
if enable_upnp is not None:
config["full_node"]["enable_upnp"] = str2bool(enable_upnp)
if str2bool(enable_upnp):
print("uPnP enabled")
else:
print("uPnP disabled")
change_made = True
if set_outbound_peer_count is not None:
config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count)
print("Target outbound peer count updated")
change_made = True
if set_peer_count is not None:
config["full_node"]["target_peer_count"] = int(set_peer_count)
print("Target peer count updated")
change_made = True
if testnet is not None:
if testnet == "true" or testnet == "t":
print("Setting Testnet")
testnet_port = "56333"
testnet_introducer = "beta1_introducer.oliveblockchain.co"
testnet = "testnet7"
config["full_node"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["port"] = int(testnet_port)
config["farmer"]["full_node_peer"]["port"] = int(testnet_port)
config["timelord"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["introducer_peer"]["port"] = int(testnet_port)
config["introducer"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["host"] = testnet_introducer
config["selected_network"] = testnet
config["harvester"]["selected_network"] = testnet
config["pool"]["selected_network"] = testnet
config["farmer"]["selected_network"] = testnet
config["timelord"]["selected_network"] = testnet
config["full_node"]["selected_network"] = testnet
config["ui"]["selected_network"] = testnet
config["introducer"]["selected_network"] = testnet
config["wallet"]["selected_network"] = testnet
print("Default full node port, introducer and network setting updated")
change_made = True
elif testnet == "false" or testnet == "f":
print("Setting Mainnet")
mainnet_port = "7333"
mainnet_introducer = "introducer.oliveblockchain.co"
net = "mainnet"
config["full_node"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["port"] = int(mainnet_port)
config["farmer"]["full_node_peer"]["port"] = int(mainnet_port)
config["timelord"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port)
config["introducer"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["host"] = mainnet_introducer
config["selected_network"] = net
config["harvester"]["selected_network"] = net
config["pool"]["selected_network"] = net
config["farmer"]["selected_network"] = net
config["timelord"]["selected_network"] = net
config["full_node"]["selected_network"] = net
config["ui"]["selected_network"] = net
config["introducer"]["selected_network"] = net
config["wallet"]["selected_network"] = net
print("Default full node port, introducer and network setting updated")
change_made = True
else:
print("Please choose True or False")
if change_made:
print("Restart any running olive services for changes to take effect")
save_config(root_path, "config.yaml", config)
return 0
@click.command("configure", short_help="Modify configuration")
@click.option(
"--testnet",
"-t",
help="configures for connection to testnet",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str)
@click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str)
@click.option(
"--set-fullnode-port",
help="Set the port to use for the fullnode, useful for testing",
type=str,
)
@click.option(
"--set-harvester-port",
help="Set the port to use for the harvester, useful for testing",
type=str,
)
@click.option(
"--set-log-level",
"--log-level",
"-log-level",
help="Set the instance log level",
type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]),
)
@click.option(
"--enable-upnp",
"--upnp",
"-upnp",
help="Enable or disable uPnP",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option(
"--set_outbound-peer-count",
help="Update the target outbound peer count (default 8)",
type=str,
)
@click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str)
@click.pass_context
def configure_cmd(
ctx,
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_harvester_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
):
configure(
ctx.obj["root_path"],
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_harvester_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
)
| [
"[email protected]"
]
| |
642f4caad9f1b442926282ebde6954db9bfee7cc | 4c75f5ace0257e17879d8889ae3769a13e70b159 | /note14/code/merge_model.py | fac5e74e272d21eededbe39308d15abe8b115a0f | [
"Apache-2.0"
]
| permissive | songkunhuang/LearnPaddle | c8acb5de1bbf8cab0fd919f6b11ca81d3c2a3110 | c4500904615149115535b66a67d3e5d06f8435c4 | refs/heads/master | 2020-03-26T05:56:32.817406 | 2018-07-24T14:50:21 | 2018-07-24T14:50:21 | 144,581,839 | 1 | 0 | Apache-2.0 | 2018-08-13T13:13:37 | 2018-08-13T13:13:36 | null | UTF-8 | Python | false | false | 448 | py | # coding=utf-8
from paddle.utils.merge_model import merge_v2_model
# Import the neural network definitions
from mobilenet import mobile_net
from vgg import vgg_bn_drop
if __name__ == "__main__":
    # Size of the input image
img_size = 3 * 32 * 32
    # Total number of classes
class_dim = 10
net = mobile_net(img_size, class_dim)
param_file = '../model/mobile_net.tar.gz'
output_file = '../model/mobile_net.paddle'
merge_v2_model(net, param_file, output_file) | [
"[email protected]"
]
| |
696028a039265f5b442871828b0233dd58aa0c12 | a96af1535c19244640b9d137ede80f61569d6823 | /tests/test_legacy/test_strat_model_legacy/test_strat_setup_legacy.py | cfb5d2a20721b12c9b25f42d88383736729e0859 | [
"BSD-2-Clause-Views"
]
| permissive | emmamcbryde/summer-1 | 260d2c2c0085b5181f592b3bd8a186902f923135 | 3ea377b3352c82edaed95ea1e5683b9a130fe9e6 | refs/heads/master | 2023-04-23T20:06:52.800992 | 2021-04-29T05:29:29 | 2021-04-29T05:29:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,441 | py | """
Test setup of the stratified model
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from summer.legacy.constants import BirthApproach, Compartment, Flow, IntegrationType
from summer.legacy.model import StratifiedModel
@pytest.mark.skip
def test_stratify_flows_partial():
"""Ensure flows get stratified properly in partial strat"""
requested_flows = [
{
"type": Flow.INFECTION_FREQUENCY,
"parameter": "contact_rate",
"origin": "sus",
"to": "inf",
},
{
"type": Flow.STANDARD,
"parameter": "recovery_rate",
"origin": "inf",
"to": "sus",
},
{
"type": Flow.DEATH,
"parameter": "infect_death",
"origin": "inf",
},
]
parameters = {
"contact_rate": 1000,
"recovery_rate": "recovery_rate",
"infect_death": 10,
}
recovery_rate = lambda t: 2 * t
model = StratifiedModel(
times=[0, 1, 2, 3, 4, 5],
compartment_names=["sus", "inf"],
initial_conditions={"inf": 200},
parameters=parameters,
requested_flows=requested_flows,
starting_population=1000,
infectious_compartments=["inf"],
birth_approach=BirthApproach.NO_BIRTH,
entry_compartment="sus",
)
model.time_variants["recovery_rate"] = recovery_rate
assert model.compartment_names == ["sus", "inf"]
assert model.flows == requested_flows
assert model.parameters == parameters
assert model.time_variants == {"recovery_rate": recovery_rate}
assert model.overwrite_parameters == []
assert model.adaptation_functions == {}
vals = np.array([800, 200])
assert_array_equal(model.compartment_values, vals)
model.stratify(
"location",
strata_request=["home", "work", "other"],
compartments_to_stratify=["inf"],
requested_proportions={"home": 0.6},
)
assert model.flows == [
{
"origin": "sus",
"parameter": "contact_rateXlocation_home",
"to": "infXlocation_home",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "sus",
"parameter": "contact_rateXlocation_work",
"to": "infXlocation_work",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "sus",
"parameter": "contact_rateXlocation_other",
"to": "infXlocation_other",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "infXlocation_home",
"parameter": "recovery_rateXlocation_home",
"to": "sus",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_work",
"parameter": "recovery_rateXlocation_work",
"to": "sus",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_other",
"parameter": "recovery_rateXlocation_other",
"to": "sus",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_home",
"parameter": "infect_death",
"type": Flow.DEATH,
},
{
"origin": "infXlocation_work",
"parameter": "infect_death",
"type": Flow.DEATH,
},
{
"origin": "infXlocation_other",
"parameter": "infect_death",
"type": Flow.DEATH,
},
]
assert model.parameters["infect_death"] == 10
assert model.parameters["contact_rate"] == 1000
assert model.parameters["contact_rateXlocation_home"] == 1 / 3
assert model.parameters["contact_rateXlocation_other"] == 1 / 3
assert model.parameters["contact_rateXlocation_work"] == 1 / 3
assert model.parameters["recovery_rate"] == "recovery_rate"
assert model.parameters["recovery_rateXlocation_home"] == 1 / 3
assert model.parameters["recovery_rateXlocation_other"] == 1 / 3
assert model.parameters["recovery_rateXlocation_work"] == 1 / 3
assert model.time_variants == {"recovery_rate": recovery_rate}
assert model.adaptation_functions["contact_rateXlocation_home"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["contact_rateXlocation_home"](t=1, v=2) == 2 / 3
assert model.adaptation_functions["contact_rateXlocation_work"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["contact_rateXlocation_work"](t=1, v=2) == 2 / 3
assert model.adaptation_functions["contact_rateXlocation_other"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["contact_rateXlocation_other"](t=1, v=2) == 2 / 3
assert model.adaptation_functions["recovery_rateXlocation_home"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["recovery_rateXlocation_home"](t=1, v=2) == 2 / 3
assert model.adaptation_functions["recovery_rateXlocation_work"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["recovery_rateXlocation_work"](t=1, v=2) == 2 / 3
assert model.adaptation_functions["recovery_rateXlocation_other"](t=1, v=1) == 1 / 3
assert model.adaptation_functions["recovery_rateXlocation_other"](t=1, v=2) == 2 / 3
# FIXME: WTF
# model.prepare_to_run()
@pytest.mark.skip
def test_stratify_flows_full():
"""Ensure flows get stratified properly in full strat"""
requested_flows = [
{
"type": Flow.INFECTION_FREQUENCY,
"parameter": "contact_rate",
"origin": "sus",
"to": "inf",
},
{
"type": Flow.STANDARD,
"parameter": "recovery_rate",
"origin": "inf",
"to": "sus",
},
{
"type": Flow.DEATH,
"parameter": "infect_death",
"origin": "inf",
},
]
parameters = {
"contact_rate": 1000,
"recovery_rate": "recovery_rate",
"infect_death": 10,
}
recovery_rate = lambda t: 2 * t
model = StratifiedModel(
times=[0, 1, 2, 3, 4, 5],
compartment_names=["sus", "inf"],
initial_conditions={"inf": 200},
parameters=parameters,
requested_flows=requested_flows,
starting_population=1000,
infectious_compartments=["inf"],
birth_approach=BirthApproach.NO_BIRTH,
entry_compartment="sus",
)
model.time_variants["recovery_rate"] = recovery_rate
assert model.compartment_names == ["sus", "inf"]
assert model.flows == requested_flows
assert model.parameters == parameters
assert model.time_variants == {"recovery_rate": recovery_rate}
assert model.overwrite_parameters == []
assert model.adaptation_functions == {}
vals = np.array([800, 200])
assert_array_equal(model.compartment_values, vals)
model.stratify(
"location",
strata_request=["home", "work", "other"],
compartments_to_stratify=[],
requested_proportions={"home": 0.6},
)
assert model.flows == [
{
"origin": "susXlocation_home",
"parameter": "contact_rate",
"to": "infXlocation_home",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "susXlocation_work",
"parameter": "contact_rate",
"to": "infXlocation_work",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "susXlocation_other",
"parameter": "contact_rate",
"to": "infXlocation_other",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "infXlocation_home",
"parameter": "recovery_rate",
"to": "susXlocation_home",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_work",
"parameter": "recovery_rate",
"to": "susXlocation_work",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_other",
"parameter": "recovery_rate",
"to": "susXlocation_other",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_home",
"parameter": "infect_death",
"type": Flow.DEATH,
},
{
"origin": "infXlocation_work",
"parameter": "infect_death",
"type": Flow.DEATH,
},
{
"origin": "infXlocation_other",
"parameter": "infect_death",
"type": Flow.DEATH,
},
]
assert model.parameters["contact_rate"] == 1000
assert model.parameters["infect_death"] == 10
assert model.parameters["recovery_rate"] == "recovery_rate"
assert model.time_variants == {"recovery_rate": recovery_rate}
@pytest.mark.skip
def test_stratify_flows_full__with_adjustment_requests():
"""Ensure flows get stratified properly in full strat"""
adjustment_requests = {
"contact_rate": {
"home": "home_contact_rate",
"work": 0.5,
},
"recovery_rate": {
"home": 1,
"work": 2,
},
"infect_death": {
"home": 1,
"work": 2,
},
}
requested_flows = [
{
"type": Flow.INFECTION_FREQUENCY,
"parameter": "contact_rate",
"origin": "sus",
"to": "inf",
},
{
"type": Flow.STANDARD,
"parameter": "recovery_rate",
"origin": "inf",
"to": "sus",
},
{
"type": Flow.DEATH,
"parameter": "infect_death",
"origin": "inf",
},
]
parameters = {
"contact_rate": 1000,
"recovery_rate": "recovery_rate",
"infect_death": 10,
}
home_contact_rate_func = lambda t: t
recovery_rate_func = lambda t: 2 * t
model = StratifiedModel(
times=[0, 1, 2, 3, 4, 5],
compartment_names=["sus", "inf"],
initial_conditions={"inf": 200},
parameters=parameters,
requested_flows=requested_flows,
starting_population=1000,
infectious_compartments=["inf"],
birth_approach=BirthApproach.NO_BIRTH,
entry_compartment="sus",
)
model.time_variants["recovery_rate"] = recovery_rate_func
model.time_variants["home_contact_rate"] = home_contact_rate_func
assert model.compartment_names == ["sus", "inf"]
assert model.flows == requested_flows
assert model.parameters == parameters
assert model.time_variants == {
"recovery_rate": recovery_rate_func,
"home_contact_rate": home_contact_rate_func,
}
assert model.overwrite_parameters == []
assert model.adaptation_functions == {}
vals = np.array([800, 200])
assert_array_equal(model.compartment_values, vals)
model.stratify(
"location",
strata_request=["home", "work"],
compartments_to_stratify=[],
requested_proportions={"home": 0.6},
adjustment_requests=adjustment_requests,
)
assert model.flows == [
{
"origin": "susXlocation_home",
"parameter": "contact_rateXlocation_home",
"to": "infXlocation_home",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "susXlocation_work",
"parameter": "contact_rateXlocation_work",
"to": "infXlocation_work",
"type": Flow.INFECTION_FREQUENCY,
},
{
"origin": "infXlocation_home",
"parameter": "recovery_rateXlocation_home",
"to": "susXlocation_home",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_work",
"parameter": "recovery_rateXlocation_work",
"to": "susXlocation_work",
"type": Flow.STANDARD,
},
{
"origin": "infXlocation_home",
"parameter": "infect_deathXlocation_home",
"type": Flow.DEATH,
},
{
"origin": "infXlocation_work",
"parameter": "infect_deathXlocation_work",
"type": Flow.DEATH,
},
]
assert model.time_variants == {
"recovery_rate": recovery_rate_func,
"home_contact_rate": home_contact_rate_func,
}
assert model.parameters["contact_rate"] == 1000
assert model.parameters["infect_death"] == 10
assert model.parameters["recovery_rate"] == "recovery_rate"
assert model.parameters["contact_rateXlocation_home"] == "home_contact_rate"
assert model.parameters["contact_rateXlocation_work"] == 0.5
assert model.parameters["recovery_rateXlocation_home"] == 1
assert model.parameters["recovery_rateXlocation_work"] == 2
assert model.parameters["infect_deathXlocation_home"] == 1
assert model.parameters["infect_deathXlocation_work"] == 2
@pytest.mark.skip
def test_stratify_compartments_full():
"""Ensure compartment names and values get fully stratified properly"""
model = StratifiedModel(
times=[0, 1, 2, 3, 4, 5],
compartment_names=["sus", "inf"],
initial_conditions={"inf": 200},
parameters={},
requested_flows=[],
starting_population=1000,
infectious_compartments=["inf"],
birth_approach=BirthApproach.NO_BIRTH,
entry_compartment="sus",
)
assert model.compartment_names == ["sus", "inf"]
vals = np.array([800, 200])
assert_array_equal(model.compartment_values, vals)
model.stratify(
"location",
strata_request=["home", "work", "other"],
compartments_to_stratify=[],
requested_proportions={"home": 0.6},
)
assert model.compartment_names == [
"susXlocation_home",
"susXlocation_work",
"susXlocation_other",
"infXlocation_home",
"infXlocation_work",
"infXlocation_other",
]
vals = np.array([480, 160, 160, 120, 40, 40])
assert_array_equal(model.compartment_values, vals)
assert model.all_stratifications == {"location": ["home", "work", "other"]}
assert model.full_stratification_list == ["location"]
@pytest.mark.skip
def test_stratify_compartments_partial():
"""Ensure compartment names and values get partially stratified properly"""
model = StratifiedModel(
times=[0, 1, 2, 3, 4, 5],
compartment_names=["sus", "inf"],
initial_conditions={"inf": 200},
parameters={},
requested_flows=[],
starting_population=1000,
infectious_compartments=["inf"],
birth_approach=BirthApproach.NO_BIRTH,
entry_compartment="sus",
)
assert model.compartment_names == ["sus", "inf"]
vals = np.array([800, 200])
assert_array_equal(model.compartment_values, vals)
model.stratify(
"location",
strata_request=["home", "work", "other"],
compartments_to_stratify=["sus"],
requested_proportions={"home": 0.6},
)
assert model.compartment_names == [
"susXlocation_home",
"susXlocation_work",
"susXlocation_other",
"inf",
]
vals = np.array([480, 160, 160, 200])
assert_array_equal(model.compartment_values, vals)
assert model.all_stratifications == {"location": ["home", "work", "other"]}
assert model.full_stratification_list == []
| [
"[email protected]"
]
| |
71fdad854f57ec04453d5c7adb2360b1f8c20fbb | 153da69b35f032f5b83a06f17008ba41a1b336b4 | /src/app/vault/src/main/entity/__init__.py | 270705fcf68ccca631f0df8206ad7e2b306c43e7 | [
"MIT"
]
| permissive | TrendingTechnology/hspylib | 6400cadf9dfe6ab5733712dcfeccf8022d61c589 | c79a2c17e89fe21d00ccd9c1646a03407cd61839 | refs/heads/master | 2023-06-20T15:47:35.962661 | 2021-07-19T22:12:18 | 2021-07-19T23:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # _*_ coding: utf-8 _*_
#
# HSPyLib v0.11.1
#
# Package: app.vault.src.main.entity
"""Package initialization."""
__all__ = [
'validator',
'vault_entry'
]
| [
"[email protected]"
]
| |
bd805a463c372e859e12a602bef4b4003685b198 | 95310128d287ed510b354216efffb2bbb03cdbb4 | /subsetsum.py | 229656a8f91d2647f3d3b986035037fa63923290 | []
| no_license | babiswas2020/python-practise | a35fd2378fbd0213168b811303f70c52fabb31ef | 06a4525d5ad2037cf3064f497a7a08dafae13f9c | refs/heads/master | 2022-11-10T23:58:05.672114 | 2020-06-20T09:08:18 | 2020-06-20T09:08:18 | 273,674,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | def subsetsum(arr,sum,N):
    # Return True if some subset of the first N elements of arr adds up
    # exactly to `sum` (note that the parameter shadows the builtin sum).
    if N==0 and sum!=0:
        return False
    elif N==0 and sum==0:
        return True
    if N-1>=0:
        if arr[N-1]<=sum:
            # Either include the last element or leave it out.
            return subsetsum(arr,sum-arr[N-1],N-1) or subsetsum(arr,sum,N-1)
        elif arr[N-1]>sum:
            # The last element is larger than the remaining target; skip it.
            return subsetsum(arr,sum,N-1)
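# Illustrative sketch (not part of the original file): the same question can
# be answered iteratively by tracking the set of reachable sums, which avoids
# the exponential recursion above for larger inputs (assumes non-negative
# values, as in the example below).
def subsetsum_iter(arr, target):
    reachable = {0}
    for x in arr:
        reachable |= {s + x for s in reachable if s + x <= target}
    return target in reachable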
if __name__=="__main__":
l=[12,4,6,8,0,23,14]
print(subsetsum(l,11,7))
| [
"[email protected]"
]
| |
67069710247e7000932a7a6a8e171dd88d51b76d | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/apps/fbaptestpar.py | a88c136c37fa263588f63980ab645485c9e77942 | []
| no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | '''
Created on Jan 30, 2014
@author: eocampo
To schedule FBAP for parallel testing.
'''
__version__ = '20140130'
import sys
import procdata.procinfa as pi
from apps.infbaseapp import _InfaBaseApp
# Mandatory to define self.cmdStep
class FBAPTestPar(_InfaBaseApp):
exitOnError = True
def __init__(self):
super(FBAPTestPar,self).__init__()
self.landDir = 'SrcFiles'
self.incFileSet = [] # Incoming Files. Contains full path name.
self.incFiles = []
self.workFiles = [] # Files that were moved to the working dir (ideally same than incSetFile).
self.trigFiles = [] # Incoming Trigger File.
# Allowable commands for this application. Make sure to Set
self.cmdStep = { 'A' : self.getLock ,
'B' : self.getTrigFiles ,
'C' : self.wkfTISStgFBAPLoc ,
'D' : self.wkfTISTgtFBAP ,
}
# Infa Environmental variables/
self.infaEnvVar = {
'PMCMD' : 'mg.pmcmd' ,
'INFA_USER' : 'self.ib.rep_user' ,
'INFA_XPWD' : 'self.ib.rep_xpwd' ,
'DOMAIN' : 'self.ib.dom_name' ,
'INT_SERV' : 'self.ib.IS' ,
'INFA_SHARE' : 'self.ib.shareDir' ,
'INFA_APP_CFG' : 'self.ib.cfgDir' ,
'INFA_APP_LCK' : 'self.ib.lckDir' ,
'INFA_APP_CTL' : 'self.ib.ctlDir' ,
}
def wkfTISStgFBAPLoc(self):
self.ib.fld = 'TIS'
self.ib.wkf = 'wkf_TIS_STG_FBAP_LOCAL'
#rc = pi.runWkflWait(self.ib,self.log)
rc=0
if rc != 0 :
self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
else :
self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
return rc
def wkfTISTgtFBAP(self):
self.ib.fld = 'TIS'
self.ib.wkf = 'wkf_TIS_TGT_FBAP'
rc=0
#rc = pi.runWkflWait(self.ib,self.log)
if rc != 0 :
self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
else :
self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
return rc
def main(Args):
a = FBAPTestPar()
rc = a.main(Args)
return rc
if __name__ == '__main__':
from setwinenv import setEnvVars # Remove in UX
setEnvVars() # Remove in UX
rc= main(sys.argv)
| [
"[email protected]"
]
| |
e28751ed914f028052ec7108aba16f1762e09330 | 97eaaa88e0094cfbb40d38937e49b013468b6dce | /proyecto/Scripts/MiProyecto/tracks/admin.py | 9a620507ef545d15cc00a87bf617aaae634fa816 | []
| no_license | didiacosta/Backend | cd652bdd1a7ec971157a539cb066efe50b024670 | c3bc7053cb579244ad7c9b0c9690d983381f9b15 | refs/heads/master | 2022-11-09T21:23:43.053107 | 2016-04-06T15:44:48 | 2016-04-06T15:44:48 | 55,620,174 | 0 | 1 | null | 2022-10-19T14:56:09 | 2016-04-06T15:58:32 | Python | UTF-8 | Python | false | false | 117 | py | from django.contrib import admin
# Register your models here.
from .models import Track
admin.site.register(Track)
| [
"[email protected]"
]
| |
8a66e6dfc999127f04b86f88629b3443a4a2f5ab | a4435e31cdfbe68aebfdb241bb82ed33dd4f5a30 | /chapter13/Multiinherit.py | 8fc30d31404fbb978886b4165250b8dcd66189f6 | []
| no_license | vonzhou/Core-Python-Programming | 804ce8ade8ca1af6b2b2effb0b78ec30a124314d | 749d4dff8d2158c8be706bca1a82a283150c629a | refs/heads/master | 2021-01-10T01:29:23.918561 | 2015-10-20T09:38:19 | 2015-10-20T09:38:19 | 44,003,197 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #P360
class P1:
def foo(self):
print "calling P1-foo()...."
class P2:
def foo(self):
print "calling P2-foo()..."
def bar(self):
print "calling P2-bar()..."
class C1(P1,P2):
pass
class C2(P1,P2):
def bar(self):
print "calling C2-bar()...."
class GC(C1,C2):
pass
g1 = GC()
g1.foo()
print "-----------------"
g1.bar()
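# Annotation (not part of the original listing): these are old-style (classic)
# Python 2 classes, so attribute lookup is depth-first, left-to-right.  The
# expected output is therefore P1's foo() and P2's bar(); C2.bar() is never
# reached even though C2 appears in GC's bases:
#   calling P1-foo()....
#   -----------------
#   calling P2-bar()...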
| [
"[email protected]"
]
| |
51f0076c78fa9f6ff59c75ea2c3f76af8166726a | b37b1e809a055bfbab1c7a017e1ae5b572555827 | /carmesi/users/migrations/0003_user_modified_by.py | 89898ae4683ff811967fd82c0cee18acd51131f8 | [
"MIT"
]
| permissive | raultr/CarmesiAnt | 828285ee389d1ed76c53a6cf504eb4ca2567cfe3 | 4ce143482001e015584943a5ed6f93adfb3dd520 | refs/heads/master | 2021-10-18T05:38:06.578438 | 2019-02-14T05:03:19 | 2019-02-14T05:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | # Generated by Django 2.1.1 on 2019-02-10 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_created_by'),
]
operations = [
migrations.AddField(
model_name='user',
name='modified_by',
field=models.ForeignKey(blank=True, help_text='Usuario última actualización', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_modificado_por', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
e2fb6eeb73d6f91f850faec33aa1c97db748d904 | 32cf9c3099c36a46804e393dd1491a8954f50263 | /2019.01.28 - 문자열, 패턴매칭 알고리즘/GNS.py | 4d0b1294c67827e9b64dfe1799200bcc304b39c4 | []
| no_license | ash92kr/s_code | ce3bda6a403600892750e181dca5ed8c4caebcb1 | 92eace551d132b91ee91db6c0afd38b93f9b647b | refs/heads/master | 2020-04-12T00:27:07.043091 | 2019-05-21T08:17:39 | 2019-05-21T08:17:39 | 162,200,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import sys
sys.stdin = open("GNS_test_input.txt")
T = int(input())
for testcase in range(T):
temp = input() # #1 7041을 dummy로 처리(len 함수 이용)
data = list(map(str, input().split()))
num = ['ZRO', 'ONE', 'TWO', 'THR', 'FOR', 'FIV', 'SIX', 'SVN', 'EGT', 'NIN']
repeat = []
for i in range(len(num)):
repeat.append(data.count(num[i])) # num 리스트에 있는 표현이 data에 몇 번 있는지 count 함수를 사용한 값을 repeat 리스트에 넣음
print(f'#{testcase + 1}') # #1 출력용 문구
for j in range(len(repeat)): # repeat 리스트에는 모두 10개의 원소가 존재
# if repeat[j] > 0:
for k in range(repeat[j]): # repeat의 해당 원소의 숫자만큼 ZRO 등을 반복해서 출력
print(num[j], end=" ")
print()
| [
"[email protected]"
]
| |
71edda26f88591b56d8e37183583de483d196be6 | 1e4c14ae893dc15e612224287f908396fca40cbc | /src/utils/img_utils.py | fe4a357382b0879be931797a117cbbfcc6efbc1f | []
| no_license | yezhengkai/ground-based-cloud | 2958bf5ed6adfa04fe903ffba8122e9f98ad19bb | 619730b668f695f1066253a4ff39be282484201a | refs/heads/master | 2023-02-26T23:52:06.988713 | 2021-01-30T13:39:52 | 2021-01-30T13:39:52 | 316,887,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def get_img_shape(img_path, channel="last"):
"""
References
----------
https://stackoverflow.com/questions/52962969/number-of-channels-in-pil-pillow-image
"""
img = Image.open(img_path)
if channel == "last":
return (*img.size, len(img.getbands()))
elif channel == "first":
return (len(img.getbands()), *img.size)
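if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); "example.png"
    # is a placeholder path, not a file shipped with this project.
    print(get_img_shape("example.png"))                   # e.g. (width, height, 3)
    print(get_img_shape("example.png", channel="first"))  # e.g. (3, width, height)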
| [
"[email protected]"
]
| |
45ef835da94d1a72a2d9226888229051b094d822 | 74fbdb84b7accafd534704faca4532651296a8ea | /shortlink/settings.py | 3a4321555c654ffeeb9873582f69e726c32d17a5 | []
| no_license | danielmichaels/shortlink | cffd81be68ea737561eb553f04c670e9021c3fe4 | 22ad8f5ea01923e4702e2230e5d167eca145da7a | refs/heads/master | 2022-11-30T15:37:21.423663 | 2020-08-17T02:12:40 | 2020-08-17T02:12:40 | 288,063,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for shortlink project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7qj6aku31)2ma!kc6vab21lqkytofvrnd_k*y99kw-*uf(*ba('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shortlink.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shortlink.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
9ff4c53986ac39154a15d0186f7c061db359ce51 | 01b745e6f45960d0aca3e4612ce810a7f40227a8 | /pyslet/http/client.py | 9c5f7391391050fe7ca2cc63d4caf28d5c7706f9 | [
"BSD-3-Clause"
]
| permissive | stevehof/pyslet | fbad07998642e2297420e4ac211c5f1ca4d7307d | f27d55bc6ecd3b9ee0037f11aa1104fe0ad0dbfd | refs/heads/master | 2021-01-15T18:26:31.667495 | 2016-01-22T10:34:11 | 2016-01-22T10:56:59 | 50,014,377 | 0 | 0 | null | 2016-01-20T08:00:02 | 2016-01-20T08:00:00 | Python | UTF-8 | Python | false | false | 88,849 | py | #! /usr/bin/env python
import logging
import string
import time
import socket
import ssl
import select
import types
import threading
import io
import errno
import os
import random
try:
import OpenSSL
from OpenSSL import SSL, crypto
except ImportError:
OpenSSL = None
import pyslet.info as info
import pyslet.rfc2396 as uri
from pyslet.pep8 import PEP8Compatibility
import auth
import cookie
import grammar
import messages
import params
import server as pipe
class RequestManagerBusy(messages.HTTPException):
"""The HTTP client is busy
Raised when attempting to queue a request and no connections become
available within the specified timeout."""
pass
class ConnectionClosed(messages.HTTPException):
"""The HTTP client has been closed
Raised when attempting to queue a request for a :py:class:`Client`
object that is in the process of closing."""
pass
USER_AGENT = params.ProductToken('pyslet', info.version)
"""A :py:class:`ProductToken` instance
This value is used as the default UserAgent string and is based on the
current version of the Pyslet library. E.g.::
pyslet-0.5.20120801"""
class Connection(object):
"""Represents an HTTP connection.
manager
The :py:class:`Client` instance that owns us
scheme
The scheme we are being opened for ('http' or 'https')
hostname
The host name we should connect to
port
The port we should connect to
timeout
The maximum time to wait for a connection. If the connection
has not been able to read or write data for more than this
timeout value it will assume the server has hung up. Defaults
to None, no timeout.
Used internally by the :py:class:`Client` to manage connections to
HTTP servers. Each connection is assigned a unique :py:attr:`id` on
construction. In normal use you won't need to call any of these
methods yourself but the interfaces are documented to make it easier
to override the behaviour of the
:py:class:`messages.Message` object that *may* call some of these
connection methods to indicate protocol exceptions.
Connections define comparison methods, if c1 and c2 are both
instances then::
c1 < c2 == True
...if c1 was last active before c2. The connection's active time is
updated each time :py:meth:`connection_task` is called.
Connections are shared across threads but are never in use by more
than one thread at a time. The thread currently bound to a
connection is indicated by :py:attr:`thread_id`. The value of this
attribute is managed by the associated
:py:class:`Client`. Methods *must only* be called
from this thread unless otherwise stated.
The scheme, hostname and port are defined on construction and do not
change."""
# mode constant: ready to start a request
REQ_READY = 0
# mode constant: waiting to send the request body
REQ_BODY_WAITING = 1
# mode constant: sending the request body
REQ_BODY_SENDING = 2
# mode constant: waiting to disconnect
CLOSE_WAIT = 3
# mode constant: waiting to upgrade the connection
REQ_UPGRADE_WAITING = 4
# mode constant: tunnelling this connection
TUNNELLING = 5
# a mapping to make debugging messages easier to read
    MODE_STRINGS = {0: "Ready", 1: "Waiting", 2: "Sending", 3: "Closing",
                    4: "Upgrading", 5: "Tunnelling"}
def __init__(self, manager, scheme, hostname, port, timeout=None):
#: the RequestManager that owns this connection
self.manager = manager
#: the id of this connection object
self.id = self.manager._nextid()
#: the http scheme in use, 'http' or 'https'
self.scheme = scheme
#: the target host of this connection
self.host = hostname
#: the target port of this connection
self.port = port
#: the protocol version of the last response from the server
self.protocol = None
#: the thread we're currently bound to
self.thread_id = None
#: time at which this connection was last active
self.last_active = 0
#: timeout (seconds) for our connection
self.timeout = timeout
#: time of the last successful read or write operation
self.last_rw = None
#: the queue of requests we are waiting to process
self.request_queue = []
#: the current request we are processing
self.request = None
#: the queue of responses we are waiting to process
self.response_queue = []
#: the current response we are processing
self.response = None
self.request_mode = self.REQ_READY
# If we don't get a continue in 1 minute, send the data anyway
if timeout is None:
self.continue_waitmax = 60.0
else:
# this rather odd simplification is based on a typical
# request timeout of 90s on a server corresponding to a wait
# of 15s for the 100 Continue response.
self.continue_waitmax = timeout / 6
self.continue_waitstart = 0
# a lock for our structures to help us go multi-threaded
self.lock = threading.RLock()
# True if we are closed or closing
self.closed = False
# Low-level socket members
self.socket = None
self.socket_file = None
self.send_buffer = []
# the number of bytes buffered for sending
self.buffered_bytes = 0
#: The number of bytes sent to the server since the connection
#: was last established
self.sent_bytes = 0
self.recv_buffer = []
self.recv_buffer_size = 0
def thread_target_key(self):
return (self.thread_id, self.scheme, self.host, self.port)
def target_key(self):
return (self.scheme, self.host, self.port)
def __cmp__(self, other):
if not isinstance(other, Connection):
raise TypeError
return cmp(self.last_active, other.last_active)
def __repr__(self):
return "Connection(%s,%i)" % (self.host, self.port)
def queue_request(self, request):
self.request_queue.append(request)
def connection_task(self):
"""Processes the requests and responses for this connection.
This method is mostly non-blocking. It returns a (r,w,wait)
triple consisting of two sockets or file numbers and a wait time
(in seconds). The first two values are suitable for passing
to select and indicate whether the connection is waiting to read
and/or write data. Either or both may be None. The third value
indicates the desired maximum amount of time to wait before the next
call and is usually set to the connection's timeout.
The connection object acts as a small buffer between the HTTP
message itself and the server. The implementation breaks down
in to a number of phases:
1. Start processing a request if one is queued and we're ready
for it. For idempotent requests (in practice, everything
except POST) we take advantage of HTTP pipelining to send
the request without waiting for the previous response(s).
The only exception is when the request has an Expect:
100-continue header. In this case the pipeline stalls until
the server has caught up with us and sent the 100 response
code.
2. Send as much data to the server as we can without blocking.
3. Read and process as much data from the server as we can
without blocking.
The above steps are repeated until we are blocked at which point
we return.
Although data is streamed in a non-blocking manner there are
situations in which the method will block. DNS name resolution
and creation/closure of sockets may block."""
while True:
rbusy = False
wbusy = False
tbusy = None
self.last_active = time.time()
if self.request_queue and self.request_mode == self.REQ_READY:
request = self.request_queue[0]
if (request.is_idempotent() or
(self.response is None and
not self.send_buffer)):
# only pipeline idempotent methods, our pipelining
# is strict for POST requests, wait for the
# response, request and buffer to be finished.
wait_time = request.retry_time - time.time()
if wait_time <= 0:
self.request_queue = self.request_queue[1:]
self._start_request(request)
elif tbusy is None or wait_time < tbusy:
tbusy = wait_time
if self.request or self.response:
if self.socket is None:
self.new_socket()
rbusy = False
wbusy = False
# The first section deals with the sending cycle, we
# pass on to the response section only if we are in a
# waiting mode or we are waiting for the socket to be
# ready before we can write data
if self.send_buffer:
send_rbusy, send_wbusy = self._send_request_data()
rbusy = rbusy or send_rbusy
wbusy = wbusy or send_wbusy
if rbusy or wbusy:
if (self.last_rw is not None and
self.timeout is not None and
self.last_rw + self.timeout < time.time()):
# assume we're dead in the water
raise IOError(
errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.client.Connection")
else:
continue
elif self.request_mode == self.REQ_BODY_WAITING:
# empty buffer and we're waiting for a 100-continue
# (that may never come)
if self.continue_waitstart:
logging.info("%s waiting for 100-Continue...",
self.host)
wait_time = (self.continue_waitmax -
(time.time() - self.continue_waitstart))
if (wait_time < 0):
logging.warn("%s timeout while waiting for "
"100-Continue response",
self.host)
self.request_mode = self.REQ_BODY_SENDING
# change of mode, restart the loop
continue
else:
# we need to be called again in at most
# wait_time seconds so we can give up
# waiting for the 100 response
if tbusy is None or tbusy > wait_time:
tbusy = wait_time
else:
self.continue_waitstart = time.time()
wait_time = self.continue_waitmax
if tbusy is None or tbusy > wait_time:
tbusy = wait_time
elif self.request_mode == self.REQ_BODY_SENDING:
# Buffer is empty, refill it from the request
data = self.request.send_body()
if data:
logging.debug("Sending to %s: \n%s", self.host, data)
self.send_buffer.append(data)
self.buffered_bytes += len(data)
# Go around again to send the buffer
continue
elif data is None:
logging.debug("send_body blocked "
"waiting for message body")
# continue on to the response section
elif "upgrade" in self.request.get_connection():
self.request_mode = self.REQ_UPGRADE_WAITING
else:
# Buffer is empty, request is exhausted, we're
# done with it! we might want to tell the
# associated respone that it is now waiting, but
# matching is hard when pipelining!
# self.response.StartWaiting()
self.request.disconnect(self.sent_bytes)
self.request = None
self.request_mode = self.REQ_READY
elif self.request_mode == self.REQ_UPGRADE_WAITING:
# empty buffer and we're waiting for a 101 response
# to switch protocols
if self.request.status:
# a failed upgrade - we're done
self.request.disconnect(self.sent_bytes)
self.request = None
self.request_mode = self.REQ_READY
logging.debug("waiting for connection upgrade")
# This section deals with the response cycle, we only
# get here once the buffer is empty or we're blocked on
# sending.
if self.response:
recv_done, recv_rbusy, recv_wbusy = self._recv_task()
rbusy = rbusy or recv_rbusy
wbusy = wbusy or recv_wbusy
if rbusy or wbusy:
if (self.last_rw is not None and
self.timeout is not None and
self.last_rw + self.timeout < time.time()):
# assume we're dead in the water
raise IOError(
errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.client.Connection")
else:
if recv_done:
# The response is done
close_connection = False
if self.response:
self.protocol = self.response.protocol
close_connection = not self.response.keep_alive
if self.response_queue:
self.response = self.response_queue[0]
self.response_queue = self.response_queue[1:]
self.response.start_receiving()
elif self.response:
self.response = None
if self.request_mode == self.CLOSE_WAIT:
# no response and waiting to close the
# connection
close_connection = True
if close_connection:
self.close()
# Any data received on the connection could
# change the request state, so we loop round
# again
continue
break
elif self.request_queue:
# no request or response but we're waiting for a retry
rbusy = False
wbusy = False
break
else:
# no request or response, we're idle or tunnelling
rbusy = False
wbusy = False
if self.request_mode == self.CLOSE_WAIT:
# clean up if necessary
self.close()
elif self.request_mode == self.TUNNELLING:
# don't deactivate the connection
break
self.manager._deactivate_connection(self)
break
if (rbusy or wbusy) and (tbusy is None or tbusy > self.timeout):
# waiting for i/o, make sure the timeout is capped
tbusy = self.timeout
if rbusy:
rbusy = self.socket_file
if wbusy:
wbusy = self.socket_file
logging.debug("connection_task returning %s, %s, %s",
repr(rbusy), repr(wbusy), str(tbusy))
return rbusy, wbusy, tbusy
def request_disconnect(self):
"""Disconnects the connection, aborting the current request."""
self.request.disconnect(self.sent_bytes)
self.request = None
if self.response:
self.send_buffer = []
self.request_mode = self.CLOSE_WAIT
else:
self.close()
def continue_sending(self, request):
"""Instructs the connection to start sending any pending request body.
If a request had an "Expect: 100-continue" header then the
connection will not send the data until instructed to do so by a
call to this method, or
:py:attr:`continue_waitmax` seconds have elapsed."""
logging.debug("100 Continue received... ready to send request")
if (request is self.request and
self.request_mode == self.REQ_BODY_WAITING):
self.request_mode = self.REQ_BODY_SENDING
def switch_protocols(self, request):
"""Instructs the connection to switch protocols
If a request had a "Connection: upgrade" header then the
connection will wait after sending the request in
REQ_UPGRADE_WAITING mode until instructed to switch protocols
(or the request gets some other response)."""
logging.debug("101 Switching protocols...")
if (request is self.request and
self.request_mode == self.REQ_UPGRADE_WAITING):
# remove this connection from it's manager
self.manager._upgrade_connection(self)
# we will not send any future requests, requeue them
for r in self.request_queue:
self.manager.queue_request(r, self.timeout)
self.request_queue = []
# the response queue should be empty!
self.response_queue = []
# we're done with the manager
self.manager = None
# set up some pipes for the connection
request.send_pipe = pipe.Pipe(
10 * io.DEFAULT_BUFFER_SIZE, rblocking=False,
timeout=self.timeout, name="%s:Sending" % self.host)
request.recv_pipe = pipe.Pipe(
10 * io.DEFAULT_BUFFER_SIZE, wblocking=False,
timeout=self.timeout, name="%s:Receiving" % self.host)
# spawn the threads that will drive the data flow
send_t = threading.Thread(target=self._send_tunnel,
args=(request, ))
recv_t = threading.Thread(target=self._recv_tunnel,
args=(request, ))
send_t.start()
recv_t.start()
# we've finished with the HTTP part of this connection
self.request.disconnect(self.sent_bytes)
# but reconnect for the tunnel phase to allow closing
self.request.connection = self
self.request = None
self.request_mode = self.TUNNELLING
def _send_tunnel(self, request):
self.thread_id = threading.current_thread().ident
while True:
if self.send_buffer:
rbusy, wbusy = self._send_request_data()
if rbusy or wbusy:
if (self.last_rw is not None and
self.timeout is not None and
self.last_rw + self.timeout < time.time()):
# assume we're dead in the water
raise IOError(
errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.client.Connection")
# wait on the socket
try:
readers = []
if rbusy:
readers.append(self.socket_file)
writers = []
if wbusy:
writers.append(self.socket_file)
logging.debug("_send_tunnel waiting for select: "
"readers=%s, writers=%s, timeout=%f",
repr(readers), repr(writers),
self.timeout)
r, w, e = self.socketSelect(readers, writers, [],
self.timeout)
except select.error, err:
logging.error("Socket error from select: %s", str(err))
else:
# not waiting for i/o, re-fill the send buffer
data = request.send_pipe.read(io.DEFAULT_BUFFER_SIZE)
if data:
logging.debug("Sending to %s: \n%s", self.host, data)
self.send_buffer.append(data)
self.buffered_bytes += len(data)
elif data is None:
# we're blocked, wait forever
try:
request.send_pipe.read_wait()
except IOError as err:
# closed pipe = forced EOF
break
else:
# EOF
break
request.send_pipe.close()
request.send_pipe = None
# at this point the recv_tunnel is probably stuck waiting
# forever for data to arrive from the server (or for the server
# to hang up). The correct behaviour is that the client should
# read what it wants from request.recv_pipe and then close the
# pipe to indicate that it is finished with it. As a result
# we need to wait for the pipe to close.
rflag = threading.Event()
while not request.recv_pipe.closed:
# we'll go around this loop once per read until the
# pipe is closed
rflag.clear()
request.recv_pipe.set_rflag(rflag)
logging.debug("_send_tunnel waiting for recv_pipe close")
rflag.wait()
# closing the connection will close the pipes and interrupt
# any hanging recv or select call.
self.close()
def _recv_tunnel(self, request):
# start monitoring both read & write, in case we're SSL
readers = [self.socket_file]
writers = [self.socket_file]
while True:
# wait until we can write to the pipe
try:
request.recv_pipe.write_wait()
except IOError as err:
# closed pipe or we already wrote the EOF
break
if self.recv_buffer:
data = self.recv_buffer[0]
nbytes = request.recv_pipe.write(data)
if nbytes is None:
# shouldn't happen, as we waited until writable
pass
elif nbytes < len(data):
# couldn't write all the data, trim the recv_buffer
self.recv_buffer[0] = self.recv_buffer[0][nbytes:]
else:
self.recv_buffer = self.recv_buffer[1:]
continue
# empty recv_buffer, wait for the socket to be ready
try:
logging.debug("_recv_tunnel waiting for select: "
"readers=%s, writers=%s, timeout=None",
repr(readers), repr(writers))
r, w, e = self.socketSelect(readers, writers, [], None)
except select.error, err:
logging.error("Socket error from select: %s", str(err))
try:
data = None
data = self.socket.recv(io.DEFAULT_BUFFER_SIZE)
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
# we're blocked on recv (only)
readers = [self.socket_file]
writers = []
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# we're blocked on send, really this can happen!
readers = []
writers = [self.socket_file]
else:
# we're going to swallow this error, log it
logging.error("socket.recv raised %s", str(err))
data = None
except IOError as err:
if not r and err.errno == errno.EAGAIN:
# we're blocked on recv, select did not return a reader
readers = [self.socket_file]
writers = []
continue
# We can't truly tell if the server hung-up except by
# getting an error here so this error could be fairly benign.
logging.warn("socket.recv raised %s", str(err))
data = None
logging.debug("Reading from %s: \n%s", self.host, repr(data))
if data:
self.last_rw = time.time()
nbytes = len(data)
self.recv_buffer.append(data)
self.recv_buffer_size += nbytes
logging.debug("Read buffer size: %i" % self.recv_buffer_size)
elif r:
logging.debug("%s: closing connection after recv returned no "
"data on ready to read socket", self.host)
request.recv_pipe.write_eof()
break
else:
readers = [self.socket_file]
writers = []
def close(self, err=None):
"""Closes this connection nicelly, optionally logging the
exception *err*
The connection disconnects from the current request and
terminates any responses we are waiting for by calling their
:py:meth:`ClientResponse.handle_disconnect` methods.
Finally, the socket is closed and all internal structures are
reset ready to reconnect when the next request is queued."""
if (self.thread_id and
self.thread_id != threading.current_thread().ident):
# closing from a thread other than the one we expect: kill
self.kill()
return
if err:
logging.error(
"%s: closing connection after error %s", self.host, str(err))
else:
logging.debug("%s: closing connection", self.host)
if self.request:
self.request.disconnect(self.sent_bytes)
self.request = None
self.request_mode = self.CLOSE_WAIT
resend = True
while self.response:
response = self.response
# remove it from the queue
if self.response_queue:
self.response = self.response_queue[0]
self.response_queue = self.response_queue[1:]
else:
self.response = None
# check for resends
if err or response.status is None and resend:
# terminated by an error or before we read the response
if response.request.can_retry():
# resend this request
logging.warn("retrying %s", response.request.get_start())
self.queue_request(response.request)
continue
else:
resend = False
response.handle_disconnect(err)
with self.lock:
if self.socket:
olds = self.socket
self.socket = None
if olds is not None:
self._close_socket(olds)
self.send_buffer = []
self.buffered_bytes = 0
self.sent_bytes = 0
self.recv_buffer = []
self.recv_buffer_size = 0
self.request_mode = self.REQ_READY
def kill(self):
"""Kills the connection, typically called from a different
thread than the one currently bound (if any).
No request methods are invoked, it is assumed that after this
method the manager will relinquish control of the connection
object creating space in the pool for other connections. Once
killed, a connection is never reconnected.
If the owning thread calls connection_task after kill completes
it will get a socket error or unexpectedly get zero-bytes on
recv indicating the connection is broken. We don't close the
socket here, just shut it down to be nice to the server.
If the owning thread really died, Python's garbage collection
will take care of actually closing the socket and freeing up the
file descriptor."""
with self.lock:
logging.debug("Killing connection to %s", self.host)
if not self.closed and self.socket:
try:
logging.warn(
"Connection.kill forcing socket shutdown for %s",
self.host)
self.socket.shutdown(socket.SHUT_RDWR)
except IOError:
# ignore errors, most likely the server has stopped
# listening
pass
self.closed = True
def _start_request(self, request):
# Starts processing the request. Returns True if the request
# has been accepted for processing, False otherwise.
self.request = request
self.request.connect(self, self.buffered_bytes)
self.request.start_sending(self.protocol)
headers = self.request.send_start() + self.request.send_header()
logging.debug("Sending to %s: \n%s", self.host, headers)
self.send_buffer.append(headers)
self.buffered_bytes += len(headers)
# Now check to see if we have an expect header set
if self.request.get_expect_continue():
self.request_mode = self.REQ_BODY_WAITING
self.continue_waitstart = 0
else:
self.request_mode = self.REQ_BODY_SENDING
logging.debug("%s: request mode=%s", self.host,
self.MODE_STRINGS[self.request_mode])
if self.response:
# Queue a response as we're still handling the last one!
self.response_queue.append(request.response)
else:
# if there is no response, we may have been idle for some
# time, check the connection...
if self.socket:
self._check_socket()
self.response = request.response
self.response.start_receiving()
return True
def _check_socket(self):
# Checks to see if the socket has been shutdown
logging.debug("_check_socket: attempting to read two bytes...")
data = None
try:
# we're not expecting any data, read two bytes just in case
# we get a stray CRLF that we're supposed to be ignoring.
data = self.socket.recv(2)
except ssl.SSLError as err:
if (err.args[0] == ssl.SSL_ERROR_WANT_READ or
err.args[0] == ssl.SSL_ERROR_WANT_WRITE):
# blocked on SSL read, when did we last read or write?
# we can only guess the server's keep alive timeout,
# Apache defaults to 5s, anything more than 2s and we'll
# recycle the socket.
if self.last_rw + 2 > time.time():
return
else:
logging.warn("socket.recv raised %s", str(err))
except IOError as err:
if err.errno == errno.EAGAIN:
# we're blocked on recv
return
else:
logging.warn("socket.recv raised %s", str(err))
if data:
logging.error("Unexpected data in _check_socket: %s: \n%s",
self.host, repr(data))
logging.debug("%s: _check_socket detected closed socket", self.host)
with self.lock:
olds = self.socket
self.socket = None
if olds is not None:
self._close_socket(olds)
def _send_request_data(self):
# Sends the next chunk of data in the buffer
if not self.send_buffer:
return
data = self.send_buffer[0]
if data:
try:
nbytes = self.socket.send(data)
self.sent_bytes += nbytes
self.last_rw = time.time()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
# we're blocked on recv, really this can happen!
return (True, False)
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# we're blocked on send, really this can happen!
return (False, True)
else:
# we're going to swallow this error, log it
logging.error("socket.recv raised %s", str(err))
data = None
except IOError as err:
if err.errno == errno.EAGAIN:
# we're blocked on send
return (False, True)
# stop everything
self.close(err)
return (False, False)
if nbytes == 0:
# We can't send any more data to the socket
# The other side has closed the connection
# Strangely, there is nothing much to do here,
# if the server fails to send a response that
# will be handled more seriously. However,
# we do change to a mode that prevents future
# requests!
self.request.disconnect(self.sent_bytes)
self.request = None
                self.request_mode = self.CLOSE_WAIT
self.send_buffer = []
elif nbytes < len(data):
# Some of the data went:
self.send_buffer[0] = data[nbytes:]
else:
del self.send_buffer[0]
else:
# shouldn't get empty strings in the buffer but if we do, delete
# them, no change to the buffer size!
del self.send_buffer[0]
return (False, False)
def _recv_task(self):
# We ask the response what it is expecting and try and
# satisfy that, we return True when the response has been
# received completely, False otherwise"""
err = None
try:
data = self.socket.recv(io.DEFAULT_BUFFER_SIZE)
self.last_rw = time.time()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
# we're blocked on recv
return (False, True, False)
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# we're blocked on send, really this can happen!
return (False, False, True)
else:
# we're going to swallow this error, log it
logging.error("socket.recv raised %s", str(err))
data = None
except IOError as err:
if err.errno == errno.EAGAIN:
# we're blocked on recv
return (False, True, False)
# We can't truly tell if the server hung-up except by
# getting an error here so this error could be fairly benign.
logging.warn("socket.recv raised %s", str(err))
data = None
logging.debug("Reading from %s: \n%s", self.host, repr(data))
if data:
nbytes = len(data)
self.recv_buffer.append(data)
self.recv_buffer_size += nbytes
logging.debug("Read buffer size: %i" % self.recv_buffer_size)
else:
logging.debug("%s: closing connection after recv returned no "
"data on ready to read socket", self.host)
self.close()
return (True, False, False)
# Now loop until we can't satisfy the response anymore (or the response
# is done)
while self.response is not None:
recv_needs = self.response.recv_mode()
if recv_needs is None:
# We don't need any bytes at all, the response is done
return (True, False, False)
elif recv_needs == messages.Message.RECV_HEADERS:
# scan for CRLF, consolidate first
data = string.join(self.recv_buffer, '')
pos = data.find(grammar.CRLF)
if pos == 0:
# just a blank line, no headers
lines = [grammar.CRLF]
data = data[2:]
elif pos > 0:
# we need CRLFCRLF actually
pos = data.find(grammar.CRLF + grammar.CRLF)
# pos can't be 0 now...
if pos > 0:
# split the data into lines
lines = map(
lambda x: x + grammar.CRLF,
data[0:pos + 2].split(grammar.CRLF))
data = data[pos + 4:]
elif err:
self.close(err)
return (True, False, False)
elif pos < 0:
# We didn't find the data we wanted this time
break
if data:
self.recv_buffer = [data]
self.recv_buffer_size = len(data)
else:
self.recv_buffer = []
self.recv_buffer_size = 0
if lines:
# logging.debug("Response Headers: %s", repr(lines))
self.response.recv(lines)
elif recv_needs == messages.Message.RECV_LINE:
# scan for CRLF, consolidate first
data = string.join(self.recv_buffer, '')
pos = data.find(grammar.CRLF)
if pos >= 0:
line = data[0:pos + 2]
data = data[pos + 2:]
elif err:
self.close(err)
return (True, False, False)
else:
# We didn't find the data we wanted this time
break
if data:
self.recv_buffer = [data]
self.recv_buffer_size = len(data)
else:
self.recv_buffer = []
self.recv_buffer_size = 0
if line:
# logging.debug("Response Header: %s", repr(line))
self.response.recv(line)
elif recv_needs == messages.Message.RECV_ALL:
# As many as possible please
logging.debug("Response reading until connection closes")
if self.recv_buffer_size > 0:
data = self.recv_buffer[0]
self.recv_buffer = self.recv_buffer[1:]
self.recv_buffer_size -= len(data)
# logging.debug("Response Data: %s", repr(data))
self.response.recv(data)
else:
# recv_buffer is empty but we still want more
break
elif recv_needs == 0:
# we're blocked
logging.debug("Response blocked on write")
self.response.recv(None)
elif recv_needs > 0:
if self.recv_buffer_size:
logging.debug("Response waiting for %s bytes",
str(recv_needs - self.recv_buffer_size))
data = self.recv_buffer[0]
if len(data) <= recv_needs:
self.recv_buffer = self.recv_buffer[1:]
self.recv_buffer_size -= len(data)
else:
# we only want part of the data
self.recv_buffer[0] = data[recv_needs:]
self.recv_buffer_size -= recv_needs
data = data[:recv_needs]
# logging.debug("Response Data: %s", repr(data))
self.response.recv(data)
else:
# We can't satisfy the response
break
else:
raise RuntimeError("Unexpected recv mode: %s" %
repr(recv_needs))
return (False, False, False)
def new_socket(self):
with self.lock:
if self.closed:
logging.error(
"new_socket called on dead connection to %s", self.host)
raise messages.HTTPException("Connection closed")
self.socket = None
self.socket_file = None
self.socketSelect = select.select
        snew = None
        try:
for target in self.manager.dnslookup(self.host, self.port):
family, socktype, protocol, canonname, address = target
try:
snew = socket.socket(family, socktype, protocol)
snew.connect(address)
except IOError:
if snew:
snew.close()
snew = None
continue
break
except socket.gaierror, e:
snew = None
raise messages.HTTPException(
"failed to connect to %s (%s)" % (self.host, e[1]))
if not snew:
raise messages.HTTPException("failed to connect to %s" % self.host)
else:
with self.lock:
if self.closed:
# This connection has been killed
self._close_socket(snew)
logging.error(
"Connection killed while connecting to %s", self.host)
raise messages.HTTPException("Connection closed")
else:
self.socket = snew
self.socket_file = self.socket.fileno()
self.socket.setblocking(False)
self.socketSelect = select.select
def _close_socket(self, s):
try:
s.shutdown(socket.SHUT_RDWR)
except IOError:
# ignore errors, most likely the server has stopped listening
pass
try:
s.close()
except IOError:
pass
class SecureConnection(Connection):
def __init__(self, manager, scheme, hostname, port, ca_certs=None):
super(SecureConnection, self).__init__(manager, scheme, hostname, port)
self.ca_certs = ca_certs
def new_socket(self):
super(SecureConnection, self).new_socket()
try:
with self.lock:
if self.socket is not None:
self.socket.setblocking(True)
socket_ssl = ssl.wrap_socket(
self.socket, ca_certs=self.ca_certs,
cert_reqs=ssl.CERT_REQUIRED if
self.ca_certs is not None else ssl.CERT_NONE)
self.socketTransport = self.socket
self.socket.setblocking(False)
self.socket = socket_ssl
logging.info(
"Connected to %s with %s, %s, key length %i",
self.host, *self.socket.cipher())
except IOError as e:
logging.warn(str(e))
raise messages.HTTPException(
"failed to build secure connection to %s" % self.host)
class Client(PEP8Compatibility, object):
"""An HTTP client
.. note::
In Pyslet 0.4 and earlier the name HTTPRequestManager was used,
this name is still available as an alias for Client.
The object manages the sending and receiving of HTTP/1.1 requests
and responses respectively. There are a number of keyword arguments
that can be used to set operational parameters:
max_connections
The maximum number of HTTP connections that may be open at any
one time. The method :py:meth:`queue_request` will block (or
raise :py:class:`RequestManagerBusy`) if an attempt to queue a
request would cause this limit to be exceeded.
timeout
The maximum wait time on the connection. This is not the same
as a limit on the total time to receive a request but a limit on
the time the client will wait with no activity on the connection
before assuming that the server is no longer responding.
Defaults to None, no timeout.
max_inactive (None)
The maximum time to keep a connection inactive before
terminating it. By default, HTTP connections are kept open when
the protocol allows. These idle connections are kept in a pool
and can be reused by any thread. This is useful for web-service
type use cases (for which Pyslet has been optimised) but it is
        poor practice to keep these connections open indefinitely and,
        in any case, most servers will hang up after a fairly short
        period of time.
If not None, this setting causes a cleanup thread to be created
that calls the :meth:`idle_cleanup` method periodically passing
this setting value as its argument.
ca_certs
The file name of a certificate file to use when checking SSL
connections. For more information see
http://docs.python.org/2.7/library/ssl.html
In practice, there seem to be serious limitations on SSL
connections and certificate validation in Python distributions
linked to earlier versions of the OpenSSL library (e.g., Python
2.6 installed by default on OS X and Windows).
.. warning::
By default, ca_certs is optional and can be passed as None. In
this mode certificates will not be checked and your connections
are not secure from man in the middle attacks. In production
use you should always specify a certificate file if you expect
to use the object to make calls to https URLs.
Although max_connections allows you to make multiple connections to
the same host+port the request manager imposes an additional
restriction. Each thread can make at most 1 connection to each
host+port. If multiple requests are made to the same host+port from
the same thread then they are queued and will be sent to the server
over the same connection using HTTP/1.1 pipelining. The manager
(mostly) takes care of the following restriction imposed by RFC2616:
Clients SHOULD NOT pipeline requests using non-idempotent
methods or non-idempotent sequences of methods
In other words, a POST (or CONNECT) request will cause the
pipeline to stall until all the responses have been received. Users
should beware of non-idempotent sequences as these are not
automatically detected by the manager. For example, a GET,PUT
sequence on the same resource is not idempotent. Users should wait
for the GET request to finish fetching the resource before queuing a
PUT request that overwrites it.
In summary, to take advantage of multiple simultaneous connections
to the same host+port you must use multiple threads."""
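    # Illustrative sketch (not from the original source): a typical
    # construction based on the keyword arguments described above; the
    # certificate path is a placeholder.
    #
    #   client = Client(max_connections=10, timeout=60,
    #                   ca_certs="/path/to/ca_bundle.pem",
    #                   max_inactive=15)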
ConnectionClass = Connection
SecureConnectionClass = SecureConnection
def __init__(self, max_connections=100, ca_certs=None, timeout=None,
max_inactive=None):
PEP8Compatibility.__init__(self)
self.managerLock = threading.Condition()
# the id of the next connection object we'll create
self.nextId = 1
self.cActiveThreadTargets = {}
# A dict of active connections keyed on thread and target (always
# unique)
self.cActiveThreads = {}
# A dict of dicts of active connections keyed on thread id then
# connection id
self.cIdleTargets = {}
# A dict of dicts of idle connections keyed on target and then
# connection id
self.cIdleList = {}
# A dict of idle connections keyed on connection id (for keeping count)
self.closing = threading.Event() # set if we are closing
# maximum number of connections to manage (set only on construction)
self.max_connections = max_connections
# maximum wait time on connections
self.timeout = timeout
# cached results from socket.getaddrinfo keyed on (hostname,port)
self.dnsCache = {}
self.ca_certs = ca_certs
self.credentials = []
self.cookie_store = None
self.socketSelect = select.select
self.httpUserAgent = "%s (http.client.Client)" % str(USER_AGENT)
"""The default User-Agent string to use, defaults to a string
derived from the installed version of Pyslet, e.g.::
pyslet 0.5.20140727 (http.client.Client)"""
# start the connection cleaner thread if required
if max_inactive is not None:
t = threading.Thread(
target=self._run_cleanup,
kwargs={'max_inactive': max_inactive})
t.setDaemon(True)
t.start()
logging.info("Starting cleaner thread with max_inactive=%f" %
float(max_inactive))
def set_cookie_store(self, cookie_store):
self.cookie_store = cookie_store
@classmethod
def get_server_certificate_chain(cls, url, method=None, options=None):
"""Returns the certificate chain for an https URL
url
A :class:`~pyslet.rfc2396.URI` instance. This must use the
https scheme or ValueError will be raised.
method (SSL.TLSv1_METHOD)
The SSL method to use, one of the constants from the
pyOpenSSL module.
options (None)
The SSL options to use, as defined by the pyOpenSSL module.
For example, SSL.OP_NO_SSLv2.
This method requires pyOpenSSL to be installed, if it isn't then
a RuntimeError is raised.
The address and port is extracted from the URL and interrogated
for its certificate chain. No validation is performed. The
result is a string containing the concatenated PEM format
certificate files. This string is equivalent to the output of
the following UNIX command::
echo | openssl s_client -showcerts -connect host:port 2>&1 |
sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p'
The purpose of this method is to provide something like the
ssh-style trust whereby you can download the chain the first
time you connect, store it to a file and then use that file for
the ca_certs argument for SSL validation in future.
If the site certificate changes to one that doesn't validate to
a certificate in the same chain then the SSL connection will
fail.
As this method does no validation there is no protection against
a man-in-the-middle attack when you use this method. You should
only use this method when you trust the machine and connection
you are using or when you have some other way to independently
verify that the certificate chain is good."""
if OpenSSL is None:
raise RuntimeError(
"get_server_certificate_chain requires pyOpenSSL")
if method is None:
method = SSL.TLSv1_METHOD
if not isinstance(url, uri.URI) or not url.scheme.lower() == 'https':
raise ValueError(str(url))
addr = url.get_addr()
context = SSL.Context(method)
if options is not None:
context.set_options(options)
sock = socket.socket()
connection = SSL.Connection(context, sock)
connection.connect(addr)
connection.do_handshake()
chain = connection.get_peer_cert_chain()
output = []
for cert in chain:
if not output:
if cert.get_subject().commonName != addr[0].lower():
logging.warning("Certificate common name: %s",
cert.get_subject().commonName)
output.append(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
connection.shutdown()
connection.close()
return string.join(output, '')
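    # Illustrative sketch (not from the original source): the ssh-style trust
    # described above -- fetch the chain once, save it, then reuse it as
    # ca_certs on later runs.  "chain.pem" is a placeholder file name and
    # url is assumed to be an https uri.URI instance.
    #
    #   pem = Client.get_server_certificate_chain(url)
    #   with open("chain.pem", "wb") as f:
    #       f.write(pem)
    #   client = Client(ca_certs="chain.pem")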
def queue_request(self, request, timeout=None):
"""Starts processing an HTTP *request*
request
A :py:class:`messages.Request` object.
timeout
Number of seconds to wait for a free connection before
timing out. A timeout raises :py:class:`RequestManagerBusy`
None means wait forever, 0 means don't block.
The default implementation adds a User-Agent header from
:py:attr:`httpUserAgent` if none has been specified already.
You can override this method to add other headers appropriate
for a specific context but you must pass this call on to this
implementation for proper processing."""
if self.httpUserAgent and not request.has_header('User-Agent'):
request.set_header('User-Agent', self.httpUserAgent)
# assign this request to a connection straight away
start = time.time()
thread_id = threading.current_thread().ident
thread_target = (
thread_id, request.scheme, request.hostname, request.port)
target = (request.scheme, request.hostname, request.port)
with self.managerLock:
if self.closing.is_set():
raise ConnectionClosed
while True:
# Step 1: search for an active connection to the same
# target already bound to our thread
if thread_target in self.cActiveThreadTargets:
connection = self.cActiveThreadTargets[thread_target]
break
# Step 2: search for an idle connection to the same
# target and bind it to our thread
elif target in self.cIdleTargets:
cidle = self.cIdleTargets[target].values()
cidle.sort()
# take the youngest connection
connection = cidle[-1]
self._activate_connection(connection, thread_id)
break
# Step 3: create a new connection
elif (len(self.cActiveThreadTargets) + len(self.cIdleList) <
self.max_connections):
connection = self._new_connection(target)
self._activate_connection(connection, thread_id)
break
# Step 4: delete the oldest idle connection and go round again
elif len(self.cIdleList):
cidle = self.cIdleList.values()
cidle.sort()
connection = cidle[0]
self._delete_idle_connection(connection)
# Step 5: wait for something to change
else:
now = time.time()
if timeout == 0:
logging.warn(
"non-blocking call to queue_request failed to "
"obtain an HTTP connection")
raise RequestManagerBusy
elif timeout is not None and now > start + timeout:
logging.warn(
"queue_request timed out while waiting for "
"an HTTP connection")
raise RequestManagerBusy
logging.debug(
"queue_request forced to wait for an HTTP connection")
self.managerLock.wait(timeout)
logging.debug(
"queue_request resuming search for an HTTP connection")
# add this request to the queue on the connection
connection.queue_request(request)
request.set_client(self)
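    # Illustrative sketch (not from the original source): the docstring above
    # invites overriding queue_request to add context-specific headers,
    # provided the call is passed on to this implementation.  The header name
    # here is purely hypothetical.
    #
    #   class MyClient(Client):
    #       def queue_request(self, request, timeout=None):
    #           if not request.has_header('X-Example'):
    #               request.set_header('X-Example', 'value')
    #           super(MyClient, self).queue_request(request, timeout)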
def active_count(self):
"""Returns the total number of active connections."""
with self.managerLock:
return len(self.cActiveThreadTargets)
def thread_active_count(self):
"""Returns the total number of active connections associated
with the current thread."""
thread_id = threading.current_thread().ident
with self.managerLock:
return len(self.cActiveThreads.get(thread_id, {}))
def _activate_connection(self, connection, thread_id):
# safe if connection is new and not in the idle list
connection.thread_id = thread_id
target = connection.target_key()
thread_target = connection.thread_target_key()
with self.managerLock:
self.cActiveThreadTargets[thread_target] = connection
if thread_id in self.cActiveThreads:
self.cActiveThreads[thread_id][connection.id] = connection
else:
self.cActiveThreads[thread_id] = {connection.id: connection}
if connection.id in self.cIdleList:
del self.cIdleList[connection.id]
del self.cIdleTargets[target][connection.id]
if not self.cIdleTargets[target]:
del self.cIdleTargets[target]
def _deactivate_connection(self, connection):
# called when connection goes idle, it is possible that this
# connection has been killed and just doesn't know it (like
# Bruce Willis in Sixth Sense) so we take care to return it
# to the idle pool only if it was in the active pool
target = connection.target_key()
thread_target = connection.thread_target_key()
with self.managerLock:
if thread_target in self.cActiveThreadTargets:
del self.cActiveThreadTargets[thread_target]
self.cIdleList[connection.id] = connection
if target in self.cIdleTargets:
self.cIdleTargets[target][connection.id] = connection
else:
self.cIdleTargets[target] = {connection.id: connection}
# tell any threads waiting for a connection
self.managerLock.notify()
if connection.thread_id in self.cActiveThreads:
if connection.id in self.cActiveThreads[connection.thread_id]:
del self.cActiveThreads[
connection.thread_id][connection.id]
if not self.cActiveThreads[connection.thread_id]:
del self.cActiveThreads[connection.thread_id]
connection.thread_id = None
def _upgrade_connection(self, connection):
# Removes connection from the active pool
#
# Called following a negotiated connection upgrade. The
# connection is taken out of the pool system completely. The
# implementation is similar to deactivation.
thread_target = connection.thread_target_key()
with self.managerLock:
if thread_target in self.cActiveThreadTargets:
del self.cActiveThreadTargets[thread_target]
if connection.thread_id in self.cActiveThreads:
if connection.id in self.cActiveThreads[connection.thread_id]:
del self.cActiveThreads[
connection.thread_id][connection.id]
if not self.cActiveThreads[connection.thread_id]:
del self.cActiveThreads[connection.thread_id]
connection.thread_id = None
def _delete_idle_connection(self, connection):
if connection.id in self.cIdleList:
target = connection.target_key()
del self.cIdleList[connection.id]
del self.cIdleTargets[target][connection.id]
if not self.cIdleTargets[target]:
del self.cIdleTargets[target]
connection.close()
def _nextid(self):
# Used internally to manage auto-incrementing connection ids
with self.managerLock:
id = self.nextId
self.nextId += 1
return id
def _new_connection(self, target, timeout=None):
# Called by a connection pool when a new connection is required
scheme, host, port = target
if scheme == 'http':
connection = self.ConnectionClass(self, scheme, host, port,
timeout=self.timeout)
elif scheme == 'https':
connection = self.SecureConnectionClass(
self, scheme, host, port, self.ca_certs)
else:
raise NotImplementedError(
"Unsupported connection scheme: %s" % scheme)
return connection
def thread_task(self, timeout=None):
"""Processes all connections bound to the current thread then
blocks for at most timeout (0 means don't block) while waiting
to send/receive data from any active sockets.
Each active connection receives one call to
:py:meth:`Connection.connection_task` There are some situations
where this method may still block even with timeout=0. For
example, DNS name resolution and SSL handshaking. These may be
improved in future.
Returns True if at least one connection is active, otherwise
returns False."""
thread_id = threading.current_thread().ident
with self.managerLock:
connections = self.cActiveThreads.get(thread_id, {}).values()
if not connections:
return False
readers = []
writers = []
wait_time = None
for c in connections:
try:
r, w, tmax = c.connection_task()
if wait_time is None or (tmax is not None and
wait_time > tmax):
# shorten the timeout
wait_time = tmax
if r:
readers.append(r)
if w:
writers.append(w)
except Exception as err:
c.close(err)
pass
if readers or writers:
if timeout is not None:
if wait_time is not None:
if timeout < wait_time:
wait_time = timeout
try:
logging.debug("thread_task waiting for select: "
"readers=%s, writers=%s, timeout=%f",
repr(readers), repr(writers), timeout)
r, w, e = self.socketSelect(readers, writers, [], wait_time)
except select.error, err:
logging.error("Socket error from select: %s", str(err))
elif wait_time is not None:
# not waiting for i/o, let time pass
logging.debug("thread_task waiting to retry: %f", wait_time)
time.sleep(wait_time)
return True
def thread_loop(self, timeout=60):
"""Repeatedly calls :py:meth:`thread_task` until it returns False."""
while self.thread_task(timeout):
continue
# self.close()
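    # Illustrative non-blocking usage (a sketch, not part of the library;
    # ``client`` and the URLs are placeholders): queue several requests
    # and then drive them from the current thread with thread_loop.
    #
    #   requests = [ClientRequest(u) for u in
    #               ("http://www.example.com/a", "http://www.example.com/b")]
    #   for r in requests:
    #       client.queue_request(r, 60)
    #   client.thread_loop(timeout=60)
    #   for r in requests:
    #       print r.status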
def process_request(self, request, timeout=60):
"""Process an :py:class:`messages.Message` object.
The request is queued and then :py:meth:`thread_loop` is called
to exhaust all HTTP activity initiated by the current thread."""
self.queue_request(request, timeout)
self.thread_loop(timeout)
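    # Illustrative blocking usage (a sketch; ``client`` and the URL are
    # placeholders): process_request queues the request and then blocks
    # until all HTTP activity started by this thread has finished.
    #
    #   request = ClientRequest("http://www.example.com/")
    #   client.process_request(request)
    #   if request.status == 200:
    #       print request.res_body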
def _run_cleanup(self, max_inactive=15):
# run this thread at most once per second
if max_inactive < 1:
max_inactive = 1
while not self.closing.is_set():
self.closing.wait(max_inactive)
self.idle_cleanup(max_inactive)
def idle_cleanup(self, max_inactive=15):
"""Cleans up any idle connections that have been inactive for
more than *max_inactive* seconds."""
clist = []
now = time.time()
with self.managerLock:
for connection in self.cIdleList.values():
if connection.last_active < now - max_inactive:
clist.append(connection)
del self.cIdleList[connection.id]
target = connection.target_key()
if target in self.cIdleTargets:
del self.cIdleTargets[target][connection.id]
if not self.cIdleTargets[target]:
del self.cIdleTargets[target]
# now we can clean up these connections in a more leisurely fashion
if clist:
logging.debug("idle_cleanup closing connections...")
for connection in clist:
connection.close()
def active_cleanup(self, max_inactive=90):
"""Clean up active connections that have been inactive for
more than *max_inactive* seconds.
This method can be called from any thread and can be used to
remove connections that have been abandoned by their owning
thread. This can happen if the owning thread stops calling
:py:meth:`thread_task` leaving some connections active.
Inactive connections are killed using :py:meth:`Connection.kill`
and then removed from the active list. Should the owning thread
wake up and attempt to finish processing the requests a socket
error or :py:class:`messages.HTTPException` will be reported."""
clist = []
now = time.time()
with self.managerLock:
for thread_id in self.cActiveThreads:
for connection in self.cActiveThreads[thread_id].values():
if connection.last_active < now - max_inactive:
# remove this connection from the active lists
del self.cActiveThreads[thread_id][connection.id]
del self.cActiveThreadTargets[
connection.thread_target_key()]
clist.append(connection)
if clist:
# if stuck threads were blocked waiting for a connection
# then we can wake them up, one for each connection
# killed
self.managerLock.notify(len(clist))
if clist:
logging.debug("active_cleanup killing connections...")
for connection in clist:
connection.kill()
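    # A housekeeping thread might call the cleanup methods periodically,
    # for example (a sketch; ``client``, ``shutting_down`` and the
    # intervals are placeholders):
    #
    #   while not shutting_down:
    #       client.active_cleanup(90)
    #       client.idle_cleanup(15)
    #       time.sleep(15)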
def close(self):
"""Closes all connections and sets the manager to a state where
        new connections cannot be created.
Active connections are killed, idle connections are closed."""
while True:
with self.managerLock:
self.closing.set()
if len(self.cActiveThreadTargets) + len(self.cIdleList) == 0:
break
self.active_cleanup(0)
self.idle_cleanup(0)
def add_credentials(self, credentials):
"""Adds a :py:class:`pyslet.http.auth.Credentials` instance to
this manager.
Credentials are used in response to challenges received in HTTP
401 responses."""
with self.managerLock:
self.credentials.append(credentials)
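    # Typical usage (a sketch): construct a credentials object from
    # pyslet.http.auth and register it before making requests.  The
    # attribute assignments shown are assumptions about that class and
    # may differ from the real API.
    #
    #   credentials = auth.BasicCredentials()
    #   credentials.userid = "user"
    #   credentials.password = "secret"
    #   credentials.protectionSpace = uri.URI.from_octets(
    #       "http://www.example.com/").get_canonical_root()
    #   client.add_credentials(credentials)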
def remove_credentials(self, credentials):
"""Removes credentials from this manager.
credentials
A :py:class:`pyslet.http.auth.Credentials` instance
previously added with :py:meth:`add_credentials`.
If the credentials can't be found then they are silently ignored
as it is possible that two threads may independently call the
method with the same credentials."""
with self.managerLock:
for i in xrange(len(self.credentials)):
if self.credentials[i] is credentials:
del self.credentials[i]
def dnslookup(self, host, port):
"""Given a host name (string) and a port number performs a DNS lookup
using the native socket.getaddrinfo function. The resulting value is
added to an internal dns cache so that subsequent calls for the same
host name and port do not use the network unnecessarily.
If you want to flush the cache you must do so manually using
:py:meth:`flush_dns`."""
with self.managerLock:
result = self.dnsCache.get((host, port), None)
if result is None:
# do not hold the lock while we do the DNS lookup, this may
# result in multiple overlapping DNS requests but this is
# better than a complete block.
logging.debug("Looking up %s", host)
result = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
with self.managerLock:
# blindly populate the cache
self.dnsCache[(host, port)] = result
return result
def flush_dns(self):
"""Flushes the DNS cache."""
with self.managerLock:
self.dnsCache = {}
def find_credentials(self, challenge):
"""Searches for credentials that match *challenge*"""
logging.debug("Client searching for credentials in "
"%s with challenge %s",
challenge.protectionSpace, str(challenge))
with self.managerLock:
for c in self.credentials:
if c.match_challenge(challenge):
return c
def find_credentials_by_url(self, url):
"""Searches for credentials that match *url*"""
with self.managerLock:
for c in self.credentials:
if c.test_url(url):
return c
HTTPRequestManager = Client
class ClientRequest(messages.Request):
"""Represents an HTTP request.
To make an HTTP request, create an instance of this class and then
pass it to an :py:class:`Client` instance using either
:py:meth:`Client.queue_request` or
:py:meth:`Client.process_request`.
url
An absolute URI using either http or https schemes. A
:py:class:`pyslet.rfc2396.URI` instance or an object that can be
passed to its constructor.
And the following keyword arguments:
method
A string. The HTTP method to use, defaults to "GET"
entity_body
A string or stream-like object containing the request body.
Defaults to None meaning no message body. For stream-like
objects the tell and seek methods must be supported to enable
resending the request if required.
res_body
A stream-like object to write data to. Defaults to None, in
which case the response body is returned as a string in the
        :py:attr:`res_body` attribute.
protocol
An :py:class:`params.HTTPVersion` object, defaults to
HTTPVersion(1,1)
autoredirect
Whether or not the request will follow redirects, defaults to
True.
max_retries
The maximum number of times to attempt to resend the request
following an error on the connection or an unexpected hang-up.
        Defaults to 3; you should not use a value lower than 1 because,
when pipelining, it is always possible that the server has
gracefully closed the socket and we won't notice until we've
sent the request and get 0 bytes back on recv. Although
'normal' this scenario counts as a retry."""
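    # Example construction (a sketch; ``client``, the URL and the file
    # name are placeholders): stream a large response body directly to a
    # file instead of buffering it in res_body.
    #
    #   with open("download.bin", "wb") as f:
    #       request = ClientRequest("http://www.example.com/big",
    #                               res_body=f)
    #       client.process_request(request)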
def __init__(self, url, method="GET", res_body=None,
protocol=params.HTTP_1p1, auto_redirect=True,
max_retries=3, min_retry_time=5, **kwargs):
super(ClientRequest, self).__init__(**kwargs)
#: the :py:class:`Client` object that is managing us
self.manager = None
#: the :py:class:`Connection` object that is currently sending us
self.connection = None
# private member used to determine if we've been sent
self._send_pos = 0
#: the status code received, 0 indicates a failed or unsent request
self.status = 0
#: If status == 0, the error raised during processing
self.error = None
#: the scheme of the request (http or https)
self.scheme = None
#: the hostname of the origin server
self.hostname = None
#: the port on the origin server
self.port = None
#: the full URL of the requested resource
self.url = None
self.set_url(url)
# copy over the keyword arguments
self.method = method
if type(protocol) in types.StringTypes:
self.protocol = params.HTTPVersion.from_str(protocol)
elif isinstance(protocol, params.HTTPVersion):
self.protocol = protocol
else:
raise TypeError("illegal value for protocol")
#: the response body received (only used if not streaming)
self.res_body = ''
if res_body is not None:
# assume that the res_body is a stream like object
self.res_bodystream = res_body
else:
self.res_bodystream = None
#: whether or not auto redirection is in force for 3xx responses
self.auto_redirect = auto_redirect
#: the maximum number of retries we'll attempt
self.max_retries = max_retries
#: the number of retries we've had
self.nretries = 0
self.retry_time = 0
self._rt1 = 0
self._rt2 = min_retry_time
#: the associated :py:class:`ClientResponse`
self.response = ClientResponse(request=self)
        # the credentials we're using in this request (and that were used
        # to set the Authorization header); this attribute is set when we
        # are responding to a 401 and the managing Client has credentials
        # that meet the challenge received in the response.  We keep
        # track of them here to avoid constantly looping with the same
        # broken credentials.
self.tried_credentials = None
#: the send pipe to use on upgraded connections
self.send_pipe = None
#: the recv pipe to use on upgraded connections
self.recv_pipe = None
def set_url(self, url):
"""Sets the URL for this request
This method sets the Host header and the following local
attributes:
:py:attr:`scheme`, :py:attr:`hostname`, :py:attr:`port` and
:py:attr:`request_uri`."""
with self.lock:
if not isinstance(url, uri.URI):
url = uri.URI.from_octets(url)
self.url = url
if self.url.userinfo:
raise NotImplementedError(
"username(:password) in URL not yet supported")
if self.url.abs_path:
self.request_uri = self.url.abs_path
else:
self.request_uri = "/"
if self.url.query is not None:
self.request_uri = self.request_uri + '?' + self.url.query
if not isinstance(self.url, params.HTTPURL):
raise messages.HTTPException(
"Scheme not supported: %s" % self.url.scheme)
elif isinstance(self.url, params.HTTPSURL):
self.scheme = 'https'
else:
self.scheme = 'http'
self.hostname = self.url.host
custom_port = False
if self.url.port:
# custom port, perhaps
self.port = int(self.url.port)
if self.port != self.url.DEFAULT_PORT:
custom_port = True
else:
self.port = self.url.DEFAULT_PORT
# The Host request-header field (section 14.23) MUST
# accompany all HTTP/1.1 requests.
if self.hostname:
if not custom_port:
self.set_host(self.hostname)
else:
self.set_host("%s:%i" % (self.hostname, self.port))
else:
raise messages.HTTPException("No host in request URL")
def can_retry(self):
"""Returns True if we reconnect and retry this request"""
if self.nretries > self.max_retries:
logging.error("%s retry limit exceeded", self.get_start())
return False
else:
return True
def resend(self, url=None):
self.status = 0
self.error = None
if url is not None:
# if the host, port or scheme is different, strip any
# Authorization header
if self.url.get_canonical_root() != url.get_canonical_root():
self.set_authorization(None)
self.set_url(url)
# always strip cookies, as these are added at the last
# minute from the cookie store
self.set_cookie(None)
logging.info("Resending request to: %s", str(self.url))
self.manager.queue_request(self)
def set_client(self, client):
"""Called when we are queued for processing.
client
an :py:class:`Client` instance"""
self.manager = client
def connect(self, connection, send_pos):
"""Called when we are assigned to an HTTPConnection"
connection
A :py:class:`Connection` object
send_pos
The position of the sent bytes pointer after which this
request has been (or at least has started to be) sent."""
self.connection = connection
self._send_pos = send_pos
def disconnect(self, send_pos):
"""Called when the connection has finished sending us
This may be before or after the response is received and
handled!
send_pos
The number of bytes sent on this connection before the
disconnect. This value is compared with the value passed to
:py:meth:`connect` to determine if the request was actually
sent to the server or abandoned without a byte being sent.
For idempotent methods we lose a life every time. For
non-idempotent methods (e.g., POST) we do the same except
        that if we have been (at least partially) sent then we lose all
lives to prevent "indeterminate results"."""
self.nretries += 1
if self.is_idempotent() or send_pos <= self._send_pos:
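            # Retry timing grows in a Fibonacci-like sequence: _rt1 and
            # _rt2 hold successive terms, seeded from min_retry_time, and
            # the (5 - 2 * random()) / 4 factor jitters each delay to
            # between 0.75 and 1.25 times _rt1.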
self.retry_time = (time.time() +
self._rt1 * (5 - 2 * random.random()) / 4)
rtnext = self._rt1 + self._rt2
self._rt2 = self._rt1
self._rt1 = rtnext
else:
self.max_retries = 0
self.connection = None
if self.status > 0:
# The response has finished
self.finished()
def send_header(self):
# Check authorization and add credentials if the manager has them
if not self.has_header("Authorization"):
credentials = self.manager.find_credentials_by_url(self.url)
if credentials:
self.set_authorization(credentials)
if (self.manager.cookie_store is not None and
not self.has_header("Cookie")):
# add some cookies to this request
cookie_list = self.manager.cookie_store.search(self.url)
if cookie_list:
self.set_cookie(cookie_list)
return super(ClientRequest, self).send_header()
def response_finished(self, err=None):
# called when the response has been received
self.status = self.response.status
self.error = err
if self.status is None:
logging.error("Error receiving response, %s", str(self.error))
self.status = 0
self.finished()
else:
logging.info("Finished Response, status %i", self.status)
# we grab the cookies early in the flow, they may help
# improve the chances of a success follow-up call in the
# case of an error
if self.manager.cookie_store:
try:
cookie_list = self.response.get_set_cookie()
except ValueError as e:
# ignore these cookies
logging.warn("Ignoring cookies after %s", str(e))
cookie_list = None
if cookie_list:
for icookie in cookie_list:
try:
self.manager.cookie_store.set_cookie(
self.url, icookie)
logging.info("Stored cookie: %s", str(icookie))
except cookie.CookieError as e:
logging.warn("Error setting cookie %s: %s",
icookie.name, str(e))
if self.res_bodystream:
self.res_bodystream.flush()
else:
self.res_body = self.response.entity_body.getvalue()
if self.response.status >= 100 and self.response.status <= 199:
"""Received after a 100 continue or other 1xx status
response, we may be waiting for the connection to call
our send_body method. We need to tell it not to
wait any more!"""
if self.connection:
if (self.response.status == 101 and
"upgrade" in self.get_connection()):
self.connection.switch_protocols(self)
# We're not finished - the request will now be
# bound to this connection until it closes
self.response.keep_alive = True
else:
self.connection.continue_sending(self)
# We're not finished though, wait for the final
# response to be sent. No need to reset as the
# 100 response should not have a body
elif self.connection:
# The response was received before the connection
# finished with us
if self.status >= 300:
# Some type of error condition....
if isinstance(self.send_body(), str):
# There was more data to send in the request but we
# don't plan to send it so we have to hang up!
self.connection.request_disconnect()
# else, we were finished anyway... the connection will
# discover this itself
                elif self.status >= 200:
# For 2xx result codes we let the connection finish
# spooling and disconnect from us when it is done
pass
else:
# A bad information response (with body) or a bad status
# code
self.connection.request_disconnect()
else:
# The request is already disconnected, we're done
self.finished()
def finished(self):
"""Called when we have a final response *and* have disconnected
        from the connection.  There is no guarantee that the server got
all of our data, it might even have returned a 2xx series code
and then hung up before reading the data, maybe it already had
what it needed, maybe it thinks a 2xx response is more likely to
make us go away. Whatever. The point is that you can't be sure
that all the data was transmitted just because you got here and
the server says everything is OK"""
if self.tried_credentials is not None:
# we were trying out some credentials, if this is not a 401 assume
# they're good
if self.status == 401:
# we must remove these credentials, they matched the challenge
# but still resulted in 401
self.manager.remove_credentials(self.tried_credentials)
else:
if isinstance(self.tried_credentials, auth.BasicCredentials):
# path rule only works for BasicCredentials
self.tried_credentials.add_success_path(self.url.abs_path)
self.tried_credentials = None
if (self.auto_redirect and self.status >= 300 and
self.status <= 399 and
(self.status != 302 or
self.method.upper() in ("GET", "HEAD"))):
# If the 302 status code is received in response to a
# request other than GET or HEAD, the user agent MUST NOT
# automatically redirect the request unless it can be
# confirmed by the user
location = self.response.get_location()
if location:
if not location.host:
# This is an error but a common one (thanks IIS!)
location = location.resolve(self.url)
self.resend(location)
elif self.status == 401:
challenges = self.response.get_www_authenticate()
for c in challenges:
c.protectionSpace = self.url.get_canonical_root()
self.tried_credentials = self.manager.find_credentials(c)
if self.tried_credentials:
self.set_authorization(self.tried_credentials)
self.resend() # to the same URL
class ClientResponse(messages.Response):
def __init__(self, request, **kwargs):
super(ClientResponse, self).__init__(
request=request, entity_body=request.res_bodystream, **kwargs)
def handle_headers(self):
"""Hook for response header processing.
This method is called when a set of response headers has been
received from the server, before the associated data is
received! After this call, recv will be called zero or more
times until handle_message or handle_disconnect is called
indicating the end of the response.
Override this method, for example, if you want to reject or
invoke special processing for certain responses (e.g., based on
size) before the data itself is received. To abort the
response, close the connection using
:py:meth:`Connection.request_disconnect`.
Override the :py:meth:`Finished` method instead to clean up and
process the complete response normally."""
logging.debug(
"Request: %s %s %s", self.request.method, self.request.url,
str(self.request.protocol))
logging.debug(
"Got Response: %i %s", self.status, self.reason)
logging.debug("Response headers: %s", repr(self.headers))
super(ClientResponse, self).handle_headers()
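    # A subclass might use this hook to reject oversized responses, for
    # example (a sketch; the length accessor is an assumption about the
    # messages API and the 10MB limit is arbitrary):
    #
    #   class CappedResponse(ClientResponse):
    #       def handle_headers(self):
    #           length = self.get_content_length()
    #           if length is not None and length > 10 * 1024 * 1024:
    #               self.request.connection.request_disconnect()
    #           super(CappedResponse, self).handle_headers()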
def handle_message(self):
"""Hook for normal completion of response"""
self.finished()
super(ClientResponse, self).handle_message()
def handle_disconnect(self, err):
"""Hook for abnormal completion of the response
Called when the server disconnects before we've completed
reading the response. Note that if we are reading forever this
may be expected behaviour and *err* may be None.
We pass this information on to the request."""
if err is not None:
self.reason = str(err)
self.request.response_finished(err)
def finished(self):
self.request.response_finished()
if self.status >= 100 and self.status <= 199 and self.status != 101:
# Re-read this response, we may not be done!
self.start_receiving()
| [
"[email protected]"
]
| |
ccb02a3fa1531b24900ff7930c73705f40159f31 | ff8f55e26d4b9742e7c9766435898411f45254dd | /Dynamic_Programming/01_Paint_Fence.py | 3ac2cb20da29448a36cb75038deda92288fdaa7f | []
| no_license | saranyab9064/leetcode-geeks | 38ded6532ed91d4893a8ccee2147faf02e820554 | ca6ffffcb775c4caacc4dc907b9912b40a48a343 | refs/heads/master | 2021-06-27T22:27:53.591871 | 2021-03-10T01:49:55 | 2021-03-10T01:49:55 | 217,812,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py |
# ============================================================================
# Name : 01_Paint_Fence.py
# Author : Saranya Balakrishnan
# Mail Id : [email protected]
# ==========================================================================
"""
There is a fence with n posts; each post can be painted with one of k colors.
You have to paint all the posts such that no more than two adjacent fence posts have the same color.
Return the total number of ways you can paint the fence.
Note:
n and k are non-negative integers.
Example:
Input: n = 3, k = 2
Output: 6
Explanation: Take c1 as color 1, c2 as color 2. All possible ways are:
post1 post2 post3
----- ----- ----- -----
1 c1 c1 c2
2 c1 c2 c1
3 c1 c2 c2
4 c2 c1 c1
5 c2 c1 c2
6 c2 c2 c1
"""
class Solution(object):
def numWays(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
        # if there are zero posts there is nothing to paint
        if n == 0:
            return 0
        # if there is only one post, the number of ways to paint it is k
        elif n == 1:
            return k
        # for n >= 2: ways to paint the first two posts with the same color (k)
        same = k
        # ways to paint the first two posts with different colors (k * (k - 1))
        different = k * (k-1)
for i in range(3,n+1):
            prev_diff_value = different
            # post i differs from post i-1: any valid painting so far,
            # times k-1 color choices
            different = (same + different) * (k-1)
            # post i matches post i-1: allowed only when posts i-2 and
            # i-1 differed
            same = prev_diff_value * 1
return same + different
if __name__ == '__main__':
n = 3
k = 2
test = Solution()
ans = test.numWays(n,k)
print(ans)
| [
"[email protected]"
]
|