Dataset schema (one row per source file; ⌀ marks nullable values):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (3 to 616 chars)
content_id: string (length 40)
detected_licenses: sequence (0 to 112 items)
license_type: string (2 classes)
repo_name: string (5 to 115 chars)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (3 to 10.2M chars)
authors: sequence (1 item)
author_id: string (1 to 132 chars)
ff8f86292617a8597edc809076063b0f6261283c | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/SiamFC/src/dataset.py | 83c8e8c6e3e8ce864c6d87af664d12aa08b25bc6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 5,068 | py |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""VID dataset"""
import os
import pickle
import hashlib
import cv2
import numpy as np
from src.config import config
class ImagnetVIDDataset():
"""
    Image-pair dataset (exemplar and instance) intended to be wrapped in a
    GeneratorDataset.
    Args:
        db : lmdb database holding the images
        video_names : names of all videos
        data_dir : directory containing the image pairs
        z_transforms : transform list applied to the exemplar image
        x_transforms : transform list applied to the instance image
        training : whether the dataset is used for training
"""
def __init__(self, db, video_names, data_dir, z_transforms, x_transforms, training=True):
self.video_names = video_names
self.data_dir = data_dir
self.z_transforms = z_transforms
self.x_transforms = x_transforms
meta_data_path = os.path.join(data_dir, 'meta_data.pkl')
self.meta_data = pickle.load(open(meta_data_path, 'rb'))
self.meta_data = {x[0]: x[1] for x in self.meta_data}
for key in self.meta_data.keys():
trajs = self.meta_data[key]
for trkid in list(trajs.keys()):
if len(trajs[trkid]) < 2:
del trajs[trkid]
self.txn = db.begin(write=False)
self.num = len(self.video_names) if config.num_per_epoch is None or not \
training else config.num_per_epoch
def imread(self, path):
"""
        Read an image from the lmdb store, keyed by its path.
        Args:
path : the image path
"""
key = hashlib.md5(path.encode()).digest()
img_buffer = self.txn.get(key)
img_buffer = np.frombuffer(img_buffer, np.uint8)
img = cv2.imdecode(img_buffer, cv2.IMREAD_COLOR)
return img
def _sample_weights(self, center, low_idx, high_idx, s_type='uniform'):
"""
        Compute the sampling weights used to pick another image around the
        center image; the weighting follows the chosen distribution type.
Args:
center : the position of center image
low_idx : the minimum of id
high_idx : the max of id
s_type : choose different distribution. "uniform", "sqrt", "linear"
can be chosen
"""
weights = list(range(low_idx, high_idx))
weights.remove(center)
weights = np.array(weights)
if s_type == 'linear':
weights = abs(weights - center)
elif s_type == 'sqrt':
weights = np.sqrt(abs(weights - center))
elif s_type == 'uniform':
weights = np.ones_like(weights)
return weights / sum(weights)
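    # Worked example (an added illustration, not in the original): for center=2
    # over range(0, 5), the candidate ids are [0, 1, 3, 4] and the returned
    # weights are
    #   'uniform' -> [0.25, 0.25, 0.25, 0.25]
    #   'linear'  -> [1/3, 1/6, 1/6, 1/3]  (proportional to distance from center)
    #   'sqrt'    -> [sqrt(2), 1, 1, sqrt(2)] / (2 + 2*sqrt(2))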
def __getitem__(self, idx):
idx = idx % len(self.video_names)
video = self.video_names[idx]
trajs = self.meta_data[video]
trkid = np.random.choice(list(trajs.keys()))
traj = trajs[trkid]
assert len(traj) > 1, "video_name: {}".format(video)
exemplar_idx = np.random.choice(list(range(len(traj))))
exemplar_name = os.path.join(self.data_dir, video,
traj[exemplar_idx] + ".{:02d}.x.jpg".format(trkid))
exemplar_img = self.imread(exemplar_name)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_BGR2RGB)
# sample instance
low_idx = max(0, exemplar_idx - config.frame_range)
up_idx = min(len(traj), exemplar_idx + config.frame_range)
weights = self._sample_weights(exemplar_idx, low_idx, up_idx, config.sample_type)
instance = np.random.choice(traj[low_idx:exemplar_idx] + traj[exemplar_idx + 1:up_idx],
p=weights)
instance_name = os.path.join(self.data_dir, video, instance + ".{:02d}.x.jpg".format(trkid))
instance_img = self.imread(instance_name)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_BGR2RGB)
if np.random.rand(1) < config.gray_ratio:
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_RGB2GRAY)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_GRAY2RGB)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_RGB2GRAY)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_GRAY2RGB)
exemplar_img = self.z_transforms(exemplar_img)
instance_img = self.x_transforms(instance_img)
return exemplar_img, instance_img
def __len__(self):
return self.num
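# A minimal wiring sketch (an illustrative assumption: the lmdb path, video
# list, and transforms below are placeholders; the real pipeline lives in the
# repo's training scripts):
#   import lmdb
#   import mindspore.dataset as ds
#   db = lmdb.open('vid.lmdb', readonly=True)
#   dataset = ImagnetVIDDataset(db, video_names, 'data/vid', z_trans, x_trans)
#   loader = ds.GeneratorDataset(dataset, column_names=['exemplar', 'instance'])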
| [
"[email protected]"
] | |
8eeaa0ca64e1bf2b2d43b5a3ce16af064f666d4a | 67a442ecabcdca9f54f5920874d0095d57f98ede | /gewittergefahr/gg_utils/dilation_test.py | ffb5a2725b02b9dd7ffdc08e4b856685a7be3f54 | [
"MIT"
] | permissive | thunderhoser/GewitterGefahr | 58ba3446c1cc154f56c12c4354dff05b34c12b13 | 1835a71ababb7ad7e47bfa19e62948d466559d56 | refs/heads/master | 2022-07-23T06:47:13.883598 | 2022-07-15T12:43:48 | 2022-07-15T12:43:48 | 104,016,785 | 29 | 13 | MIT | 2020-12-18T20:44:33 | 2017-09-19T02:37:21 | Python | UTF-8 | Python | false | false | 2,662 | py |
"""Unit tests for dilation.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import dilation
TOLERANCE = 1e-6
SMALL_PERCENTILE = 12.5
LARGE_PERCENTILE = 87.5
DILATION_HALF_WIDTH_IN_PIXELS = 1
INPUT_MATRIX = numpy.array(
[[-20., -15., -10., -5., 0.],
[-10., -5., 0., 5., 10.],
[0., 5., 10., numpy.nan, numpy.nan],
[10., 15., 20., numpy.nan, numpy.nan]])
OUTPUT_MATRIX_SMALL_PERCENTILE = numpy.array(
[[-15., -15., -10., -5., numpy.nan],
[-15., -15., -10., -5., numpy.nan],
[-5., -5., numpy.nan, numpy.nan, numpy.nan],
[numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan]])
OUTPUT_MATRIX_LARGE_PERCENTILE = numpy.array(
[[numpy.nan, numpy.nan, numpy.nan, 5., 5.],
[numpy.nan, 5., 5., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
OUTPUT_MATRIX_LARGEST_ABS_VALUE = numpy.array(
[[-15., -15., -10., 5., 5.],
[-15., -15., -10., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
class DilationTests(unittest.TestCase):
"""Each method is a unit test for dilation.py."""
def test_dilate_2d_matrix_small_percentile(self):
"""Ensures correct output from dilate_2d_matrix with small prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=SMALL_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_SMALL_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_large_percentile(self):
"""Ensures correct output from dilate_2d_matrix with large prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGE_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_take_largest_abs_value(self):
"""Ensures correct output from dilate_2d_matrix.
In this case, take_largest_absolute_value = True.
"""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS,
take_largest_absolute_value=True)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGEST_ABS_VALUE, atol=TOLERANCE,
equal_nan=True))
if __name__ == '__main__':
unittest.main()
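# Note: the assertions above rely on numpy.allclose's equal_nan flag, which
# treats aligned NaNs as equal (an added illustration, not part of the file):
#   numpy.allclose([1.0, numpy.nan], [1.0, numpy.nan], equal_nan=True)  # True
#   numpy.allclose([1.0, numpy.nan], [1.0, numpy.nan])                  # False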
| [
"[email protected]"
] | |
570d5e82d5c6785c52442478d82a296869969b12 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/authorization/v20180501/policy_set_definition_at_management_group.py | 319473b88f6f90a3be594135a1f64be8fef76212 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 16,443 | py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PolicySetDefinitionAtManagementGroupArgs', 'PolicySetDefinitionAtManagementGroup']
@pulumi.input_type
class PolicySetDefinitionAtManagementGroupArgs:
def __init__(__self__, *,
management_group_id: pulumi.Input[str],
policy_definitions: pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]],
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None):
"""
The set of arguments for constructing a PolicySetDefinitionAtManagementGroup resource.
:param pulumi.Input[str] management_group_id: The ID of the management group.
:param pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]] policy_definitions: An array of policy definition references.
:param pulumi.Input[str] description: The policy set definition description.
:param pulumi.Input[str] display_name: The display name of the policy set definition.
:param Any metadata: The policy set definition metadata.
:param Any parameters: The policy set definition parameters that can be used in policy definition references.
:param pulumi.Input[str] policy_set_definition_name: The name of the policy set definition to create.
:param pulumi.Input[Union[str, 'PolicyType']] policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
pulumi.set(__self__, "management_group_id", management_group_id)
pulumi.set(__self__, "policy_definitions", policy_definitions)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if policy_set_definition_name is not None:
pulumi.set(__self__, "policy_set_definition_name", policy_set_definition_name)
if policy_type is not None:
pulumi.set(__self__, "policy_type", policy_type)
@property
@pulumi.getter(name="managementGroupId")
def management_group_id(self) -> pulumi.Input[str]:
"""
The ID of the management group.
"""
return pulumi.get(self, "management_group_id")
@management_group_id.setter
def management_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "management_group_id", value)
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]]:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@policy_definitions.setter
def policy_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]]):
pulumi.set(self, "policy_definitions", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[Any]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[Any]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="policySetDefinitionName")
def policy_set_definition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the policy set definition to create.
"""
return pulumi.get(self, "policy_set_definition_name")
@policy_set_definition_name.setter
def policy_set_definition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_set_definition_name", value)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[pulumi.Input[Union[str, 'PolicyType']]]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@policy_type.setter
def policy_type(self, value: Optional[pulumi.Input[Union[str, 'PolicyType']]]):
pulumi.set(self, "policy_type", value)
class PolicySetDefinitionAtManagementGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
management_group_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]]] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None,
__props__=None):
"""
The policy set definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The policy set definition description.
:param pulumi.Input[str] display_name: The display name of the policy set definition.
:param pulumi.Input[str] management_group_id: The ID of the management group.
:param Any metadata: The policy set definition metadata.
:param Any parameters: The policy set definition parameters that can be used in policy definition references.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]] policy_definitions: An array of policy definition references.
:param pulumi.Input[str] policy_set_definition_name: The name of the policy set definition to create.
:param pulumi.Input[Union[str, 'PolicyType']] policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicySetDefinitionAtManagementGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The policy set definition.
:param str resource_name: The name of the resource.
:param PolicySetDefinitionAtManagementGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicySetDefinitionAtManagementGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
management_group_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]]] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicySetDefinitionAtManagementGroupArgs.__new__(PolicySetDefinitionAtManagementGroupArgs)
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
if management_group_id is None and not opts.urn:
raise TypeError("Missing required property 'management_group_id'")
__props__.__dict__["management_group_id"] = management_group_id
__props__.__dict__["metadata"] = metadata
__props__.__dict__["parameters"] = parameters
if policy_definitions is None and not opts.urn:
raise TypeError("Missing required property 'policy_definitions'")
__props__.__dict__["policy_definitions"] = policy_definitions
__props__.__dict__["policy_set_definition_name"] = policy_set_definition_name
__props__.__dict__["policy_type"] = policy_type
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:authorization/v20180501:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20170601preview:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20170601preview:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20180301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20180301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190101:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190101:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190601:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190601:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20200301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20200301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20200901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20200901:PolicySetDefinitionAtManagementGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PolicySetDefinitionAtManagementGroup, __self__).__init__(
'azure-native:authorization/v20180501:PolicySetDefinitionAtManagementGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PolicySetDefinitionAtManagementGroup':
"""
Get an existing PolicySetDefinitionAtManagementGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PolicySetDefinitionAtManagementGroupArgs.__new__(PolicySetDefinitionAtManagementGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["parameters"] = None
__props__.__dict__["policy_definitions"] = None
__props__.__dict__["policy_type"] = None
__props__.__dict__["type"] = None
return PolicySetDefinitionAtManagementGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy set definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> pulumi.Output[Sequence['outputs.PolicyDefinitionReferenceResponse']]:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource (Microsoft.Authorization/policySetDefinitions).
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
22393b19c5fb7be5e9fe08ffa8f211847a997248 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20201101/get_load_balancer.py | fd9af3d72c16c5bbcb5d9fd47aa696d0901401e7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,378 | py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetLoadBalancerResult',
'AwaitableGetLoadBalancerResult',
'get_load_balancer',
'get_load_balancer_output',
]
@pulumi.output_type
class GetLoadBalancerResult:
"""
LoadBalancer resource.
"""
def __init__(__self__, backend_address_pools=None, etag=None, extended_location=None, frontend_ip_configurations=None, id=None, inbound_nat_pools=None, inbound_nat_rules=None, load_balancing_rules=None, location=None, name=None, outbound_rules=None, probes=None, provisioning_state=None, resource_guid=None, sku=None, tags=None, type=None):
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inbound_nat_pools and not isinstance(inbound_nat_pools, list):
raise TypeError("Expected argument 'inbound_nat_pools' to be a list")
pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools)
if inbound_nat_rules and not isinstance(inbound_nat_rules, list):
raise TypeError("Expected argument 'inbound_nat_rules' to be a list")
pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules)
if load_balancing_rules and not isinstance(load_balancing_rules, list):
raise TypeError("Expected argument 'load_balancing_rules' to be a list")
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_rules and not isinstance(outbound_rules, list):
raise TypeError("Expected argument 'outbound_rules' to be a list")
pulumi.set(__self__, "outbound_rules", outbound_rules)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]:
"""
Collection of backend address pools used by a load balancer.
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
"""
The extended location of the load balancer.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.FrontendIPConfigurationResponse']]:
"""
Object representing the frontend IPs to be used for the load balancer.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> Optional[Sequence['outputs.InboundNatPoolResponse']]:
"""
Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]:
"""
Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Optional[Sequence['outputs.LoadBalancingRuleResponse']]:
"""
        Object collection representing the load balancing rules.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundRules")
def outbound_rules(self) -> Optional[Sequence['outputs.OutboundRuleResponse']]:
"""
The outbound rules.
"""
return pulumi.get(self, "outbound_rules")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ProbeResponse']]:
"""
Collection of probe objects used in the load balancer.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the load balancer resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the load balancer resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.LoadBalancerSkuResponse']:
"""
The load balancer SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetLoadBalancerResult(GetLoadBalancerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLoadBalancerResult(
backend_address_pools=self.backend_address_pools,
etag=self.etag,
extended_location=self.extended_location,
frontend_ip_configurations=self.frontend_ip_configurations,
id=self.id,
inbound_nat_pools=self.inbound_nat_pools,
inbound_nat_rules=self.inbound_nat_rules,
load_balancing_rules=self.load_balancing_rules,
location=self.location,
name=self.name,
outbound_rules=self.outbound_rules,
probes=self.probes,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_load_balancer(expand: Optional[str] = None,
load_balancer_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLoadBalancerResult:
"""
LoadBalancer resource.
:param str expand: Expands referenced resources.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['loadBalancerName'] = load_balancer_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20201101:getLoadBalancer', __args__, opts=opts, typ=GetLoadBalancerResult).value
return AwaitableGetLoadBalancerResult(
backend_address_pools=__ret__.backend_address_pools,
etag=__ret__.etag,
extended_location=__ret__.extended_location,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
id=__ret__.id,
inbound_nat_pools=__ret__.inbound_nat_pools,
inbound_nat_rules=__ret__.inbound_nat_rules,
load_balancing_rules=__ret__.load_balancing_rules,
location=__ret__.location,
name=__ret__.name,
outbound_rules=__ret__.outbound_rules,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_load_balancer)
def get_load_balancer_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
load_balancer_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLoadBalancerResult]:
"""
LoadBalancer resource.
:param str expand: Expands referenced resources.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
...
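# Usage sketch (placeholder names; within a Pulumi program the plain form
# returns an awaitable result, the _output form a pulumi.Output):
#   lb = get_load_balancer(load_balancer_name='my-lb', resource_group_name='my-rg')
#   pulumi.export('lbProvisioningState', lb.provisioning_state)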
| [
"[email protected]"
] | |
2282c152c06546a82f340c485eafb5b3ed595424 | f124cb2443577778d8708993c984eafbd1ae3ec3 | /saleor/graphql/checkout/mutations/__init__.py | 7718967b325810e573ce4fd4fe617dcc87905933 | [
"BSD-3-Clause"
] | permissive | quangtynu/saleor | ac467193a7779fed93c80251828ac85d92d71d83 | 5b0e5206c5fd30d81438b6489d0441df51038a85 | refs/heads/master | 2023-03-07T19:41:20.361624 | 2022-10-20T13:19:25 | 2022-10-20T13:19:25 | 245,860,106 | 1 | 0 | BSD-3-Clause | 2023-03-06T05:46:25 | 2020-03-08T17:44:18 | Python | UTF-8 | Python | false | false | 1,556 | py |
from .checkout_add_promo_code import CheckoutAddPromoCode
from .checkout_billing_address_update import CheckoutBillingAddressUpdate
from .checkout_complete import CheckoutComplete
from .checkout_create import CheckoutCreate
from .checkout_customer_attach import CheckoutCustomerAttach
from .checkout_customer_detach import CheckoutCustomerDetach
from .checkout_delivery_method_update import CheckoutDeliveryMethodUpdate
from .checkout_email_update import CheckoutEmailUpdate
from .checkout_language_code_update import CheckoutLanguageCodeUpdate
from .checkout_line_delete import CheckoutLineDelete
from .checkout_lines_add import CheckoutLinesAdd
from .checkout_lines_delete import CheckoutLinesDelete
from .checkout_lines_update import CheckoutLinesUpdate
from .checkout_remove_promo_code import CheckoutRemovePromoCode
from .checkout_shipping_address_update import CheckoutShippingAddressUpdate
from .checkout_shipping_method_update import CheckoutShippingMethodUpdate
from .order_create_from_checkout import OrderCreateFromCheckout
__all__ = [
"CheckoutAddPromoCode",
"CheckoutBillingAddressUpdate",
"CheckoutComplete",
"CheckoutCreate",
"CheckoutCustomerAttach",
"CheckoutCustomerDetach",
"CheckoutDeliveryMethodUpdate",
"CheckoutEmailUpdate",
"CheckoutLanguageCodeUpdate",
"CheckoutLineDelete",
"CheckoutLinesAdd",
"CheckoutLinesDelete",
"CheckoutLinesUpdate",
"CheckoutRemovePromoCode",
"CheckoutShippingAddressUpdate",
"CheckoutShippingMethodUpdate",
"OrderCreateFromCheckout",
]
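# With these re-exports, callers can import any checkout mutation from this
# package rather than from its defining module, e.g.:
#   from saleor.graphql.checkout.mutations import CheckoutCreate, CheckoutComplete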
| [
"[email protected]"
] | |
838f2f8902ca4fdcf743b209c0a1ff7c7ab3412d | 229ed0dad61f9e855de604c230d034a0bd9b3882 | /EdabitPractice/evenOddCounter.py | 06c675245d80be9aaf99b9cef30acdb467528d1c | [] | no_license | Darrenrodricks/EdabitPythonPractice | 987d534dd149ddaef6219df381df850eabbe80b2 | c1be8b10a6fcc1085640a1128f022c05fb2890a9 | refs/heads/main | 2023-07-17T00:36:43.772435 | 2021-08-31T16:24:07 | 2021-08-31T16:24:07 | 400,630,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py |
# Python program to count Even and Odd numbers in a List
# Input: list1 = [2, 7, 5, 64, 14]
# Output: Even = 3, odd = 2
a = 0
b = 0
list1 = [2, 7, 5, 64, 14]
# check each element's parity
for n in list1:
    if n % 2 == 0:
        a += 1
    else:
        b += 1
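# An equivalent one-pass count (an added illustration, not in the original):
evens = sum(1 for n in list1 if n % 2 == 0)
odds = len(list1) - evens
assert (evens, odds) == (a, b)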
print("There are {} Even, and {} Odd".format(a, b)) | [
"[email protected]"
] | |
9cca242910678dbdb4fce620cc6f69091f65087c | 539b031a4edd1aec31af5b6658f25a0de03776a4 | /strings_and_text/sub_re_groups1.py | b91fbc11289d3b5f5a17a2b714d35dde5bec785c | [] | no_license | leogtzr/python-cookbook-code-snippets | c517e7f14e468e1aa8def71d3389348150d43085 | a3f189c26ba38bc982dd140b3b4d6326b39671dc | refs/heads/main | 2023-01-23T07:16:30.292456 | 2020-11-28T04:29:42 | 2020-11-28T04:29:42 | 309,217,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py |
import re
from calendar import month_abbr
def change_date(m):
mon_name = month_abbr[int(m.group(1))]
return '[{}] ({}) |{}|'.format(m.group(2), mon_name, m.group(3))
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
# a substitution callback function
print(datepat.sub(change_date, text))
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# print(month_abbr[12])
for month_num in range(1, 13):
print(month_abbr[month_num])
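# The same reordering works without a callback by using backreferences in the
# replacement template (an added illustration):
print(datepat.sub(r'\3-\1-\2', text))  # Today is 2012-11-27. PyCon starts 2013-3-13.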
| [
"[email protected]"
] | |
0998ba915d80be9aaf99b9cef30acdb467528d1c | 2e145222a18d4509d937951f5cec4df0e26ee86f | /vas/gemfire/CacheServerInstances.py | 9405a719dd10335d0ccd1c552ba07c8a6ef1c57d | [
"Apache-2.0"
] | permissive | vdreamakitex/vas-python-api | 7627b7e3fcf76c16b1ea8b9fb670fdb708eff083 | ce7148a2044863e078e78b47abbaafc426f732ee | refs/heads/master | 2021-01-18T05:13:25.459916 | 2012-11-05T09:58:45 | 2012-11-05T09:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,867 | py |
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Instance import Instance
from vas.shared.MutableCollection import MutableCollection
from vas.util.LinkUtils import LinkUtils
class CacheServerInstances(MutableCollection):
"""Used to enumerate, create, and delete cache server instances
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(CacheServerInstances, self).__init__(client, location, 'cache-server-group-instances',
CacheServerInstance)
def create(self, installation, name):
"""Creates a new cache server instance
:param `vas.gemfire.Installations.Installation` installation: The installation to be used by the instance
:param str name: The name of the instance
:rtype: :class:`vas.gemfire.CacheServerInstances.CacheServerInstance`
:return: The new cache server instance
"""
payload = {'installation': installation._location, 'name': name}
return self._create(payload, 'cache-server-group-instance')
class CacheServerInstance(Instance):
"""A cache server instance
:ivar `vas.gemfire.Groups.Group` group: The group that contains this instance
:ivar `vas.gemfire.Installations.Installation` installation: The installation that this instance is using
:ivar `vas.gemfire.LiveApplicationCodes.LiveApplicationCodes` live_application_code: The instance's live
application code
:ivar `vas.gemfire.CacheServerLiveConfigurations.CacheServerLiveConfigurations` live_configurations: The instance's live
configurations
:ivar str name: The instance's name
:ivar list node_instances: The instance's individual node instances
:ivar `vas.gemfire.PendingApplicationCodes.PendingApplicationCodes` pending_application_code: The instance's
pending application
code
:ivar `vas.gemfire.CacheServerPendingConfigurations.CacheServerPendingConfigurations` pending_configurations: The instance's
pending configurations
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar str state: Retrieves the state of the resource from the server.
Will be one of:
* ``STARTING``
* ``STARTED``
* ``STOPPING``
* ``STOPPED``
"""
__live_application_code = None
__pending_application_code = None
@property
def live_application_code(self):
self.__live_application_code = self.__live_application_code or LiveApplicationCodes(self._client,
self.__live_application_code_location)
return self.__live_application_code
@property
def pending_application_code(self):
self.__pending_application_code = self.__pending_application_code or PendingApplicationCodes(self._client,
self.__pending_application_code_location)
return self.__pending_application_code
def __init__(self, client, location):
super(CacheServerInstance, self).__init__(client, location, Group, Installation, CacheServerLiveConfigurations,
CacheServerPendingConfigurations, CacheServerNodeInstance, 'cache-server-node-instance')
self.__live_application_code_location = LinkUtils.get_link_href(self._details, 'live-application-code')
self.__pending_application_code_location = LinkUtils.get_link_href(self._details, 'pending-application-code')
def update(self, installation):
"""Updates the instance to use a different installation
:param `vas.gemfire.Installations.Installation` installation: The installation that the instance should use
"""
self._client.post(self._location, {'installation': installation._location})
self.reload()
from vas.gemfire.CacheServerLiveConfigurations import CacheServerLiveConfigurations
from vas.gemfire.CacheServerNodeInstances import CacheServerNodeInstance
from vas.gemfire.CacheServerPendingConfigurations import CacheServerPendingConfigurations
from vas.gemfire.Groups import Group
from vas.gemfire.Installations import Installation
from vas.gemfire.LiveApplicationCodes import LiveApplicationCodes
from vas.gemfire.PendingApplicationCodes import PendingApplicationCodes
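# Usage sketch (the client, location, and installation objects below are
# hypothetical; create() posts to the collection, update() to the instance):
#   instances = CacheServerInstances(client, location)
#   instance = instances.create(installation, 'cache-server-1')
#   instance.update(other_installation)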
| [
"[email protected]"
] | |
53bc8edebb6fabc73a2cacad23ca6d8b08fa9b0a | 16450d59c820298f8803fd40a1ffa2dd5887e103 | /baekjoon/2667.py | b85ee7659586696418e866ced977042046429337 | [] | no_license | egyeasy/TIL_public | f78c11f81d159eedb420f5fa177c05d310c4a039 | e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38 | refs/heads/master | 2021-06-21T01:22:16.516777 | 2021-02-02T13:16:21 | 2021-02-02T13:16:21 | 167,803,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py |
"""
<그림 1>과 같이 정사각형 모양의 지도가 있다. 1은 집이 있는 곳을, 0은 집이 없는 곳을 나타낸다. 철수는 이 지도를 가지고 연결된 집들의 모임인 단지를 정의하고, 단지에 번호를 붙이려 한다. 여기서 연결되었다는 것은 어떤 집이 좌우, 혹은 아래위로 다른 집이 있는 경우를 말한다. 대각선상에 집이 있는 경우는 연결된 것이 아니다. <그림 2>는 <그림 1>을 단지별로 번호를 붙인 것이다. 지도를 입력하여 단지수를 출력하고, 각 단지에 속하는 집의 수를 오름차순으로 정렬하여 출력하는 프로그램을 작성하시오.
> input
첫 번째 줄에는 지도의 크기 N(정사각형이므로 가로와 세로의 크기는 같으며 5≤N≤25)이 입력되고, 그 다음 N줄에는 각각 N개의 자료(0혹은 1)가 입력된다.
7
0110100
0110101
1110101
0000111
0100000
0111110
0111000
> output
첫 번째 줄에는 총 단지수를 출력하시오. 그리고 각 단지내 집의 수를 오름차순으로 정렬하여 한 줄에 하나씩 출력하시오.
3
7
8
9
"""
import sys
sys.stdin = open('2667.txt', 'r')
each_cnt = 0
def DFS(s):
global each_cnt
visited[s[0]][s[1]] = 1
each_cnt += 1
go_list = [[-1, 0], [0, 1], [1, 0], [0, -1]]
for go in go_list:
if matrix[s[0] + go[0]][s[1] + go[1]] == 1 and not visited[s[0] + go[0]][s[1] + go[1]]:
DFS([s[0] + go[0], s[1] + go[1]])
m = int(input())
matrix = [[0] * (m + 2) for i in range(m + 2)]
visited = [[0] * (m + 2) for i in range(m + 2)]
for i in range(m):
aline = list(map(int, input()))
for j in range(m):
matrix[i + 1][j + 1] = aline[j]
# for i in matrix:
# print(i)
total_cnt = 0
each_cnt = 0
cnts = [0] * (m**2)
idx = 0
for i in range(1, m + 1):
    for j in range(1, m + 1):
if matrix[i][j] == 1 and not visited[i][j]:
each_cnt = 0
total_cnt += 1
DFS([i, j])
# print(each_cnt)
cnts[idx] = each_cnt
idx += 1
print(total_cnt)
for i in sorted(cnts[:total_cnt]):
print(i)
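# For grids larger than this problem's N <= 25, the recursive DFS could hit
# Python's recursion limit; an equivalent explicit-stack variant (an added
# sketch, not part of the original solution):
def dfs_iterative(start):
    count = 0
    stack = [start]
    visited[start[0]][start[1]] = 1
    while stack:
        r, c = stack.pop()
        count += 1
        for dr, dc in ((-1, 0), (0, 1), (1, 0), (0, -1)):
            if matrix[r + dr][c + dc] == 1 and not visited[r + dr][c + dc]:
                visited[r + dr][c + dc] = 1
                stack.append((r + dr, c + dc))
    return count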
# idea
# 1. A standard DFS flood fill, extended with a per-component size count.
# 2. Most important: recognizing this as a connected-components (DFS) problem.
| [
"[email protected]"
] | |
49f6016496073d31808c5ceda4ff0bb6ac102c09 | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/ml/azure-ai-ml/tests/compute/unittests/test_compute_operations.py | ecfc11bd07cf4aafd9e8a34abaa324d6be10f0af | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 4,501 | py |
from typing import Callable
from unittest.mock import Mock
import pytest
import vcr
from pytest_mock import MockFixture
from azure.ai.ml import load_compute
from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope
from azure.ai.ml.entities import AmlCompute, Compute, ComputeInstance, IdentityConfiguration, UserAssignedIdentity
from azure.ai.ml.operations import ComputeOperations
from azure.identity import DefaultAzureCredential
@pytest.fixture
def mock_compute_operation(
mock_workspace_scope: OperationScope, mock_operation_config: OperationConfig, mock_aml_services_2021_10_01: Mock
) -> ComputeOperations:
yield ComputeOperations(
operation_scope=mock_workspace_scope,
operation_config=mock_operation_config,
service_client=mock_aml_services_2021_10_01,
)
class funny:
def __init__(self):
self.location = "somelocation"
@pytest.mark.unittest
class TestComputeOperation:
def test_list(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.list()
mock_compute_operation._operation.list.assert_called_once()
def test_create_compute_instance(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch(
"azure.ai.ml._restclient.v2021_10_01.workspaces.get",
return_value=funny(),
)
mocker.patch(
"azure.ai.ml.entities.Compute._from_rest_object",
return_value=ComputeInstance(name="name", resource_id="test_resource_id"),
)
compute = load_compute("./tests/test_configs/compute/compute-ci-unit.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_create_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch("azure.ai.ml._restclient.v2021_10_01.workspaces.get", return_value=funny())
compute = load_compute("./tests/test_configs/compute/compute-aml.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_delete(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete("randstr")
mock_compute_operation._operation.begin_delete.assert_called_once()
def test_show(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.get("randstr")
mock_compute_operation._operation.get.assert_called_once()
def test_start(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_start("randstr")
mock_compute_operation._operation.begin_start.assert_called_once()
def test_stop(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_stop("randstr")
mock_compute_operation._operation.begin_stop.assert_called_once()
def test_restart(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_restart("randstr")
mock_compute_operation._operation.begin_restart.assert_called_once()
def test_update_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
compute = AmlCompute(
name="name",
tags={"key1": "value1", "key2": "value2"},
min_instances=0,
max_instances=10,
idle_time_before_scale_down=100,
identity=IdentityConfiguration(
type="UserAssigned",
user_assigned_identities=[
UserAssignedIdentity(
resource_id="/subscriptions/b17253fa-f327-42d6-9686-f3e553e24763/resourcegroups/MC_banibatch_bani-aks_eastus/providers/Microsoft.ManagedIdentity/userAssignedIdentities/omsagent-bani-aks"
)
],
),
)
mock_compute_operation.begin_update(compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_detach(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete(
name="randstr",
action="Detach",
)
mock_compute_operation._operation.begin_delete.assert_called_once()
| [
"[email protected]"
] | |
6229d71ac4298b44124dd4b8e60fbc94f362f721 | 22f57701df31b3182f3bcb83da729ecc584f8fb6 | /December-12/py_anuppriya_revsinglylinkedlist.py | eca33e40110bce5169894193a53714e455c04d79 | [] | no_license | Prashant-Bharaj/A-December-of-Algorithms | e88640c711abbe2e6cac71cb4652dac243984484 | 7bbd56572f4ddc9648e90615ee810765544c56e4 | refs/heads/master | 2023-08-05T15:37:20.362561 | 2021-09-19T05:51:53 | 2021-09-19T05:51:53 | 287,055,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py |
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def reverse(self):
prev = None
current = self.head
while(current is not None):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printList(self):
temp = self.head
while(temp):
print (temp.data)
temp = temp.next
llist = LinkedList()
llist.push(13)
llist.push(18)
llist.push(22)
llist.push(48)
print( "Given Linked List")
llist.printList()
llist.reverse()
print ("\nReversed Linked List")
llist.printList()
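# Expected output (an added note): the pushes build 48 -> 22 -> 18 -> 13, so
# the first printList shows 48 22 18 13 (one value per line) and, after
# reverse(), the second shows 13 18 22 48.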
| [
"[email protected]"
] | |
21e0c6271798aca723cc58befb65b2e755533138 | 3f95904666cbecc5a65605e86f8b4dfe4797f8c5 | /seven/timeseries.py | e08993380af5ac04c418f257f7f805c290abffe5 | [] | no_license | rlowrance/test7 | 60b2778e19d91c357304637d3e73d74c9bcd3b79 | 3535bd46bff602fc3ba35c080d38b30e75a97fe7 | refs/heads/master | 2021-07-18T11:42:20.784873 | 2017-10-24T14:27:52 | 2017-10-24T14:27:52 | 97,166,588 | 2 | 5 | null | 2017-08-16T15:06:16 | 2017-07-13T21:32:41 | Python | UTF-8 | Python | false | false | 11,582 | py |
'''time series prediction
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from abc import ABCMeta, abstractmethod
import collections
import pdb
import numbers
import numpy as np
import unittest
class CreateFeatures(object):
'create features file from a master file and possibly many associated information files'
def __init__(self):
self.n_input_records = None
self.n_output_records = None
self.skipped = None # collections.Counter for reasons input records were skipped
pass
def create(
self,
feature_makers=None,
master_file_records=None,
selected=None, # lambda index, master_record -> (use_record: Bool, maybe_error_message)
report_skipped_master_record=None, # lambda index, master_record, [None|feature_maker], msg -> None
# start optional arguments
verbose=False,
):
'yield sequence (features:Dict, index, master_record) of output records with features derived from the master file'
def error(msg):
print('error in feature maker %s feature_name %s feature_value %s' % (
feature_maker.name,
feature_name,
feature_value,
))
print(msg)
print('entering pdb')
pdb.set_trace()
assert feature_makers is not None
assert master_file_records is not None
assert selected is not None
assert report_skipped_master_record is not None
self.n_input_records = 0
self.n_output_records = 0
self.skipped = collections.Counter()
for index, master_record in master_file_records:
self.n_input_records += 1
if self.n_input_records % 10000 == 1:
print('creating features from master record %d index %s' % (self.n_input_records, index))
(use_record, msg) = selected(index, master_record)
if not use_record:
report_skipped_master_record(index, master_record, None, msg)
continue
# create features from the master_record
# the feature makers may incorporate data from other records
features_made = {}
stopped_early = False
# accumulate all the features from the feature makers
# check for errors on the way
for feature_maker in feature_makers:
maybe_features = feature_maker.make_features(index, master_record)
if isinstance(maybe_features, str):
report_skipped_master_record(index, master_record, feature_maker, maybe_features)
stopped_early = True
break
elif isinstance(maybe_features, dict):
for feature_name, feature_value in maybe_features.items():
if feature_name in features_made:
error('duplicate feature name')
elif not feature_name.startswith('id_') and not isinstance(feature_value, numbers.Number):
error('feature value is not numeric')
elif feature_name.endswith('_size') and feature_value < 0.0:
error('size feature is negative')
else:
features_made[feature_name] = feature_value
else:
print(feature_maker.name)
print(feature_maker)
print(maybe_features)
print(type(maybe_features))
error('unexpected return type from a feature_maker')
if stopped_early:
continue
self.n_output_records += 1
yield features_made, index, master_record
class FeatureMaker(object, metaclass=ABCMeta):
def __init__(self, name=None):
self.name = name # used in error message; informal name of the feature maker
@abstractmethod
    def make_features(self, index, master_record):
        'return error:str or Dict[feature_name:str, feature_value:number]'
pass
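# An illustrative concrete maker and driver (everything below is an assumed
# sketch, not part of the original module):
class TradePriceMaker(FeatureMaker):
    'emit one numeric feature per master record, or an error string to skip it'
    def make_features(self, index, master_record):
        price = master_record.get('price')
        if price is None:
            return 'record %s has no price' % index  # reported and skipped
        return {'trade_price': float(price)}
# driver sketch:
#   cf = CreateFeatures()
#   for features, index, record in cf.create(
#           feature_makers=[TradePriceMaker(name='price')],
#           master_file_records=enumerate(records),
#           selected=lambda index, record: (True, None),
#           report_skipped_master_record=lambda index, record, fm, msg: None):
#       ...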
FitPredictResult = collections.namedtuple(
'FitPredictResult',
(
'query_index',
'query_features',
'predicted_feature_name',
'predicted_feature_value',
'model_spec',
'prediction',
'fitted_model',
'n_training_samples',
)
)
class ExceptionFit(Exception):
def __init__(self, parameter):
self.parameter = parameter
def __str__(self):
return 'ExceptionFit(%s)' % str(self.parameter)
class FitPredict(object):
'fit models and predict targets'
def fit_predict(
self,
df_features=None,
df_targets=None,
make_model=None,
model_specs=None,
timestamp_feature_name=None,
already_seen_lambda=None, # lambda query_index, model_spec, predicted_feature_name: Bool
):
'yield either (True, result:FitPredictResult) or (False, error_msg:str)'
# df_targets: a sorted sequence of targets, sorted in timestamp order (y values)
sorted_targets = df_targets.sort_values(by=timestamp_feature_name)
for query_index in sorted_targets.index:
if query_index not in df_features.index:
yield False, 'no query feature for query index %s' % query_index
continue
query_features = df_features.loc[[query_index]] # must be a DataFrame
assert len(query_features) == 1
timestamp = query_features.iloc[0][timestamp_feature_name]
mask = sorted_targets[timestamp_feature_name] < timestamp
training_targets = sorted_targets.loc[mask]
if len(training_targets) == 0:
yield False, 'no training_targets for query index %s timestamp %s' % (query_index, timestamp)
continue
training_features = df_features.loc[training_targets.index]
if len(training_features) == 0:
yield False, 'no training_features for query index %s timestamp %s' % (query_index, timestamp)
continue
for predicted_feature_name, predicted_feature_value in sorted_targets.loc[query_index].items():
if predicted_feature_name.startswith('id_'):
continue # skip identifiers, as these are not features
if predicted_feature_name.endswith('_decreased') or predicted_feature_name.endswith('_increased'):
yield False, 'classification not yet implemented; target feature name %s' % predicted_feature_name
continue
for model_spec in model_specs:
if already_seen_lambda(query_index, model_spec, predicted_feature_name):
yield False, 'already seen: %s %s %s' % (query_index, model_spec, predicted_feature_name)
continue
m = make_model(model_spec, predicted_feature_name)
try:
# TODO: turn into keywords
m.fit(training_features, training_targets)
except ExceptionFit as e:
yield False, 'exception raised during fitting: %s' % str(e)
continue
predictions = m.predict(query_features)
assert len(predictions) == 1
prediction = predictions[0]
                    if prediction is not None and np.isnan(prediction):
                        # a NaN prediction signals a degenerate fit; report it rather than dropping into a debugger
                        yield False, 'prediction is NaN: %s %s %s' % (query_index, model_spec, predicted_feature_name)
                        continue
if prediction is None:
yield False, 'predicted value was None: %s %s %s' % (query_index, model_spec, predicted_feature_name)
else:
yield (
True,
FitPredictResult(
query_index=query_index,
query_features=query_features,
predicted_feature_name=predicted_feature_name,
predicted_feature_value=predicted_feature_value,
model_spec=model_spec,
prediction=predictions[0],
fitted_model=m,
n_training_samples=len(training_features),
)
)
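
# Usage sketch (added): a minimal walk-forward run of FitPredict on synthetic
# frames. MeanModel, the 'mean' spec string, and the column names are assumptions
# for illustration; real callers pass ModelSpec instances. Call
# _example_fit_predict() to see the yielded (ok, payload) pairs.
def _example_fit_predict():
    import pandas as pd

    class MeanModel(Model):
        'toy model that predicts the mean training target'
        def fit(self, df_features, df_targets):
            self._mean = df_targets['price'].mean()
            self.importances = {}

        def predict(self, df_features):
            return [self._mean] * len(df_features)

    df_features = pd.DataFrame(
        {'timestamp': [1, 2, 3], 'x': [0.1, 0.2, 0.3]},
        index=['a', 'b', 'c'],
    )
    df_targets = pd.DataFrame(
        {'timestamp': [1, 2, 3], 'price': [10.0, 11.0, 12.0]},
        index=['a', 'b', 'c'],
    )
    for ok, payload in FitPredict().fit_predict(
            df_features=df_features,
            df_targets=df_targets,
            make_model=lambda model_spec, target_name: MeanModel(),
            model_specs=['mean'],
            timestamp_feature_name='timestamp',
            already_seen_lambda=lambda query_index, model_spec, name: False):
        print('%s %s' % (ok, payload))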
class FitPredictOutput(object, metaclass=ABCMeta):
'content of output file for program fit_predict.py'
@abstractmethod
def as_dict(self):
'return a dict with all the fields'
pass
class HpChoices(object, metaclass=ABCMeta):
    'iterates over HpSpec instances'
@abstractmethod
def __iter__(self):
'yield sequence of HpSpec objects'
pass
class Model(object, metaclass=ABCMeta):
@abstractmethod
def fit(self, df_training_samples_features, df_training_samples_targets):
'mutate self; set attribute importances: Dict[feature_name:str, feature_importance:Number]'
pass
@abstractmethod
def predict(self, df_query_samples_features):
'return predictions'
pass
class ModelSpec(object, metaclass=ABCMeta):
    'specification of a model name and its associated hyperparameters'
@abstractmethod
def __str__(self):
'return parsable string representation'
# Hint: Use method self._to_str(value) to convert individual values to strings
# That will make all the string representations use the same encoding of values to strings
pass
@staticmethod
@abstractmethod
def make_from_str(s):
'parse the representation returned by str(self) to create an instance'
pass
@abstractmethod
def iteritems(self):
        'yield each (hyperparameter name:str, hyperparameter value)'
pass
@abstractmethod
def __eq__(self, other):
pass
@abstractmethod
def __hash__(self):
pass
@abstractmethod
def __lt__(self, other):
pass
def _to_str(self, value):
'internal method. Convert value to a string. Use me in your __str__ method'
def remove_trailing_zeroes(s):
return (
s if s[-1] != '0' else
remove_trailing_zeroes(s[:-1])
)
if value is None:
return ''
elif isinstance(value, float):
return remove_trailing_zeroes(('%f' % value).replace('.', '_'))
elif isinstance(value, int):
return '%d' % value
else:
return str(value)
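
# e.g. (sketch of _to_str's encoding): 0.5 -> '0_5', 20 -> '20', None -> ''
# (floats swap '.' for '_' and drop trailing zeros so specs stay parsable)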
class TestHpChoices(unittest.TestCase):
def test_construction(self):
self.assertRaises(Exception, HpChoices, None)
class TestHpSpec(unittest.TestCase):
def test_construction(self):
self.assertRaises(Exception, HpSpec, None)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e8c2786ad69cfccec2ad37b66382443519baed1a | 59fb17c240b261040026d713a6ac9c97d6a9f265 | /gym/gym/envs/registration.py | 18519749167fe193d8d2cb3b3348653ae837fd17 | [
"MIT"
] | permissive | dmeger/TeachingImitation | 3fb97499e76929959913266f127154f6ae5a8e99 | 5f4dba7e49987924c3d55cd27579cad4c71ef7a4 | refs/heads/master | 2023-03-28T13:25:01.307382 | 2021-04-06T15:07:08 | 2021-04-06T15:07:08 | 355,223,500 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,883 | py | import re
import copy
import importlib
import warnings
from gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
def load(name):
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
class EnvSpec(object):
"""A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before the task is considered solved
nondeterministic (bool): Whether this environment is non-deterministic even after seeding
max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
kwargs (dict): The kwargs to pass to the environment class
"""
def __init__(self, id, entry_point=None, reward_threshold=None, nondeterministic=False, max_episode_steps=None, kwargs=None):
self.id = id
self.entry_point = entry_point
self.reward_threshold = reward_threshold
self.nondeterministic = nondeterministic
self.max_episode_steps = max_episode_steps
self._kwargs = {} if kwargs is None else kwargs
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
self._env_name = match.group(1)
def make(self, **kwargs):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self.entry_point is None:
raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entry_point):
env = self.entry_point(**_kwargs)
else:
cls = load(self.entry_point)
env = cls(**_kwargs)
# Make the environment aware of which spec it came from.
spec = copy.deepcopy(self)
spec._kwargs = _kwargs
env.unwrapped.spec = spec
return env
def __repr__(self):
return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
"""Register an env by ID. IDs remain stable over time and are
guaranteed to resolve to the same environment dynamics (or be
desupported). The goal is that results on a particular environment
should always be comparable, and not depend on the version of the
code that was running.
"""
def __init__(self):
self.env_specs = {}
def make(self, path, **kwargs):
if len(kwargs) > 0:
logger.info('Making new env: %s (%s)', path, kwargs)
else:
logger.info('Making new env: %s', path)
spec = self.spec(path)
env = spec.make(**kwargs)
# We used to have people override _reset/_step rather than
# reset/step. Set _gym_disable_underscore_compat = True on
# your environment if you use these methods and don't want
# compatibility code to be invoked.
if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
patch_deprecated_methods(env)
if env.spec.max_episode_steps is not None:
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
return env
def all(self):
return self.env_specs.values()
def spec(self, path):
if ':' in path:
mod_name, _sep, id = path.partition(':')
try:
importlib.import_module(mod_name)
# catch ImportError for python2.7 compatibility
except ImportError:
raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
else:
id = path
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
try:
return self.env_specs[id]
except KeyError:
# Parse the env name and check to see if it matches the non-version
# part of a valid env (could also check the exact number here)
env_name = match.group(1)
matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
if env_name == valid_env_spec._env_name]
if matching_envs:
raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
else:
raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
def register(self, id, **kwargs):
if id in self.env_specs:
raise error.Error('Cannot re-register id: {}'.format(id))
self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()
def register(id, **kwargs):
return registry.register(id, **kwargs)
def make(id, **kwargs):
return registry.make(id, **kwargs)
def spec(id):
return registry.spec(id)
warn_once = True
def patch_deprecated_methods(env):
"""
Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.
For backward compatibility, this makes it possible to work with unmodified environments.
"""
global warn_once
if warn_once:
logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
warn_once = False
env.reset = env._reset
env.step = env._step
env.seed = env._seed
def render(mode):
return env._render(mode, close=False)
def close():
env._render("human", close=True)
env.render = render
env.close = close
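
if __name__ == "__main__":
    # Minimal self-check sketch (added): 'Example-v0' and its entry point are
    # hypothetical; entry points are loaded lazily by make(), so register()
    # and spec() succeed without the module existing.
    register(id='Example-v0', entry_point='my_envs:ExampleEnv')
    print(spec('Example-v0'))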
| [
"[email protected]"
] | |
5a34b1c2505774cc28123bf7867e9d5b84e9422c | ea5de3d347ef4e1dcac9ee37da2d9850888d9ecc | /pawn_brotherhood.py | 57460c2b269a4526698cd78561b3aa401f2e81a2 | [] | no_license | skoriy88/Chekio | 4d50c18c54741c425d468a80a24ceb526a13dabe | fcbc291ca624cb9d5415128e605ea27d5e50983e | refs/heads/master | 2020-03-18T11:26:09.966384 | 2018-05-25T13:52:54 | 2018-05-25T13:52:54 | 134,671,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | lst = {"b4", "d4", "f4", "c3", "e3", "g5", "d2"}
def safe_pawns(inp):
new = {(ord(i[0]), int(i[1])) for i in inp}
safe = sum(1 for pawn in new if(pawn[0]-1, pawn[1]-1) in new or (pawn[0]+1, pawn[1]-1) in new)
#print(safe)
return safe
safe_pawns(lst)
# ord('a')..ord('h') map to 97..104, which is why pawns are encoded as (ord(column), rank) above
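
# sanity check (added sketch): with the set above, every pawn except d2 is
# covered diagonally from the rank below, so six of the seven are safe
assert safe_pawns(lst) == 6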
"[email protected]"
] | |
fe4a6346bcb9bbbbbfa99d1e0c34646eaaeeb80d | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/mlrRecon/607-tideGauge.py | 5d3a5857f4fc004b496e5aaa6fe495475221f2de | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,255 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
----------------------------------------------------
This program is designed to reconstruct merra daily
maximum surge using MLR
----------------------------------------------------
@author: Michael Tadesse
"""
def reconstruct():
"""
run KFOLD method for regression
"""
#import packages
import os
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/mlrReconstruction"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 607
y = 608
#looping through
for tg in range(x,y):
os.chdir(dir_in)
tg_name = os.listdir()[tg]
print(tg, tg_name)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
print('Predictors and Surge don''t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
# #apply 10 fold cross validation
# kf = KFold(n_splits=10, random_state=29)
# metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
# for train_index, test_index in kf.split(X):
# X_train, X_test = X_pca[train_index], X_pca[test_index]
# y_train, y_test = y['surge'][train_index], y['surge'][test_index]
# #train regression model
# lm = LinearRegression()
# lm.fit(X_train, y_train)
# #predictions
# predictions = lm.predict(X_test)
# # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# # pd.DataFrame(np.array(y_test))], \
# # axis = 1)
# # pred_obs.columns = ['pred', 'obs']
# # combo = pd.concat([combo, pred_obs], axis = 0)
# #evaluation matrix - check p value
# if stats.pearsonr(y_test, predictions)[1] >= 0.05:
# print("insignificant correlation!")
# continue
# else:
# #print(stats.pearsonr(y_test, predictions))
# metric_corr.append(stats.pearsonr(y_test, predictions)[0])
# #print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# # #number of years used to train/test model
# num_years = np.ceil((pred_surge['date'][pred_surge.shape[0]-1] -\
# pred_surge['date'][0]).days/365)
# longitude = surge['lon'][0]
# latitude = surge['lat'][0]
# num_pc = X_pca.shape[1] #number of principal components
# corr = np.mean(metric_corr)
# rmse = np.mean(metric_rmse)
# print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',\
# np.mean(metric_corr), ' - avg_rmse (m) = ', \
# np.mean(metric_rmse), '\n')
num_pc = X_pca.shape[1] #number of principal components
longitude = surge['lon'][0]
latitude = surge['lat'][0]
#surge reconstruction
pred_for_recon = pred[~pred.isna().any(axis = 1)]
pred_for_recon = pred_for_recon.reset_index().drop('index', axis = 1)
#standardize predictor data
dat = pred_for_recon.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred_for_recon['date'], dat_standardized], axis = 1)
X_recon = pred_standardized.iloc[:, 1:]
#apply PCA
pca = PCA(num_pc) #use the same number of PCs used for training
pca.fit(X_recon)
X_pca_recon = pca.transform(X_recon)
#model preparation
#first train model using observed surge and corresponding predictors
X_pca = sm.add_constant(X_pca)
est = sm.OLS(y['surge'], X_pca).fit()
#predict with X_recon and get 95% prediction interval
X_pca_recon = sm.add_constant(X_pca_recon)
predictions = est.get_prediction(X_pca_recon).summary_frame(alpha = 0.05)
#drop confidence interval and mean_se columns
predictions.drop(['mean_se', 'mean_ci_lower','mean_ci_upper'], \
axis = 1, inplace = True)
#final dataframe
final_dat = pd.concat([pred_standardized['date'], predictions], axis = 1)
final_dat['lon'] = longitude
final_dat['lat'] = latitude
        final_dat.columns = ['date', 'surge_reconstructed', 'pred_int_lower',\
'pred_int_upper', 'lon', 'lat']
# plot - optional
# time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
# final_dat['date'] = pd.DataFrame(list(map(time_stamp, final_dat['date'])), columns = ['date'])
# surge['date'] = pd.DataFrame(list(map(time_stamp, surge['date'])), columns = ['date'])
# sns.set_context('notebook', font_scale = 2)
# plt.figure()
# plt.plot(final_dat['date'], final_dat['mean'], color = 'green')
# plt.scatter(surge['date'], surge['surge'], color = 'blue')
# prediction intervals
# plt.plot(final_dat['date'], final_dat['obs_ci_lower'], color = 'red', linestyle = "--", lw = 0.8)
# plt.plot(final_dat['date'], final_dat['obs_ci_upper'], color = 'red', linestyle = "--", lw = 0.8)
# confidence intervals
# plt.plot(final_dat['date'], final_dat['mean_ci_upper'], color = 'black', linestyle = "--", lw = 0.8)
# plt.plot(final_dat['date'], final_dat['mean_ci_lower'], color = 'black', linestyle = "--", lw = 0.8)
#save df as cs - in case of interruption
os.chdir(dir_out)
final_dat.to_csv(tg_name)
#cd to dir_in
# os.chdir(dir_in)
reconstruct()
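
# Appendix sketch (added; not called by reconstruct()): the OLS prediction-interval
# pattern used above, in miniature on synthetic data.
def _ols_interval_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    X = sm.add_constant(rng.normal(size=(50, 2)))
    y = X.dot(np.array([1.0, 2.0, -1.0])) + rng.normal(scale=0.1, size=50)
    est = sm.OLS(y, X).fit()
    # 95% prediction intervals for the first three rows
    frame = est.get_prediction(X[:3]).summary_frame(alpha=0.05)
    return frame[['mean', 'obs_ci_lower', 'obs_ci_upper']]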
| [
"[email protected]"
] | |
0c952f5626c7a7187c2ce0175469a5ae5d62cbc9 | 26a0941b02286518e382fe86daa0dd5c0f596a9a | /stage_scenes.py | 26bf98175d74488c0e99843bcaa5d0d4709e9ced | [
"MIT"
] | permissive | Gargaran/videos | 729c3c7e91cb20e5377b5e397b3b90ea91e3f8a1 | 26458da42fc665eb4ae844168c16ebb0526cc231 | refs/heads/master | 2023-08-22T16:36:33.235479 | 2021-10-06T22:48:08 | 2021-10-06T22:48:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | #!/usr/bin/env python
import inspect
import os
import sys
import importlib
from manimlib.config import get_module
from manimlib.extract_scene import is_child_scene
def get_sorted_scene_classes(module_name):
module = get_module(module_name)
if hasattr(module, "SCENES_IN_ORDER"):
return module.SCENES_IN_ORDER
# Otherwise, deduce from the order in which
# they're defined in a file
importlib.import_module(module.__name__)
line_to_scene = {}
name_scene_list = inspect.getmembers(
module,
lambda obj: is_child_scene(obj, module)
)
for name, scene_class in name_scene_list:
if inspect.getmodule(scene_class).__name__ != module.__name__:
continue
lines, line_no = inspect.getsourcelines(scene_class)
line_to_scene[line_no] = scene_class
return [
line_to_scene[index]
for index in sorted(line_to_scene.keys())
]
def stage_scenes(module_name):
scene_classes = get_sorted_scene_classes(module_name)
if len(scene_classes) == 0:
print("There are no rendered animations from this module")
return
# TODO, fix this
animation_dir = os.path.join(
os.path.expanduser('~'),
"Dropbox/3Blue1Brown/videos/2021/poly_fractal/videos"
)
#
files = os.listdir(animation_dir)
sorted_files = []
for scene_class in scene_classes:
scene_name = scene_class.__name__
clips = [f for f in files if f.startswith(scene_name + ".")]
for clip in clips:
sorted_files.append(os.path.join(animation_dir, clip))
# Partial movie file directory
# movie_dir = get_movie_output_directory(
# scene_class, **output_directory_kwargs
# )
# if os.path.exists(movie_dir):
# for extension in [".mov", ".mp4"]:
# int_files = get_sorted_integer_files(
# pmf_dir, extension=extension
# )
# for file in int_files:
# sorted_files.append(os.path.join(pmf_dir, file))
# else:
# animation_subdir = os.path.dirname(animation_dir)
count = 0
while True:
staged_scenes_dir = os.path.join(
animation_dir,
os.pardir,
"staged_scenes_{}".format(count)
)
if not os.path.exists(staged_scenes_dir):
os.makedirs(staged_scenes_dir)
break
# Otherwise, keep trying new names until
# there is a free one
count += 1
for count, f in reversed(list(enumerate(sorted_files))):
# Going in reversed order means that when finder
# sorts by date modified, it shows up in the
# correct order
symlink_name = os.path.join(
staged_scenes_dir,
"Scene_{:03}_{}".format(
count, f.split(os.sep)[-1]
)
)
os.symlink(f, symlink_name)
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception("No module given.")
module_name = sys.argv[1]
stage_scenes(module_name)
| [
"[email protected]"
] | |
926a6e5c2f8e14ca41571537b899f12932268bbd | 5258903bb9cdeedf13a7101aa98e82c915914974 | /curriculum/migrations/0001_initial.py | d9ad40787e5dfc4ba0c58df54e9178a4392de19e | [
"BSD-2-Clause"
] | permissive | ZuluPro/django-cv | 7328e4f3e30ecfef7c5e6e598d5b95986ed7dbe9 | 64a7fda155d7052642484ebc9a7e7822d73ea1b0 | refs/heads/master | 2020-05-29T18:12:29.262859 | 2016-06-12T23:13:11 | 2016-06-12T23:13:11 | 45,745,590 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 29,493 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import curriculum.models.utils
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Certification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name='title')),
('authority', models.CharField(max_length=200, verbose_name='authority')),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
],
),
migrations.CreateModel(
name='CertificationItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_year', models.IntegerField(verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('expires', models.BooleanField(default=False, verbose_name='expires')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('certification', models.ForeignKey(related_name='items', to='curriculum.Certification')),
],
),
migrations.CreateModel(
name='Experience',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('entreprise', models.CharField(max_length=200, verbose_name='entreprise')),
('context', models.TextField(max_length=1000, verbose_name='context', blank=True)),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('results', models.TextField(max_length=3000, verbose_name='results', blank=True)),
('type', models.CharField(max_length=5, null=True, verbose_name='type', choices=[(None, 'unknown'), (b'SALAR', 'salaried'), (b'CHIEF', 'founder/chief'), (b'FREEL', 'freelance/chief'), (b'OTHER', 'other')])),
('environment', models.CharField(max_length=400, verbose_name='environment', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('still', models.BooleanField(default=True, verbose_name='still in office')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
],
),
migrations.CreateModel(
name='Language',
fields=[
('name', models.CharField(max_length=50, unique=True, serialize=False, verbose_name='name', primary_key=True)),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='LanguageItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default=b'NOT', max_length=5, verbose_name='level', choices=[(b'NOT', 'Notion'), (b'BAS', 'basic'), (b'ADV', 'advanced'), (b'PRO', 'professional'), (b'BIL', 'bilingual')])),
('language', models.ForeignKey(related_name='items', to='curriculum.Language')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(unique=True, max_length=200, verbose_name='title')),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
],
),
migrations.CreateModel(
name='ProjectItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contribution', models.TextField(max_length=3000, verbose_name='contribution', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('still', models.BooleanField(default=True, verbose_name='still contributor')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
('project', models.ForeignKey(related_name='items', to='curriculum.Project')),
],
),
migrations.CreateModel(
name='Resume',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('firstname', models.CharField(max_length=150, verbose_name='First name')),
('lastname', models.CharField(max_length=150, verbose_name='Last name')),
('title', models.CharField(max_length=200, null=True, verbose_name='Title', blank=True)),
('resume', models.TextField(help_text="Short profile's description", max_length=3000, null=True, verbose_name='resume', blank=True)),
('image', models.ImageField(upload_to=b'', null=True, verbose_name='image', blank=True)),
('phone', models.CharField(max_length=100, null=True, verbose_name='phone', blank=True)),
('website', models.URLField(max_length=300, null=True, verbose_name='website', blank=True)),
('email', models.CharField(max_length=100, null=True, verbose_name='email', blank=True)),
('city', models.CharField(max_length=100, null=True, verbose_name='city', blank=True)),
('country', models.CharField(max_length=100, null=True, verbose_name='country', blank=True)),
('address', models.CharField(max_length=300, null=True, verbose_name='address', blank=True)),
('skill_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of skills', blank=True)),
('experience_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of experience', blank=True)),
('training_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of trainings', blank=True)),
('project_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of projects', blank=True)),
('driving_license', models.CharField(max_length=100, null=True, verbose_name='driving license', blank=True)),
('hobbies', models.TextField(max_length=1000, null=True, verbose_name='hobbies', blank=True)),
('tags', models.CharField(max_length=500, null=True, verbose_name='tags', blank=True)),
('skype', models.CharField(max_length=100, null=True, verbose_name='Skype ID', blank=True)),
('twitter', models.CharField(max_length=100, null=True, verbose_name='Twitter', blank=True)),
('linkedin', models.CharField(max_length=100, null=True, verbose_name='LinkedIn ID', blank=True)),
('google', models.CharField(max_length=100, null=True, verbose_name='Google+ ID', blank=True)),
('stackoverflow', models.IntegerField(null=True, verbose_name='StackOverflow ID', blank=True)),
('github', models.CharField(max_length=300, null=True, verbose_name='GitHub ID', blank=True)),
],
options={
'verbose_name': 'resume',
},
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=200, verbose_name='name')),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
('tags', models.CharField(max_length=500, verbose_name='tags', blank=True)),
('color', models.CharField(max_length=50, verbose_name='color', blank=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='SkillItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(max_length=1, verbose_name='level', choices=[(None, 'unknown'), (b'B', 'beginner'), (b'S', 'skilled'), (b'A', 'advanced'), (b'E', 'expert')])),
('category', models.CharField(max_length=50, verbose_name='category', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, null=True, verbose_name='start year', blank=True, choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, null=True, verbose_name='start month', blank=True, choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
('resume', models.ForeignKey(related_name='skills', to='curriculum.Resume')),
('skill', models.ForeignKey(related_name='items', to='curriculum.Skill')),
],
),
migrations.CreateModel(
name='Training',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('school', models.CharField(max_length=150, verbose_name='school')),
('degree', models.CharField(max_length=150, verbose_name='degree')),
('topic', models.CharField(max_length=150, verbose_name='topic', blank=True)),
('result', models.CharField(max_length=150, verbose_name='result', blank=True)),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('year', models.IntegerField(verbose_name='year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('month', models.IntegerField(verbose_name='month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('resume', models.ForeignKey(related_name='trainings', to='curriculum.Resume')),
],
),
migrations.AddField(
model_name='projectitem',
name='resume',
field=models.ForeignKey(related_name='projects', to='curriculum.Resume'),
),
migrations.AddField(
model_name='languageitem',
name='resume',
field=models.ForeignKey(related_name='languages', to='curriculum.Resume'),
),
migrations.AddField(
model_name='experience',
name='resume',
field=models.ForeignKey(related_name='experiences', to='curriculum.Resume'),
),
migrations.AddField(
model_name='certificationitem',
name='resume',
field=models.ForeignKey(related_name='certifications', to='curriculum.Resume'),
),
migrations.AlterUniqueTogether(
name='certification',
unique_together=set([('title', 'authority')]),
),
migrations.AlterUniqueTogether(
name='skillitem',
unique_together=set([('skill', 'resume')]),
),
migrations.AlterUniqueTogether(
name='projectitem',
unique_together=set([('resume', 'project')]),
),
migrations.AlterUniqueTogether(
name='languageitem',
unique_together=set([('language', 'resume')]),
),
migrations.AlterUniqueTogether(
name='certificationitem',
unique_together=set([('certification', 'resume')]),
),
]
| [
"[email protected]"
] | |
852411151db8afff623d48a858ba720238508dd7 | faaf12ab18978082233c09628b815a69e73868e4 | /codechef/practice/easy/lebombs.py | 3c70653a1ff04cd448e8e83575cc876a870c045a | [
"WTFPL"
] | permissive | ferhatelmas/algo | 6826bcf0be782cb102c1ee20dce8d4345e1fd6d2 | 7b867f6d2c8a9fb896f464168b50dfc115617e56 | refs/heads/master | 2023-08-18T19:59:58.435696 | 2023-08-14T10:16:00 | 2023-08-14T10:16:00 | 3,813,734 | 27 | 16 | WTFPL | 2020-10-25T23:00:16 | 2012-03-23T23:43:31 | Java | UTF-8 | Python | false | false | 229 | py | from sys import stdin
from itertools import groupby
for i, ln in enumerate(stdin):
if i > 0 and i % 2 == 0:
s = "0" + ln.rstrip() + "0"
print(sum(max(len(list(g)) - 2, 0) for k, g in groupby(s) if k == "0"))
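
# sanity sketch (added): "010" -> 0 buildings stand, "000" -> 3 stand; padding
# with sentinel zeros means each zero-run between bombs simply loses its two
# endpoint buildings, hence max(len - 2, 0) per run.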
| [
"[email protected]"
] | |
0271e2bd69581d5e5dc88b564ddc46e9e59ed80e | 06289aabd78e6a0e5e5ab8360fffbf9a8504d615 | /api/budget/serializers/expense_serializer.py | 0f5fd044d6751912c22d145c7efad0ab32499638 | [] | no_license | jtclayt/finance_planner_api | 6ca8130c761999abc01e03429a0676c0c803b640 | 06cd592e479145cbeb6acad4574021ef7515b33b | refs/heads/main | 2023-08-15T02:20:34.455483 | 2021-09-22T16:15:49 | 2021-09-22T16:15:49 | 409,044,817 | 0 | 0 | null | 2021-09-22T05:08:48 | 2021-09-22T02:54:50 | Python | UTF-8 | Python | false | false | 627 | py | from rest_framework import serializers
from ..models.budget import Budget
from ..models.expense import Expense
class ExpenseSerializer(serializers.HyperlinkedModelSerializer):
'''Serializer for list view of expenses'''
url = serializers.HyperlinkedIdentityField(view_name='budget:expense-detail')
budget = serializers.PrimaryKeyRelatedField(queryset=Budget.objects.all())
class Meta:
model = Expense
fields = (
'id', 'url', 'description', 'annual_amount', 'monthly_amount',
'budget', 'user_id', 'created_at', 'updated_at'
)
read_only_fields = ('id',)
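
# Usage sketch (added): hyperlinked serializers need the request in the
# serializer context, e.g. ExpenseSerializer(expense, context={'request': request}).data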
| [
"[email protected]"
] | |
5ef953377a82188de0c437031ecd64571429c4dd | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/special-array-with-x-elements-greater-than-or-equal-x.py | 01c11c68d89db61b02579b98174c1831b10e0923 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 3,409 | py | # Time: O(n)
# Space: O(1)
# counting sort solution
class Solution(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
n = len(nums)
for i in xrange(len(count)):
if i == n:
return i
n -= count[i]
return -1
# Time: O(n)
# Space: O(1)
# counting sort + binary search solution
class Solution2(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
while nums[i] >= 0:
count[nums[i]] -= 1
j = count[nums[i]]
nums[i], nums[j] = nums[j], ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
inplace_counting_sort(nums, reverse=True)
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(n)
# Space: O(n)
# counting sort + binary search solution
class Solution3(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def counting_sort(nums, reverse=False): # Time: O(n), Space: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
result = [0]*len(nums)
if not reverse:
for num in reversed(nums): # stable sort
count[num] -= 1
result[count[num]] = num
else:
for num in nums: # stable sort
count[num] -= 1
result[count[num]] = num
result.reverse()
return result
nums = counting_sort(nums, reverse=True) # extra O(n) space for stable sort
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(nlogn)
# Space: O(1)
# sort solution
class Solution4(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort(reverse=True) # Time: O(nlogn)
for i in xrange(len(nums)): # Time: O(n)
if nums[i] <= i:
break
else:
i += 1
return -1 if i < len(nums) and nums[i] == i else i
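
# quick check (sketch): the standard examples for this problem
if __name__ == "__main__":
    for nums, expected in [([3, 5], 2), ([0, 0], -1), ([0, 4, 3, 0, 4], 3)]:
        assert Solution().specialArray(list(nums)) == expected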
| [
"[email protected]"
] | |
d17776b6855dfcc141feea8086af080f6d09fc11 | 8bcf5bf18f6e9c1d5871ef8a88ef5921e03e9b02 | /koldunov/api/urls.py | a4efa9857dea75f924535c42e25c43c5803313cc | [] | no_license | volgoweb/rest_example | 73f5fc26cce45c0aae49247768f74ffa2f4c01d4 | 7ee8b87914d6c69c80158e7e22a6b454c3e7f76b | refs/heads/master | 2021-01-10T01:32:44.098668 | 2017-09-05T12:42:00 | 2017-09-05T12:42:00 | 51,444,569 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # -*- coding: utf-8 -*-
from rest_framework import routers
from .views.product_views import CategoryViewSet, ItemViewSet
from .views.stat_views import StatViewSet
router = routers.SimpleRouter()
router.register(r'category', CategoryViewSet)
router.register(r'item', ItemViewSet)
router.register(r'stat', StatViewSet)
urlpatterns = router.urls
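
# Route sketch (added): SimpleRouter wires each ViewSet to a list route and a
# detail route, e.g. r'item/' (list/create) and r'item/<pk>/' (retrieve/update/destroy).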
| [
"[email protected]"
] | |
fb28d6bd690ff888dfd3ea29b317ae4cf3d2be7a | 8eca0a7a9ae207113f9f9ed98d093cbe21ffcd8a | /Maths_based_problems/grid_unique_paths/solution2_dp.py | eed4c34ad0482d36605fb37189b7a03c658eb218 | [] | no_license | Ranjit007ai/-Interviewbit-Maths | 3238c720bb5e0765eef0e0e1a39549eff1ba788d | 044627422fc38ee3e5aaa9cbfc8f00398d1f9bb5 | refs/heads/main | 2023-03-25T06:06:14.206384 | 2021-03-27T14:38:16 | 2021-03-27T14:38:16 | 352,065,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | def unique_paths(m,n):
dp = [[0]*n for _ in range(0,m)]
# each position in dp show the no of way to reach their
# now the first row and first col will be 1 ,since their is 1 way to traverse from one position to another in single row or column vector
for row in range(0,m):
dp[row][0] = 1
for col in range(0,n):
dp[0][col] = 1
for row in range(1,m):
for col in range(1,n):
dp[row][col] = dp[row-1][col] + dp[row][col-1]
return dp[m-1][n-1]
m = 3
n = 3  # the original left this value blank; 3 is an assumed example size
ans = unique_paths(m,n)
print(ans)
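
# Equivalent closed form (added sketch): a path is a choice of which of the
# (m-1)+(n-1) moves go down, so the count is C(m+n-2, m-1).
from math import factorial

def unique_paths_formula(m, n):
    return factorial(m + n - 2) // (factorial(m - 1) * factorial(n - 1))

assert unique_paths_formula(m, n) == ans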
| [
"[email protected]"
] | |
b3096c51d5f0148b23157700f003b048c28d4cb6 | efd6c1d24b0a392a177679429d53dd2f515d0d95 | /bi_auth/migrations/0001_initial.py | fd577d05d960ad8b413cd4b9a52c1be60fe0f81b | [] | no_license | mitshel/TakedaAnalitic | 5ccfb4aa83a056cbeaebce03df41819c7ece7985 | b04b08fb053bff238a1ce68df423f99314827b48 | refs/heads/master | 2021-07-02T21:27:10.023816 | 2019-02-14T14:32:18 | 2019-02-14T14:32:42 | 153,908,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | # Generated by Django 2.1.2 on 2018-12-15 11:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_orgadmin', models.BooleanField(verbose_name='Администратор организации')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль',
'verbose_name_plural': 'Профили',
},
),
]
| [
"[email protected]"
] | |
5fe19acc4de946e7408e43a378612cded89edc88 | aa91f6e8d59286f65e7f6ed065823c80b7694439 | /scripts/analysis/baostock/yangxian/yangxian.py | adf483a1883a24304215af71aef322817a97af98 | [
"MIT"
] | permissive | davidyuqiwei/davidyu_stock | 7f93bcc2c50a0e2c356e3b517dbf7e2e7467093f | c234911f49d5980f2dff651333f8ca957333e094 | refs/heads/master | 2023-01-07T02:08:04.330559 | 2023-01-02T16:31:37 | 2023-01-02T16:31:37 | 163,694,812 | 13 | 2 | null | 2022-12-18T03:55:10 | 2018-12-31T20:07:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,725 | py | from davidyu_cfg import *
from functions.data_dir import *
from functions.get_datetime import *
from functions.run_combine_all_csv import *
from functions.colNames import *
from functions.day_history.kLines import klineDate
from functions.LinearReg import *
from functions.common.dfProcess import *
from functions.common.loadModule.load_module_kdj import *
from scipy.stats import linregress
def stock_data(stock_index,start_date,end_date):
df_dir = os.path.join(data_path,"history_data","baostock","2020-12-17")
df1 = pd.read_csv(os.path.join(df_dir,stock_index+".csv"))
df1 = df1[(df1["dt"]>=start_date)&(df1["dt"]<=end_date)]
df1 = df1.drop_duplicates()
df1 = df1.sort_values("date")
df1["stock_index"] = [ x[3:9] for x in df1["code"]]
return df1
def get_3_pos_line(df1):
df1["line"] = df1["close"] - df1["open"]
    # 1 marks a positive candle (close above open), 0 otherwise; .loc avoids
    # pandas' chained-assignment pitfall
    df1.loc[df1["line"] > 0, "line"] = 1
    df1.loc[df1["line"] <= 0, "line"] = 0
df1['mv_close'] = df1.close.rolling(window=3).mean()
df1['mv_close_120'] = df1.close.rolling(window=120).mean()
df1['mv_close_250'] = df1.close.rolling(window=250).mean()
df1['line_check_5'] = df1.line.rolling(window=5).sum()
df1['line_check_3'] = df1.line.rolling(window=3).sum()
df2 = df1[(df1["line_check_3"]==3)&(df1["close"]<df1['mv_close_250'])]
return df2
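# Editor's sketch (hypothetical data, not part of the original script): a tiny
# self-contained check of the indicator above -- three consecutive positive
# candles yield line_check_3 == 3 on the third day.
def _demo_three_positive_lines():
    import pandas as pd
    df = pd.DataFrame({"open": [1.0, 1.0, 1.0], "close": [2.0, 2.0, 2.0]})
    df["line"] = (df["close"] > df["open"]).astype(int)
    print(df.line.rolling(window=3).sum())  # NaN, NaN, 3.0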
if __name__ =='__main__':
stock_index = sys.argv[1]
start_date = '2017-01-01'
end_date = '2020-12-17'
try:
df1 = stock_data(stock_index,start_date,end_date)
df2 = get_3_pos_line(df1)
#df3 = df2.tail(1)
#print("{},{}".format(df2['date'].values,df2['code'].values))
print(df2[["date","code"]].to_string(index=False,header=None))
    except Exception:
        # data files for this stock index may be missing or malformed; skip it
        pass
| [
"[email protected]"
] | |
34ea96fab7aa2f5a03931e3a87d652ab5f3e629e | f6b5f0d72f3e5deb8a913d0a6d541ef3ad5445cb | /braintree/transaction.py | 5c5a315ac43e7831732e7dec0f4ba96100d19ceb | [
"MIT"
] | permissive | hathawsh/braintree_python | bf056a4d2b8c8b8094f2c876cea4782dc92c715a | 4ec0f3696438b8c2117f5917834e67ddbf3ebdc7 | refs/heads/master | 2021-01-16T00:04:08.883102 | 2013-07-23T22:11:32 | 2013-07-23T22:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,913 | py | import braintree
import urllib
import warnings
from decimal import Decimal
from braintree.add_on import AddOn
from braintree.disbursement_detail import DisbursementDetail
from braintree.discount import Discount
from braintree.successful_result import SuccessfulResult
from braintree.status_event import StatusEvent
from braintree.error_result import ErrorResult
from braintree.resource import Resource
from braintree.address import Address
from braintree.configuration import Configuration
from braintree.credit_card import CreditCard
from braintree.customer import Customer
from braintree.subscription_details import SubscriptionDetails
from braintree.resource_collection import ResourceCollection
from braintree.transparent_redirect import TransparentRedirect
from braintree.exceptions.not_found_error import NotFoundError
from braintree.descriptor import Descriptor
class Transaction(Resource):
"""
A class representing Braintree Transaction objects.
An example of creating an sale transaction with all available fields::
result = Transaction.sale({
"amount": "100.00",
"order_id": "123",
"channel": "MyShoppingCartProvider",
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2011",
"cvv": "123"
},
"customer": {
"first_name": "Dan",
"last_name": "Smith",
"company": "Braintree Payment Solutions",
"email": "[email protected]",
"phone": "419-555-1234",
"fax": "419-555-1235",
"website": "https://www.braintreepayments.com"
},
"billing": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America"
},
"shipping": {
"first_name": "Andrew",
"last_name": "Mason",
"company": "Braintree",
"street_address": "456 W Main St",
"extended_address": "Apt 2F",
"locality": "Bartlett",
"region": "IL",
"postal_code": "60103",
"country_name": "United States of America"
}
})
print(result.transaction.amount)
print(result.transaction.order_id)
For more information on Transactions, see https://www.braintreepayments.com/docs/python/transactions/create
"""
def __repr__(self):
detail_list = ["amount", "credit_card", "payment_method_token", "customer_id"]
return super(Transaction, self).__repr__(detail_list)
class CreatedUsing(object):
"""
Constants representing how the transaction was created. Available types are:
* braintree.Transaction.CreatedUsing.FullInformation
* braintree.Transaction.CreatedUsing.Token
"""
FullInformation = "full_information"
Token = "token"
class GatewayRejectionReason(object):
"""
Constants representing gateway rejection reasons. Available types are:
* braintree.Transaction.GatewayRejectionReason.Avs
* braintree.Transaction.GatewayRejectionReason.AvsAndCvv
* braintree.Transaction.GatewayRejectionReason.Cvv
* braintree.Transaction.GatewayRejectionReason.Duplicate
"""
Avs = "avs"
AvsAndCvv = "avs_and_cvv"
Cvv = "cvv"
Duplicate = "duplicate"
class Source(object):
Api = "api"
ControlPanel = "control_panel"
Recurring = "recurring"
class Status(object):
"""
Constants representing transaction statuses. Available statuses are:
* braintree.Transaction.Status.Authorized
* braintree.Transaction.Status.Authorizing
* braintree.Transaction.Status.Failed
* braintree.Transaction.Status.GatewayRejected
* braintree.Transaction.Status.ProcessorDeclined
* braintree.Transaction.Status.Settled
* braintree.Transaction.Status.SettlementFailed
* braintree.Transaction.Status.Settling
* braintree.Transaction.Status.SubmittedForSettlement
* braintree.Transaction.Status.Void
"""
AuthorizationExpired = "authorization_expired"
Authorized = "authorized"
Authorizing = "authorizing"
Failed = "failed"
GatewayRejected = "gateway_rejected"
ProcessorDeclined = "processor_declined"
Settled = "settled"
SettlementFailed = "settlement_failed"
Settling = "settling"
SubmittedForSettlement = "submitted_for_settlement"
Voided = "voided"
class Type(object):
"""
Constants representing transaction types. Available types are:
* braintree.Transaction.Type.Credit
* braintree.Transaction.Type.Sale
"""
Credit = "credit"
Sale = "sale"
@staticmethod
def clone_transaction(transaction_id, params):
return Configuration.gateway().transaction.clone_transaction(transaction_id, params)
@staticmethod
def confirm_transparent_redirect(query_string):
"""
Confirms a transparent redirect request. It expects the query string from the
redirect request. The query string should _not_ include the leading "?" character. ::
            result = braintree.Transaction.confirm_transparent_redirect("foo=bar&id=12345")
"""
warnings.warn("Please use TransparentRedirect.confirm instead", DeprecationWarning)
return Configuration.gateway().transaction.confirm_transparent_redirect(query_string)
@staticmethod
def credit(params={}):
"""
Creates a transaction of type Credit.
Amount is required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.credit({
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.credit({
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.credit({
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
params["type"] = Transaction.Type.Credit
return Transaction.create(params)
@staticmethod
def find(transaction_id):
"""
Find a transaction, given a transaction_id. This does not return
a result object. This will raise a :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>` if the provided
credit_card_id is not found. ::
transaction = braintree.Transaction.find("my_transaction_id")
"""
return Configuration.gateway().transaction.find(transaction_id)
@staticmethod
def refund(transaction_id, amount=None):
"""
Refunds an existing transaction.
It expects a transaction_id.::
result = braintree.Transaction.refund("my_transaction_id")
"""
return Configuration.gateway().transaction.refund(transaction_id, amount)
@staticmethod
def sale(params={}):
"""
Creates a transaction of type Sale. Amount is required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.sale({
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.sale({
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.sale({
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
params["type"] = Transaction.Type.Sale
return Transaction.create(params)
@staticmethod
    def search(*query):
        """
        Searches for transactions matching the given search criteria.
        """
        return Configuration.gateway().transaction.search(*query)
@staticmethod
def submit_for_settlement(transaction_id, amount=None):
"""
Submits an authorized transaction for settlement.
Requires the transaction id::
result = braintree.Transaction.submit_for_settlement("my_transaction_id")
"""
return Configuration.gateway().transaction.submit_for_settlement(transaction_id, amount)
@staticmethod
def tr_data_for_credit(tr_data, redirect_url):
"""
Builds tr_data for a Transaction of type Credit
"""
return Configuration.gateway().transaction.tr_data_for_credit(tr_data, redirect_url)
@staticmethod
def tr_data_for_sale(tr_data, redirect_url):
"""
Builds tr_data for a Transaction of type Sale
"""
return Configuration.gateway().transaction.tr_data_for_sale(tr_data, redirect_url)
@staticmethod
def transparent_redirect_create_url():
"""
Returns the url to be used for creating Transactions through transparent redirect.
"""
warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
return Configuration.gateway().transaction.transparent_redirect_create_url()
@staticmethod
def void(transaction_id):
"""
Voids an existing transaction.
It expects a transaction_id.::
result = braintree.Transaction.void("my_transaction_id")
"""
return Configuration.gateway().transaction.void(transaction_id)
@staticmethod
def create(params):
"""
Creates a transaction. Amount and type are required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
return Configuration.gateway().transaction.create(params)
@staticmethod
def clone_signature():
return ["amount", "channel", {"options": ["submit_for_settlement"]}]
@staticmethod
def create_signature():
return [
"amount", "customer_id", "device_session_id", "merchant_account_id", "order_id", "channel",
"payment_method_token", "purchase_order_number", "recurring", "shipping_address_id",
"tax_amount", "tax_exempt", "type", "venmo_sdk_payment_method_code",
"device_data",
{
"credit_card": [
"token", "cardholder_name", "cvv", "expiration_date", "expiration_month", "expiration_year", "number"
]
},
{
"customer": [
"id", "company", "email", "fax", "first_name", "last_name", "phone", "website"
]
},
{
"billing": [
"first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
"country_code_numeric", "country_name", "extended_address", "locality",
"postal_code", "region", "street_address"
]
},
{
"shipping": [
"first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
"country_code_numeric", "country_name", "extended_address", "locality",
"postal_code", "region", "street_address"
]
},
{
"options": [
"store_in_vault", "store_in_vault_on_success", "submit_for_settlement",
"add_billing_address_to_payment_method", "store_shipping_address_in_vault",
"venmo_sdk_session"
]
},
{"custom_fields": ["__any_key__"]},
{"descriptor": ["name", "phone"]}
]
def __init__(self, gateway, attributes):
if "refund_id" in attributes.keys():
self._refund_id = attributes["refund_id"]
del(attributes["refund_id"])
else:
self._refund_id = None
Resource.__init__(self, gateway, attributes)
self.amount = Decimal(self.amount)
if self.tax_amount:
self.tax_amount = Decimal(self.tax_amount)
if "billing" in attributes:
self.billing_details = Address(gateway, attributes.pop("billing"))
if "credit_card" in attributes:
self.credit_card_details = CreditCard(gateway, attributes.pop("credit_card"))
if "customer" in attributes:
self.customer_details = Customer(gateway, attributes.pop("customer"))
if "shipping" in attributes:
self.shipping_details = Address(gateway, attributes.pop("shipping"))
if "add_ons" in attributes:
self.add_ons = [AddOn(gateway, add_on) for add_on in self.add_ons]
if "discounts" in attributes:
self.discounts = [Discount(gateway, discount) for discount in self.discounts]
if "status_history" in attributes:
self.status_history = [StatusEvent(gateway, status_event) for status_event in self.status_history]
if "subscription" in attributes:
self.subscription_details = SubscriptionDetails(attributes.pop("subscription"))
if "descriptor" in attributes:
self.descriptor = Descriptor(gateway, attributes.pop("descriptor"))
if "disbursement_details" in attributes:
self.disbursement_details = DisbursementDetail(attributes.pop("disbursement_details"))
@property
def refund_id(self):
warnings.warn("Please use Transaction.refund_ids instead", DeprecationWarning)
return self._refund_id
@property
def vault_billing_address(self):
"""
The vault billing address associated with this transaction
"""
return self.gateway.address.find(self.customer_details.id, self.billing_details.id)
@property
def vault_credit_card(self):
"""
The vault credit card associated with this transaction
"""
if self.credit_card_details.token is None:
return None
return self.gateway.credit_card.find(self.credit_card_details.token)
@property
def vault_customer(self):
"""
The vault customer associated with this transaction
"""
if self.customer_details.id is None:
return None
return self.gateway.customer.find(self.customer_details.id)
@property
def is_disbursed(self):
return self.disbursement_details.is_valid
| [
"[email protected]"
] | |
b468d83e6f86299cc5a6da5cc3813594132a55dc | 30b232051b10753e9103a70d88a387dfa1aca63f | /164.py | 2f3c183e1682f60665db64f6c436ac296f2bf23b | [] | no_license | samrithasudhagar/guvi2 | fe6d7af8a73cef515991524d7abad754c3700dc5 | f7eb8a8b2cd701c2708c414939cc139414d3310d | refs/heads/master | 2020-04-20T12:27:47.748859 | 2019-05-26T09:45:42 | 2019-05-26T09:45:42 | 168,843,977 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | n,k=map(int,input().split())
l=list(map(int,input().split()))
m = 0
# print k itself if it appears in the list, otherwise the largest element smaller than k
if k in l:
    print(k)
else:
    for i in l:
        if i > m and i < k:
            m = i
    print(m)
| [
"[email protected]"
] | |
8fb33330b1462f23987648fc31eb06140b7e5caa | 1e03cd80d27d35ffdc8f68f70a36a461eaae4b9d | /apps/common/views.py | 9da3528729470edad1fb1663ca5e9291ee3c0179 | [] | no_license | paddy375691/flask_zlbbs | bee8f15497c58bd5f1f614d6a686b93301f93d0a | 90fb142b33aecca6ff66013953ecf6e3e39b6139 | refs/heads/master | 2023-02-04T07:55:00.798789 | 2020-12-25T08:51:13 | 2020-12-25T08:51:13 | 324,265,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py |
from flask import Blueprint
bp = Blueprint('common', __name__, url_prefix='/common')
@bp.route('/')
def index():
return 'common index'
| [
"[email protected]"
] | |
3ad52e8c095b3ad50975940c78e30707311ab01e | 32ba9f1c35ae916d33b121daeeea8e1910a447d7 | /utils/tag.py | 99083b27f2ee3987fa2f8e96f2665c4f0533c66f | [
"MIT"
] | permissive | rituparna/glTools | 8b02fa2751e1b997f7a202c7df8a3dd3d3032722 | c512a96c20ba7a4ee93a123690b626bb408a8fcd | refs/heads/master | 2020-03-19T19:23:47.684580 | 2018-06-10T23:53:58 | 2018-06-10T23:53:58 | 136,853,456 | 0 | 0 | null | 2018-06-10T23:46:54 | 2018-06-10T23:46:53 | null | UTF-8 | Python | false | false | 2,893 | py | import maya.cmds as mc
import glTools.tools.namingConvention
class Tag( object ):
def __init__(self):
'''
'''
self.nameTagAttr = 'nameTag'
def addNameTag(self,control,tag):
'''
Set the name tag value for the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
		# Add tag attribute only if it doesn't already exist
		if not mc.objExists(control+'.'+self.nameTagAttr):
			mc.addAttr(control,ln=self.nameTagAttr,dt='string')
mc.setAttr(control+'.'+self.nameTagAttr,tag,type='string')
def getNameTag(self,control):
'''
Return the name tag value of the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
# Check tag attribute
if not mc.objExists(control+'.'+self.nameTagAttr): raise Exception('Object '+control+' does not have a "'+self.nameTagAttr+'" attribute!')
# Return tag string value
return mc.getAttr(control+'.'+self.nameTagAttr)
def guessNameTag(self,control,side=True,part=True,optSide=True,subPart=True,node=False):
'''
		Return a best guess name tag based on a control's current name.
Uses name element comparison to our naming convention module.
'''
tag = ''
# Get naming convention dictionaries
nameConvention = glTools.tools.namingConvention.NamingConvention()
sideDict = dict((value, key) for key, value in nameConvention.side.iteritems())
partDict = dict((value, key) for key, value in nameConvention.part.iteritems())
subPartDict = dict((value, key) for key, value in nameConvention.subPart.iteritems())
nodeDict = dict((value, key) for key, value in nameConvention.node.iteritems())
# Get name elements
controlElem = control.split(nameConvention.delineator)
controlElemCnt = len(controlElem)
controlElemInd = 0
# Check number of elements
if controlElemCnt < 3: print 'Warning: Name pattern does not match naming convention'
# Get side
if side and sideDict.has_key(controlElem[controlElemInd]):
if controlElem[controlElemInd] != nameConvention.side['center']:
tag += sideDict[controlElem[controlElemInd]].capitalize()
controlElemInd += 1
else: return
# Get part
if part and partDict.has_key(controlElem[controlElemInd][0:-2]):
tag += partDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
else: return
# Get optional side
if optSide and sideDict.has_key(controlElem[controlElemInd][0:-2]):
tag += sideDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get sub-part
if subPart and subPartDict.has_key(controlElem[controlElemInd][0:-2]):
tag += subPartDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get type
if node and nodeDict.has_key(controlElem[controlElemInd]):
tag += nodeDict[controlElem[controlElemInd]].capitalize()
return tag
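# Example usage inside a Maya session (editor's sketch; the control name is
# hypothetical and a running Maya scene is assumed):
#
# 	tag = Tag()
# 	tag.addNameTag('lf_arm01_ctrl','LfArm')
# 	print tag.getNameTag('lf_arm01_ctrl')    # -> 'LfArm'
# 	print tag.guessNameTag('lf_arm01_ctrl')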
| [
"[email protected]"
] | |
e037ce0f746846b6294b60c884db7209be1e7464 | efbc8c73e9ac5cbcb9321518ab06b3965369a5f0 | /SWEA/D2/1974_스도쿠 검증.py | 5c70e3a29c30f7e85436a3f0b4edb86ade4466a6 | [] | no_license | AshOil/APS | 56b9395dcbb8eeec87a047407d4326b879481612 | fe5a2cd63448fcc4b11b5e5bc060976234ed8eea | refs/heads/master | 2023-07-15T17:32:20.684742 | 2021-08-23T13:04:05 | 2021-08-23T13:04:05 | 283,709,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | import sys
sys.stdin = open('input_data/1974.txt',"r")
num_dict = {}
T = int(input())
for t in range(1, T+1):
for tt in range(1,10):
        num_dict[tt] = list(map(int, input().split()))
result = True
    # check the rows first
for hori in num_dict.values():
if sorted(hori) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
    # check the columns
for num in range(9):
verti_check = []
for verti in num_dict.values():
verti_check.append(verti[num])
if sorted(verti_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
    # check the 3x3 blocks
line_start = 0
line_end = 3
block_list = list(num_dict.values())
for __ in range(3):
turn_block_list = block_list[line_start:line_end]
block_start = 0
block_end = 3
for _ in range(3):
block_check = []
for turn in range(3):
for block in turn_block_list[turn][block_start:block_end]:
block_check.append(block)
block_start += 3
block_end += 3
if sorted(block_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
line_start += 3
line_end += 3
if result:
print('#{} 1'.format(t))
else:
print('#{} 0'.format(t))
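# Editor's sketch (not part of the original solution): the triple-loop block
# scan above can be expressed as a small helper over a 0-indexed 9x9 grid.
def block_is_valid(grid, br, bc):
    # collect the 3x3 block whose top-left corner is (3*br, 3*bc)
    block = [grid[r][c]
             for r in range(3 * br, 3 * br + 3)
             for c in range(3 * bc, 3 * bc + 3)]
    return sorted(block) == list(range(1, 10))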
| [
"[email protected]"
] | |
49adb1a0d02abd33be4f5345e463f2839479722a | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/short/max_pwr/job18.py | 8d86f952590173e5884246a93a2efab702b53071 | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,216 | py | """
#Trains a DenseNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# creates an file if job qualified for checkpoint
open('ckpt_qual/' + job_name + '.txt', 'a').close()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
try:
os.rename('finish.json', 'finish_lock.json')
break
except Exception:
pass
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
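# Editor's note: the read-modify-write blocks above (pid, checkpoint, finish)
# all rely on the same convention -- a JSON file is "locked" by renaming it to
# *_lock.json (os.rename is atomic on POSIX), edited, then renamed back.
# A hypothetical helper capturing that pattern:
def update_locked_json(path, key, value):
    lock_path = path.replace('.json', '_lock.json')
    while True:
        try:
            os.rename(path, lock_path)  # acquire: atomic rename
            break
        except OSError:
            time.sleep(1)  # another process holds the lock
    with open(lock_path, 'r') as fp:
        data = json.load(fp)
    data[key] = value
    with open(lock_path, 'w') as fp:
        fp.write(json.dumps(data))
    os.rename(lock_path, path)  # release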
| [
"[email protected]"
] | |
1fc74891fa1324f804b07585e2b154d9b49afdf6 | de681ebfa95a07c04fbb1280bf722847b06ee548 | /migrations/versions/3fd0d7bc25ea_create_tag_model.py | c5297e733152e5145f95a89eca64b85173b984bb | [] | no_license | Dzhoker/flask-lessons | 156957ed29a674df474cfc6b8cdca12adae021d7 | 590e436516dbd8a3a9af4ad33aafbc854088a6aa | refs/heads/master | 2023-03-18T09:44:19.431920 | 2021-02-18T02:52:07 | 2021-02-18T02:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | """create Tag model
Revision ID: 3fd0d7bc25ea
Revises: 1b2fd89e61b5
Create Date: 2021-01-12 07:40:03.728879
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3fd0d7bc25ea'
down_revision = '1b2fd89e61b5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=32), server_default='', nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
12180bacf865e009ab34847ca2fc32c7d48d7f9b | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_ddos_custom_policies_operations.py | 9bdb5268477680f052fb35e51b998db264bd5970 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 19,277 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations(object):
"""DdosCustomPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
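    # Example usage (editor's sketch; the resource-group and policy names are
    # hypothetical, and a configured NetworkManagementClient instance is assumed):
    #
    #     poller = client.ddos_custom_policies.begin_create_or_update(
    #         "my-rg", "my-ddos-policy", {"location": "westus"})
    #     policy = poller.result()  # blocks until the long-running operation ends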
| [
"[email protected]"
] | |
b332b14aa6f86a6013de1b87f5c0920504478890 | 4be56098894a95da5964622fc4102b69e4530ab6 | /题库/870.矩阵中的幻方.py | 00fdbac5b41f5978d6246140777b7c174e01c850 | [] | no_license | ACENDER/LeetCode | 7c7c7ecc8d0cc52215272f47ec34638637fae7ac | 3383b09ab1246651b1d7b56ab426a456f56a4ece | refs/heads/master | 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 870.矩阵中的幻方.py
| [
"[email protected]"
] | |
1bd246b511bcb25535f008e43dec5d7633a97690 | 2c112f781016f2022dc7ff1c616b1f57185fe8f8 | /tests/conftest.py | 34a2935ba0d6d69229c0b0455e16b60a8fcb1f85 | [] | no_license | dominicgs/Website | c15312a5b081b42db880b99df6811c8c04777824 | fc3587daacff20ec3ab590df121c9f693f09a8ce | refs/heads/master | 2020-03-21T16:49:07.492309 | 2018-06-26T21:41:50 | 2018-06-26T21:41:50 | 64,015,414 | 0 | 1 | null | 2016-07-23T12:33:16 | 2016-07-23T12:33:16 | null | UTF-8 | Python | false | false | 2,265 | py | " PyTest Config. This contains global-level pytest fixtures. "
import os
import os.path
import pytest
import shutil
from models.user import User
from main import create_app, db as db_obj, Mail
from utils import CreateBankAccounts, CreateTickets
@pytest.fixture(scope="module")
def app():
""" Fixture to provide an instance of the app.
This will also create a Flask app_context and tear it down.
This fixture is scoped to the module level to avoid too much
Postgres teardown/creation activity which is slow.
"""
if 'SETTINGS_FILE' not in os.environ:
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
os.environ['SETTINGS_FILE'] = os.path.join(root, 'config', 'test.cfg')
tmpdir = os.environ.get('TMPDIR', '/tmp')
prometheus_dir = os.path.join(tmpdir, 'emf_test_prometheus')
os.environ['prometheus_multiproc_dir'] = prometheus_dir
if os.path.exists(prometheus_dir):
shutil.rmtree(prometheus_dir)
if not os.path.exists(prometheus_dir):
os.mkdir(prometheus_dir)
app = create_app()
with app.app_context():
try:
db_obj.session.close()
        except Exception:
            pass
db_obj.drop_all()
db_obj.create_all()
CreateBankAccounts().run()
CreateTickets().run()
yield app
db_obj.session.close()
db_obj.drop_all()
@pytest.fixture
def client(app):
" Yield a test HTTP client for the app "
yield app.test_client()
@pytest.fixture
def db(app):
" Yield the DB object "
yield db_obj
@pytest.fixture
def request_context(app):
" Run the test in an app request context "
with app.test_request_context('/') as c:
yield c
@pytest.fixture
def user(db):
" Yield a test user. Note that this user will be identical across all tests in a module. "
email = '[email protected]'
user = User.query.filter(User.email == email).one_or_none()
if not user:
user = User(email, 'Test User')
db.session.add(user)
db.session.commit()
yield user
@pytest.fixture
def outbox(app):
" Capture mail and yield the outbox. "
mail_obj = Mail()
with mail_obj.record_messages() as outbox:
yield outbox
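# Example (editor's sketch; the endpoint and form fields are hypothetical):
#
#     def test_signup_sends_mail(client, outbox):
#         client.post('/signup', data={'email': '[email protected]'})
#         assert len(outbox) == 1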
| [
"[email protected]"
] | |
c80abae38d1dabb5dfaa1cc1b9606faa528421bd | 13b72e5c48f5f7213d9a569f699dc1554bc363dd | /demo/libdemo/list_git__repos.py | 35f238aa8d2d69b1030b7d8cfefa92fded15d932 | [] | no_license | srikanthpragada/PYTHON_02_MAR_2021 | 6997fece4ad627bb767c0bca5a5e166369087e68 | 5dfd0c471378bd22379ac0d66f8785d4d315017b | refs/heads/master | 2023-04-04T20:28:22.738042 | 2021-04-17T14:19:48 | 2021-04-17T14:19:48 | 344,498,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import requests
user = "srikanthpragada"
response = requests.get(f"https://api.github.com/users/{user}/repos")
if response.status_code != 200:
print(f"Sorry! Could not get details for {user} from github!")
exit()
repos = response.json()  # decode the JSON body (a list of repo dicts)
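# Note (editor's addition): this endpoint is paginated -- GitHub returns 30
# repositories per page by default; for users with more repos, request e.g.
# requests.get(url, params={"per_page": 100, "page": 2}).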
for repo in repos:
print(repo['name'])
print(repo['description'])
print('-' * 50) | [
"[email protected]"
] | |
e1f046bf1125c305df03d5d353029498f0cbe882 | 56d41bbc6b5d831ba699ad4a44f5880ba3d195c8 | /thread_sync.py | bcd0c1730eb098adb4c5bb1a0e3dc4d595662b6d | [] | no_license | pigliangliang/2018-07-05-08 | 4635e4dc1926f3f17eae7f607a0b188f6aaf9f43 | ba95331f4b0cc0316377a5c67f86d03e8cc257b8 | refs/heads/master | 2020-03-22T08:51:08.209718 | 2018-07-08T07:58:06 | 2018-07-08T07:58:06 | 139,795,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | #author_by zhuxiaoliang
#2018-07-05 3:39 PM
"""
A. Semaphore
In multithreaded programming, to keep different threads from modifying a shared
resource (such as a global variable) at the same time, the number of concurrent
accesses (usually 1) must be limited. Semaphore synchronization is based on an
internal counter: every acquire() call decrements the counter and every
release() call increments it; when the counter is 0, acquire() blocks.
"""
import time
from random import random
from threading import Thread,Semaphore,enumerate
sema = Semaphore(3)
def foo(tid):
    with sema:
        print('thread %d entered the critical section' % tid)
        time.sleep(random())  # hold the semaphore briefly
        print('thread %d leaving the critical section' % tid)
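# A minimal driver (editor's sketch): start five workers against the
# three-slot semaphore; at most three of them are ever inside the
# with-block at once.
if __name__ == '__main__':
    threads = [Thread(target=foo, args=(i,)) for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()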
| [
"[email protected]"
] | |
6844ce56ffa18f4d971b348110a9f410a1502c7e | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/input/func_noerror_yield_assign_py25.py | f40d8d96e837e9022fc2596b23ce8733990a450c | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 387 | py | """http://www.logilab.org/ticket/8771"""
from __future__ import print_function
def generator():
"""yield as assignment"""
yield 45
xxxx = yield 123
print(xxxx)
def generator_fp1(seq):
"""W0631 false positive"""
for val in seq:
pass
for val in seq:
yield val
def generator_fp2():
"""E0601 false positive"""
xxxx = 12
yield xxxx
| [
"[email protected]"
] | |
40dbe20a67504c37c5be2abfab99add67569df21 | 585bac463cb1919ac697391ff130bbced73d6307 | /36_ValidSudoku/solution1.py | 91569e66f6dee678a57f5dd30000308804bcd22a | [] | no_license | llgeek/leetcode | ce236cf3d3e3084933a7a4a5e8c7766f7f407285 | 4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c | refs/heads/master | 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
seen = set()
for i in range(len(board)):
for j in range(len(board[0])):
val = board[i][j]
if val == '.': continue
if str(val) + 'in row ' + str(i) in seen:
return False
seen.add(str(val) + 'in row ' + str(i))
if str(val) + 'in column ' + str(j) in seen:
return False
seen.add(str(val) + 'in column ' + str(j))
if str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3) in seen:
return False
seen.add(str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3))
return True | [
"[email protected]"
] | |
bf439e9862b4ae08f44e047b1d51ff58c9ae6f67 | c6666d0235d1d03ed9a5a2d1a3cfa9ccc9d9e88c | /webcore/migrations/0001_initial.py | bc167afd1bfb17d3738481c8cc02fc4ac5b3fcf0 | [] | no_license | boiyelove/savingsensei | 67dc8a5690c7599dd126159837af6e567852aa73 | 8acd46ebd770b9e18f64e14ff08bfd2ddbcc0edc | refs/heads/master | 2021-10-20T01:32:10.775234 | 2019-02-25T03:27:31 | 2019-02-25T03:27:31 | 172,426,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Banner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('desc', models.CharField(max_length=60)),
('btn_link', models.URLField()),
('btn_title', models.CharField(max_length=18)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=30)),
('content', models.TextField()),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
4b1ecbe8bfc1dfb288e7e30b8ba859c26d6a53c9 | b13ca274b4463c9900840ee6516094b7509b6041 | /empower/lvapp/lvaphandler.py | b5c42204161a1e3cc0f451116cafa63d304a1803 | [
"Apache-2.0"
] | permissive | imec-idlab/sdn_wifi_manager | 09d206f2f649aa715752d3c44e011d3f54faf592 | eda52649f855722fdec1d02e25a28c61a8fbda06 | refs/heads/master | 2021-06-23T08:03:22.482931 | 2020-12-03T11:30:10 | 2020-12-03T11:30:10 | 162,106,793 | 0 | 0 | Apache-2.0 | 2019-03-27T16:23:31 | 2018-12-17T09:33:47 | Python | UTF-8 | Python | false | false | 3,522 | py | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LVAPs Handerler."""
import tornado.web
import tornado.httpserver
from empower.datatypes.etheraddress import EtherAddress
from empower.restserver.apihandlers import EmpowerAPIHandler
from empower.core.resourcepool import ResourceBlock
from empower.main import RUNTIME
class LVAPHandler(EmpowerAPIHandler):
"""LVAP handler. Used to view LVAPs (controller-wide)."""
HANDLERS = [r"/api/v1/lvaps/?",
r"/api/v1/lvaps/([a-zA-Z0-9:]*)/?"]
def get(self, *args, **kwargs):
""" Get all LVAPs or just the specified one.
Args:
lvap_id: the lvap address
Example URLs:
GET /api/v1/lvaps
GET /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) > 1:
raise ValueError("Invalid URL")
if not args:
self.write_as_json(RUNTIME.lvaps.values())
else:
lvap = EtherAddress(args[0])
self.write_as_json(RUNTIME.lvaps[lvap])
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(200, None)
def put(self, *args, **kwargs):
""" Set the WTP for a given LVAP, effectivelly hands-over the LVAP to
another WTP
Args:
lvap_id: the lvap address
Request:
version: the protocol version (1.0)
Example URLs:
PUT /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) != 1:
raise ValueError("Invalid URL")
request = tornado.escape.json_decode(self.request.body)
if "version" not in request:
raise ValueError("missing version element")
lvap_addr = EtherAddress(args[0])
lvap = RUNTIME.lvaps[lvap_addr]
if "wtp" in request:
wtp_addr = EtherAddress(request['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
lvap.wtp = wtp
elif "blocks" in request:
pool = []
for block in request["blocks"]:
wtp_addr = EtherAddress(block['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
hwaddr = EtherAddress(block['hwaddr'])
channel = int(block['channel'])
band = int(block['band'])
r_block = ResourceBlock(wtp, hwaddr, channel, band)
pool.append(r_block)
lvap.blocks = pool
if "encap" in request:
encap = EtherAddress(request["encap"])
lvap.encap = encap
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(204, None)
| [
"[email protected]"
] | |
1e273a85868f0f6b461bfd41551779c6a908e717 | eab72229ae04d1160704cbf90a08a582802a739c | /pipeline.py | 951739aed5ac7ad0818e105dbff2397a48108344 | [
"MIT"
] | permissive | megatazm/Crowd-Counting | 444d39b0e3d6e98995f53badf4c073829038b6b7 | 647a055baccee2c3b6b780f38930e2ffd14d1664 | refs/heads/master | 2022-04-01T04:49:16.409675 | 2020-01-31T21:24:02 | 2020-01-31T21:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import os
# Crop area
#os.system("python3 crop.py")
## APPROACH 1 MCNN
os.system("python3 put_zero_image.py")
os.system("python3 test.py")
os.system("python3 put_zero_den.py")
os.system("python3 find_people.py")
os.system("python3 position.py")
## APPROACH 2 - RNN
#os.system("python3 tiny_face_eval.py --weight_file_path weight --prob_thresh 0.04 --nms_thresh 0.0")
## TRACKING
# Put heads into file
#os.system("python3 get_heads.py")
# Track heads among videos
#os.system("python3 track_video.py")
| [
"[email protected]"
] | |
934e6966fbd17ae8a420204911909a52151bbaf6 | 8d5f49fa1fda8ffc473e7f5a62786c77838a5820 | /website/load_tests/drawquest/test_scripts/utils.py | e305eef730b14c15bd7911f0cf1ade88885204ff | [
"BSD-3-Clause"
] | permissive | MichaelBechHansen/drawquest-web | dfc6f5d9541860a5df23db678e82564a230bd42e | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | refs/heads/master | 2021-01-14T10:30:10.861222 | 2015-11-10T03:13:42 | 2015-11-10T03:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | import json
import uuid
import requests
PASSWORD = 'testpassword'
#QUEST_ID = 658
#QUEST_ID = 926 #staging smilie
QUEST_ID = 7004
PLAYBACK_DATA = ''
TEST_USERNAME = 'test_account__'
TEST_PASSWORD = 'testaccount'
class ApiError(Exception):
pass
class HttpError(Exception):
pass
class ApiConsumer(object):
def __init__(self):
self.session_id = None
def call(self, endpoint, params={}):
payload = json.dumps(params)
headers = {
'content-type': 'application/json',
}
if self.session_id:
headers['X-SESSIONID'] = self.session_id
ret = requests.post('http://api.staging.example.com/' + endpoint, data=payload, headers=headers)
if ret.status_code != 200:
raise HttpError(ret.status_code)
if not ret.json.get('success'):
raise ApiError(ret.json)
return ret.json
def signup(self, username=None):
if not username:
username = '_TEST_' + str(uuid.uuid4())[-10:].replace('-', '_')
ret = self.call('auth/signup', {
'username': username,
'email': '{}@example.example'.format(username),
'password': PASSWORD,
})
self.session_id = ret['sessionid']
def heavy_state_sync(self):
return self.call('heavy_state_sync')
def onboarding_quest(self):
return self.call('quests/onboarding')
def quest_comments(self, quest_id):
return self.call('quests/comments', {'quest_id': quest_id})
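# Hedged usage sketch (assumes the staging endpoints above are reachable):
#     api = ApiConsumer()
#     api.signup()                       # registers a throwaway '_TEST_*' user
#     state = api.heavy_state_sync()     # JSON dict with 'success' == True
#     comments = api.quest_comments(QUEST_ID)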
class DrawquestTransaction(object):
def __init__(self):
self.custom_timers = {}
def main(trans_cls):
trans = trans_cls()
trans.run()
print trans.custom_timers
| [
"[email protected]"
] | |
5dbd16bad92c13444eb77d53b650fba51d099460 | 7f8cebd9315129bcdb7ef220dc449cda26a19ce4 | /models/aetanh.py | bcff65d94ee5b2f960314125e4beb4f15db6e754 | [] | no_license | KaiqianZhang/dpcca_v8 | 75477b1768905b6c41838c8da9ff77fba13b5a45 | 1b65fc0c3ec6b182907ba070e859c1d92fc98942 | refs/heads/master | 2020-08-30T09:32:58.485684 | 2019-11-11T17:34:55 | 2019-11-11T17:34:55 | 218,334,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | """=============================================================================
Autoencoder.
============================================================================="""
import numpy as np
from torch import nn
# ------------------------------------------------------------------------------
class AETanH(nn.Module):
def __name__(self):
return 'AE'
# ------------------------------------------------------------------------------
def __init__(self, cfg):
super(AETanH, self).__init__()
assert cfg.GENE_EMBED_DIM < 12
self.nc = cfg.N_CHANNELS
self.w = cfg.IMG_SIZE
self.input_dim = cfg.N_GENES
self.encoder = nn.Sequential(
nn.Linear(self.input_dim, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, cfg.GENE_EMBED_DIM)
)
self.decoder = nn.Sequential(
nn.Linear(cfg.GENE_EMBED_DIM, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, self.input_dim)
)
# ------------------------------------------------------------------------------
def encode(self, x):
x = x.view(-1, np.prod(x.shape[1:]))
return self.encoder(x)
# ------------------------------------------------------------------------------
def decode(self, z):
x = self.decoder(z)
return x.view(-1, self.input_dim)
# ------------------------------------------------------------------------------
def forward(self, x):
x = self.encode(x)
x = self.decode(x)
return x
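# Minimal shape check (the cfg attributes are assumptions mirroring __init__):
#     import torch
#     from types import SimpleNamespace
#     cfg = SimpleNamespace(N_CHANNELS=1, IMG_SIZE=28, N_GENES=784, GENE_EMBED_DIM=8)
#     AETanH(cfg)(torch.randn(4, 784)).shape  # -> torch.Size([4, 784])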
| [
"[email protected]"
] | |
5824f026706f22fed9333ce3b0f3cdc2674fb5cf | afb7d4d6013b6a9022d707d5835a3dd578214b2e | /Bite_172.py | d38f7db655c51e87afd6b54e249df6347f9a2efa | [] | no_license | JB0925/Bites | 86f0bd49d8b53376257c14df280ae0a9643139a2 | f884ce4ffd7ce39afcea5b86a80cec14c607a4f0 | refs/heads/master | 2023-03-29T21:48:42.849729 | 2021-03-29T01:37:48 | 2021-03-29T01:37:48 | 316,419,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from functools import partial
# create 2 partials:
# - 'rounder_int' rounds to int (0 places)
# - 'rounder_detailed' rounds to 4 places
rounder_int = 0
rounder_detailed = 0
def round_to_int(num, places):
return round(num, places)
rounder_int = partial(round_to_int, places=0)
rounder_detailed = partial(round_to_int, places=4)
print(rounder_detailed(10.4232567))
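# rounder_int rounds to 0 places but still returns a float, e.g. 10.0 here
print(rounder_int(10.4232567))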
| [
"[email protected]"
] | |
6bb7901debec9f9ddd547ba4fb9d52462ca74c58 | fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4 | /everydays/BookBeingRead/python高级编程/day12.1.py | 0d2033516213b11dfa91ea44119d6e37e17ceb4c | [] | no_license | jake20001/Hello | be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605 | 08217871bb17152eb09e68cd154937ebe5d59d2c | refs/heads/master | 2021-07-10T09:48:15.883716 | 2021-04-23T14:49:03 | 2021-04-23T14:49:03 | 56,282,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/12/18 11:05
# FileName : day12.1
# Description :
# --------------------------------
import os
class DublinCoreAdapter(object):
def __init__(self,filename):
self._filename = filename
def title(self):
return os.path.splitext(self._filename)[0]
    def creator(self):
        return "Someone"
    def language(self):
        return ('en',)
class DublinCoreInfo(object):
    def summary(self, dc_ob):
        print('Title %s' % dc_ob.title())
        print('Creator %s' % dc_ob.creator())
        print('Language %s' % ','.join(dc_ob.language()))
adapter = DublinCoreAdapter('1.txt')
infos = DublinCoreInfo()
infos.summary(adapter)
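# Expected output:
#   Title 1
#   Creator Someone
#   Language en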
| [
"[email protected]"
] | |
12036ced2dc9a7de9f3d4d79fc1ad4e7fbcbe6cd | 8fef8af953e8dafde78c671e8ee9813d08ab2d60 | /trees/BST/LowestCommAncestor.py | f1e104c3496e69731c8a6af520b879abc8aa4736 | [
"MIT"
] | permissive | htrahddis-hub/DSA-Together-HacktoberFest | 037b009c744863070e0f1b61167c18f9101335f2 | a5c6165c449c5b5b91e56815f2a38d5fd23bf354 | refs/heads/main | 2023-08-23T18:52:55.654386 | 2021-10-17T15:45:14 | 2021-10-17T15:45:14 | 418,180,825 | 1 | 0 | MIT | 2021-10-17T15:56:21 | 2021-10-17T15:56:21 | null | UTF-8 | Python | false | false | 2,808 | py | # Link to the problem :https://practice.geeksforgeeks.org/problems/lowest-common-ancestor-in-a-bst/1#
#Function to find the lowest common ancestor in a BST.
# We are looking for a node which is closest to both the nodes
def LCA(root, n1, n2):
#code here.
while(root):
# If the root is greater than both nodes , then we are looking for something smaller , so go to left
if(root.data > n1 and root.data > n2):
root = root.left
# If the root is smaller than both nodes , then we are looking for something greater than this and go to right
elif(root.data < n1 and root.data < n2):
root = root.right
#If the root is not greater or smaller then we have found something closest to both the nodes , so returns the root
else:
break
return root
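# Worked example on a small BST (illustrative values, not judge input):
#            20
#           /  \
#         10    25
#        /  \
#       5    15
#           /  \
#         12    14
# LCA(root, 5, 14):
#   at 20: 5 < 20 and 14 < 20 -> both targets are smaller, move left to 10
#   at 10: 5 < 10 but 14 > 10 -> targets straddle the node, so 10 is the LCA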
#{
# Driver Code Starts
#Initial Template for Python 3
from collections import deque
# Tree Node
class Node:
def __init__(self, val):
self.right = None
self.data = val
self.left = None
# Function to Build Tree
def buildTree(s):
#Corner Case
if(len(s)==0 or s[0]=="N"):
return None
# Creating list of strings from input
# string after spliting by space
ip=list(map(str,s.split()))
# Create the root of the tree
root=Node(int(ip[0]))
size=0
q=deque()
# Push the root to the queue
q.append(root)
size=size+1
# Starting from the second element
i=1
while(size>0 and i<len(ip)):
# Get and remove the front of the queue
currNode=q[0]
q.popleft()
size=size-1
# Get the current node's value from the string
currVal=ip[i]
# If the left child is not null
if(currVal!="N"):
# Create the left child for the current node
currNode.left=Node(int(currVal))
# Push it to the queue
q.append(currNode.left)
size=size+1
# For the right child
i=i+1
if(i>=len(ip)):
break
currVal=ip[i]
# If the right child is not null
if(currVal!="N"):
# Create the right child for the current node
currNode.right=Node(int(currVal))
# Push it to the queue
q.append(currNode.right)
size=size+1
i=i+1
return root
if __name__=="__main__":
t=int(input())
for _ in range(0,t):
s=input()
root=buildTree(s)
n1,n2=list(map(int,input().split()))
        print(LCA(root, n1, n2).data)
# } Driver Code Ends | [
"[email protected]"
] | |
1eba6ca236ff4f6105330a8c2c4442d3537a21a8 | 00b762e37ecef30ed04698033f719f04be9c5545 | /scripts/test_results/scikit-learn_test_results/conflicts/52_bench_sgd_covertype_actual.py | f88cf076b4bf0da384e6c9ba249ccf1ec8f143b1 | [] | no_license | kenji-nicholson/smerge | 4f9af17e2e516333b041727b77b8330e3255b7c2 | 3da9ebfdee02f9b4c882af1f26fe2e15d037271b | refs/heads/master | 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,491 | py | """
================================
Covertype dataset with dense SGD
================================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset of
Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is low-
dimensional with 54 features and a sparsity of approx. 23%. Here, we consider
the task of predicting class 1 (spruce/fir). The classification performance of
SGD is competitive with Liblinear while being two orders of magnitude faster to
train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
Liblinear 11.8977s 0.0285s 0.2305
GaussianNB 3.5931s 0.6645s 0.6367
SGD 0.2924s 0.0114s 0.2300
CART 39.9829s 0.0345s 0.0476
RandomForest 794.6232s 1.0526s 0.0249
Extra-Trees 1401.7051s 1.1181s 0.0230
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
To run this example use your favorite python shell::
% ipython benchmark/bench_sgd_covertype.py
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <[email protected]>
# License: BSD Style.
# $Id$
from time import time
import os
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn import metrics
######################################################################
## Download the data, if not already on disk
if not os.path.exists('covtype.data.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (11MB)..."
opener = urllib.urlopen(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
open('covtype.data.gz', 'wb').write(opener.read())
######################################################################
## Load dataset
print("Loading dataset...")
import gzip
f = gzip.open('covtype.data.gz')
X = np.fromstring(f.read().replace(",", " "), dtype=np.float64, sep=" ",
count=-1)
X = X.reshape((581012, 55))
f.close()
# class 1 vs. all others.
y = np.ones(X.shape[0]) * -1
y[np.where(X[:, -1] == 1)] = 1
X = X[:, :-1]
######################################################################
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
train_idx = idx[:522911]
test_idx = idx[522911:]
X_train = X[train_idx]
y_train = y[train_idx]
X_test = X[test_idx]
y_test = y[test_idx]
# free memory
del X
del y
######################################################################
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %d (%d, %d)" % ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == -1)))
print("%s %d (%d, %d)" % ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == -1)))
print("")
print("Training classifiers...")
print("")
######################################################################
## Benchmark classifiers
def benchmark(clf):
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
return err, train_time, test_time
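# NOTE: in the scikit-learn release this script targets, metrics.zero_one
# returns the *count* of misclassified samples, so the division by the
# number of predictions above turns it into an error rate.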
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
}
liblinear_res = benchmark(LinearSVC(**liblinear_parameters))
liblinear_err, liblinear_train_time, liblinear_test_time = liblinear_res
######################################################################
## Train GaussianNB model
gnb_err, gnb_train_time, gnb_test_time = benchmark(GaussianNB())
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
}
sgd_err, sgd_train_time, sgd_test_time = benchmark(SGDClassifier(
**sgd_parameters))
######################################################################
## Train CART model
cart_err, cart_train_time, cart_test_time = benchmark(
    DecisionTreeClassifier(min_split=5,
                           max_depth=None))
######################################################################
## Train RandomForest model
## NOTE: the n_estimators / min_split values below are illustrative
## assumptions, not tuned settings.
rf_err, rf_train_time, rf_test_time = benchmark(
    RandomForestClassifier(n_estimators=20, min_split=5))
######################################################################
## Train Extra-Trees model
et_err, et_train_time, et_test_time = benchmark(
    ExtraTreesClassifier(n_estimators=20, min_split=5))
######################################################################
## Train GB model (left disabled: far slower to train on this dataset)
## print("Training GB model")
## gb_err, gb_train_time, gb_test_time = benchmark(
##     GradientBoostingClassifier(min_split=5, max_depth=10, n_iter=20,
##                                learn_rate=.8, subsample=0.5))
######################################################################
## Print classification performance
print("")
print("Classification performance:")
print("===========================")
print("")
def print_row(clf_type, train_time, test_time, err):
    print("%s %s %s %s" % (clf_type.ljust(12),
                           ("%.4fs" % train_time).center(10),
                           ("%.4fs" % test_time).center(10),
                           ("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier  ", "train-time", "test-time",
                       "error-rate"))
print("-" * 44)
print_row("Liblinear", liblinear_train_time, liblinear_test_time,
          liblinear_err)
print_row("GaussianNB", gnb_train_time, gnb_test_time, gnb_err)
print_row("SGD", sgd_train_time, sgd_test_time, sgd_err)
print_row("CART", cart_train_time, cart_test_time, cart_err)
print_row("RandomForest", rf_train_time, rf_test_time, rf_err)
print_row("Extra-Trees", et_train_time, et_test_time, et_err)
## print_row("GB", gb_train_time, gb_test_time, gb_err)
print("")
print("")
| [
"[email protected]"
] | |
516ddce9995ee16a9c3d14b282864b36283da25f | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/eve/common/lib/eveLocalization/__init__.py | a26d9acf4f873ae1332caf2913e0b18ee75e8119 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\lib\eveLocalization\__init__.py
from _evelocalization import * | [
"[email protected]"
] | |
9107cd52b4f5cb29c06fa7c3b10e07dbb89fe3a2 | e230e3c1d6935d36b7074390f096d782cabd75af | /dailyfresh/settings.py | 520e1cbe63fe0018a6d3e7702bc98f883808c38e | [] | no_license | PeterZhangxing/dailyfresh_ori | 603e7e42457d27ffefb6a4601f9b6826a3a55a6f | 19b6d667d6f49a528aeb6f4430e2537c933936f0 | refs/heads/master | 2020-12-02T01:41:32.160278 | 2019-12-30T04:24:50 | 2019-12-30T04:24:50 | 230,846,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,607 | py | """
Django settings for dailyfresh project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h2)2bq3(3=-9a#8m$t-ci9t91o*tr%xs%@3g2^e-4^)i$(335l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'tinymce', # rich text editor
    'haystack', # register the full-text search framework
    'user', # user module
    'goods', # goods module
    'cart', # shopping cart module
    'order', # order module
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dailyfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dailyfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dailyfresh',
'USER': 'zx2005',
'PASSWORD': 'redhat',
'HOST': '10.1.1.128',
'PORT':3306,
}
}
# Tell Django's built-in auth system which model class to use
AUTH_USER_MODEL = 'user.User'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans' # localization: simplified Chinese
TIME_ZONE = 'Asia/Shanghai' # localization: local time zone
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
# Rich text editor configuration
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advance',
'width': 600,
'height': 400,
}
# Email sending configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# SMTP server address
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 25
# mailbox used to send the emails
EMAIL_HOST_USER = '[email protected]'
# client authorization password configured in the mailbox settings
EMAIL_HOST_PASSWORD = 'cdbnlajjhfctbjhb'
# sender name shown to recipients
EMAIL_FROM = '天天吃屎<[email protected]>'
# Django cache configuration
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://10.1.1.128:6379/9",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Store sessions in the cache configured above
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Default redirect URL when Django's built-in auth requires a login
LOGIN_URL = '/user/login'
# Set Django's default file storage class
DEFAULT_FILE_STORAGE = 'utils.fdfs.storage.FdfsStorage'
# Path of the client.conf file used by FastDFS
FDFS_CLIENT_CONF = './utils/fdfs/client.conf'
# IP and port of nginx on the FastDFS storage server
FDFS_URL = 'http://10.1.1.128:8888/'
# Full-text search framework configuration
HAYSTACK_CONNECTIONS = {
    'default': {
        # use the whoosh engine (whoosh_cn_backend adds Chinese word segmentation)
        # 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        # index file path
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    }
}
# Rebuild the index automatically when data is added, modified or deleted
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# Number of search results shown per page
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 1
"[email protected]"
] | |
c564381b8a3786274c292ddc6a57ed24ad1e6895 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03681/s311284212.py | d8b25fb6e7dd6d5384882806daa8e1c440d5c178 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | n, m = map(int, input().split())
mod = 10 ** 9 + 7
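# Dogs and monkeys must alternate, so a valid row exists only when the two
# counts differ by at most 1. Within the alternating pattern each species can
# be permuted freely (n! * m!), and when n == m the row may start with either
# species, doubling the count.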
if abs(n - m) >= 2:
print(0)
else:
res = 1
for i in range(1, n+1):
res = res * i % mod
for i in range(1, m+1):
res = res * i % mod
if abs(n - m) == 1:
print(res)
else:
print(res * 2 % mod) | [
"[email protected]"
] | |
fbc9661d70e561d78342cfa587b4a738aa00e9e6 | c85ec43e50f81f8e20c883eae9e06a5c8c621f8e | /caldera/utils/__init__.py | b2967128d628a732cece629c50a123db23a166f8 | [
"MIT"
] | permissive | jvrana/caldera | b6cc0faed560df6bfa15a3f460fed4ea18b8a55a | a346324e77f20739e00a82f97530dda4906f59dd | refs/heads/master | 2023-04-27T04:19:05.499430 | 2021-03-09T16:37:50 | 2021-03-09T16:37:50 | 266,161,720 | 0 | 0 | MIT | 2020-08-12T01:40:48 | 2020-05-22T16:49:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,795 | py | r"""
Caldera utility functions.
.. autosummary::
:toctree: generated/
dict_join
# pairwise
Indexing
--------
.. autosummary::
:toctree: generated/
reindex_tensor
unravel_index
Tensor
------
Utilities for :class:`torch.Tensor`
.. autosummary::
:toctree: generated/
scatter_coo
scatter_indices
torch_coo_to_scipy_coo
deterministic_seed
long_isin
same_storage
stable_arg_sort_long
tensor_is_empty
torch_scatter_group
Functional
----------
Functional programming module.
.. autosummary::
:toctree: generated/
:recursive:
functional
Networkx Utilities
------------------
Extra :mod:`networkx` utilities
.. autosummary::
:toctree: generated/
:recursive:
"""
from ._dict_join import dict_join
from ._iteration import _first
from ._iteration import pairwise
from caldera.utils.indexing import reindex_tensor
from caldera.utils.indexing import unravel_index
from caldera.utils.np import replace_nan_with_inf
from caldera.utils.sparse import scatter_coo
from caldera.utils.sparse import scatter_indices
from caldera.utils.sparse import torch_coo_to_scipy_coo
from caldera.utils.tensor import deterministic_seed
from caldera.utils.tensor import long_isin
from caldera.utils.tensor import same_storage
from caldera.utils.tensor import stable_arg_sort_long
from caldera.utils.tensor import tensor_is_empty
from caldera.utils.tensor import torch_scatter_group
__all__ = [
"reindex_tensor",
"unravel_index",
"scatter_coo",
"scatter_indices",
"torch_coo_to_scipy_coo",
"deterministic_seed",
"long_isin",
"same_storage",
"stable_arg_sort_long",
"tensor_is_empty",
"torch_scatter_group",
"dict_join",
"pairwise",
"_first",
"replace_nan_with_inf",
]
| [
"[email protected]"
] | |
db704e6bc73086f4b513638afc26cfca69671862 | 4618c0152d45bcb5f54e298661a1479c643353f4 | /pyengine/api/v1/GetProductDetail.py | d47126c2ec1ccde23882d215ccd30c526680ccaf | [] | no_license | pyengine/catalog | 07312fb7606f6ff0b7e55359740af4a4e5d509f4 | 2403563c1f93d988466a12b870ce25475b0d1d92 | refs/heads/master | 2021-01-10T04:47:24.200088 | 2016-04-13T15:04:47 | 2016-04-13T15:04:47 | 55,772,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from pyengine.lib.error import *
from pyengine.lib.command import Command
class GetProductDetail(Command):
# Request Parameter Info
req_params = {
'uuid': ('r', 'str'),
}
def __init__(self, api_request):
super(self.__class__, self).__init__(api_request)
def execute(self):
mgr = self.locator.getManager('ProductManager')
info = mgr.getProductDetail(self.params)
return info.result()
| [
"[email protected]"
] | |
1f8eb3d5d29c1b02e07895acc3f612ee67858941 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/__init__.py | 0eecc2d4add5bb3dd32255bc3b601767a6121cd0 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,516 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class ipv6_address(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/interface/loopback/ipv6/ipv6-config/address/ipv6-address. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__address','__eui64','__anycast',)
_yang_name = 'ipv6-address'
_rest_name = 'ipv6-address'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__eui64 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'interface', u'loopback', u'ipv6', u'ipv6-config', u'address', u'ipv6-address']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Loopback', u'ipv6', u'address', u'ipv6-address']
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/address (union)
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/address (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """address must be of a type compatible with union""",
'defined-type': "brocade-ipv6-config:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)""",
})
self.__address = t
if hasattr(self, '_set'):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
def _get_eui64(self):
"""
Getter method for eui64, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/eui64 (empty)
"""
return self.__eui64
def _set_eui64(self, v, load=False):
"""
Setter method for eui64, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/eui64 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_eui64 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_eui64() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """eui64 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__eui64 = t
if hasattr(self, '_set'):
self._set()
def _unset_eui64(self):
self.__eui64 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="eui64", rest_name="eui-64", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address with an automatically computed EUI-64 interface Id', u'alt-name': u'eui-64'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
def _get_anycast(self):
"""
Getter method for anycast, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
"""
return self.__anycast
def _set_anycast(self, v, load=False):
"""
Setter method for anycast, mapped from YANG variable /routing_system/interface/loopback/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__anycast = t
if hasattr(self, '_set'):
self._set()
def _unset_anycast(self):
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
address = __builtin__.property(_get_address, _set_address)
eui64 = __builtin__.property(_get_eui64, _set_eui64)
anycast = __builtin__.property(_get_anycast, _set_anycast)
_pyangbind_elements = {'address': address, 'eui64': eui64, 'anycast': anycast, }
| [
"[email protected]"
] | |
f6d5e74ffb4eeffb6b0b056154516157ac157cef | 1edf3c5a83c3d9d1fe63a38a67db667c89ee0642 | /botTester/unpredictable.py | 0d9e45bcfb074f7611b9ecea581d00d73c3ce08a | [] | no_license | roctbb/GoTo | e0ebbb9a70cd4c123a43d980299da043bc8537c2 | 6ba9cca18e106acc2e6d441dd98b597e397ec211 | refs/heads/master | 2020-05-21T20:13:52.486735 | 2018-11-18T09:38:35 | 2018-11-18T09:38:35 | 61,876,224 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | __author__ = 'roctbb'
from random import choice
def step(history):
    # "камень", "ножницы", "бумага" = rock, scissors, paper (the game
    # expects the Russian tokens, so the string values stay as-is)
    return choice(["камень", "ножницы", "бумага"])
"[email protected]"
] | |
abf7eb515ae21d5ef3f410269569113c07252f57 | 22b3e3b9d5137575f4a9c8b70703ffaecfe9a5a8 | /gauss1.py | 34384098fbae7656a4f61a8bb77a8c6f8855db6d | [] | no_license | abdcelikkanat/expemb | d2cee75fa5b533a294a3da2349cef326c627fc2e | e5180e9bceceba507cf4d6438541ea6d6ca541ab | refs/heads/master | 2020-03-22T09:58:20.236304 | 2018-07-18T15:53:14 | 2018-07-18T15:53:14 | 139,872,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename, local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename + ' Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]] # unknown word
count.extend(collections.Counter(words).most_common(n_words - 1)) # get the most common words
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary) # label each word with a number
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
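# Each printed line pairs a center word with one of its context words; with
# num_skips=2 and skip_window=1 every center word appears twice, once per
# immediate neighbour.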
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
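  # Because both operands have unit-length rows, this matrix product equals the
  # cosine similarity between each validation word and every vocabulary word.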
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| [
"[email protected]"
] | |
0f40308406e38359eb00bd87c471b5f1ff5f6778 | 0fba89a7703d883231decbb5b748d4df22832e6a | /recipe_scrapers/_decorators.py | 87fb968ea2b97ba5373f8906f7b9acf4f19879ef | [
"MIT"
] | permissive | tobiaghiraldini/recipe-scrapers | c66f1fb448f6e696677ec95d43a595be8470e890 | 1ced80d25dcc6e88877c26187990f112f3134e67 | refs/heads/master | 2022-07-04T20:31:07.114353 | 2020-05-20T10:42:26 | 2020-05-20T10:42:26 | 262,996,294 | 0 | 0 | MIT | 2020-05-11T09:23:45 | 2020-05-11T09:23:45 | null | UTF-8 | Python | false | false | 1,895 | py | import functools
from language_tags import tags
from ._schemaorg import SchemaOrgException
class Decorators:
@staticmethod
def schema_org_priority(decorated):
"""
Use SchemaOrg parser with priority (if there's data in it)
On exception raised - continue by default.
If there's no data (no schema implemented on the site) - continue by default
"""
@functools.wraps(decorated)
def schema_org_priority_wrapper(self, *args, **kwargs):
            function = getattr(self.schema, decorated.__name__, None)
            if not function:
                raise SchemaOrgException(
                    "Function '{}' not found in schema"
                    .format(decorated.__name__)
                )
if not self.schema.data:
return decorated(self, *args, **kwargs)
try:
value = function(*args, **kwargs)
except SchemaOrgException:
return decorated(self, *args, **kwargs)
return value or decorated(self, *args, **kwargs)
return schema_org_priority_wrapper
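    # Hypothetical usage sketch (scraper and selector names are assumptions):
    #     class MySiteScraper(AbstractScraper):
    #         @Decorators.schema_org_priority
    #         def title(self):
    #             # plain-HTML fallback, used when schema.org data is absent
    #             return self.soup.find('h1').get_text()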
@staticmethod
def og_image_get(decorated):
@functools.wraps(decorated)
def og_image_get_wrapper(self, *args, **kwargs):
try:
image = self.soup.find(
'meta',
{'property': 'og:image', 'content': True}
)
return image.get('content')
except AttributeError:
return decorated(self, *args, **kwargs)
return og_image_get_wrapper
@staticmethod
def bcp47_validate(decorated):
@functools.wraps(decorated)
def bcp47_validate_wrapper(self, *args, **kwargs):
tag = tags.tag(decorated(self, *args, **kwargs))
return str(tag) if tag.valid else None
return bcp47_validate_wrapper
| [
"[email protected]"
] | |
d9e5e750b84c63450d958537f59dbc8b3863f3b4 | 2194df5490666825d382e6e47bd33139b1faf0df | /vtools/videotoimage.py | ff6b9cb5e919adadbff64930f5eb8a56adafd551 | [] | no_license | aiporre/video_tools | a88a3134c6148bd384c71e846aeab49da6bfab8e | f955c22fc7259a4b45592f522bb80f0533e6093d | refs/heads/master | 2021-08-02T21:03:53.344844 | 2021-07-28T16:45:57 | 2021-07-28T16:45:57 | 213,970,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,568 | py | import cv2
import argparse
import os
import sys
from tqdm import tqdm
class VideoToImage(object):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
self.output_path = output_path
self.frame_counter = 0
# resolution of the video
self.frame_width = int(self.capture.get(3))
self.frame_height = int(self.capture.get(4))
self.n_frames = int(self.capture.get(7))
self.extension = extension
self.prefix = prefix
self.padding = padding
def update(self):
# Read the next frame
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
self.frame_counter +=1
def show_frame(self):
# Convert to grayscale and display frames
if self.status:
cv2.imshow('frame', self.frame)
# Press 'q' on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
cv2.destroyAllWindows()
exit(1)
def save_frame(self):
# Save grayscale frame into video output file
if self.status: # self.capture.isOpened():
if self.padding > 0:
filename = os.path.join(self.output_path, self.prefix + "{1:0{0}}".format(self.padding,self.frame_counter) + self.extension)
else:
filename = os.path.join(self.output_path, self.prefix + str(self.frame_counter) + self.extension)
cv2.imwrite(filename, self.frame)
    def close(self, exit_program=False):
        # Release the capture device and close any preview windows; optionally
        # terminate the program (the old `exit` parameter shadowed the builtin).
        self.capture.release()
        cv2.destroyAllWindows()
        if exit_program:
            sys.exit(1)
class VideoToGrayImage(VideoToImage):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
super(VideoToGrayImage,self).__init__(src=src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
def update(self):
super().update()
if self.status:
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
def run(video_src, output_path=None, extension ='.png', plot='n', prefix='frame_', padding=-1, gray = 'y'):
    '''
    Convert a video into per-frame image files. If no output_path is given,
    the frames are written to a 'video_images' folder next to the video.
    '''
if output_path is None:
output_path = os.path.dirname(video_src)
output_path = os.path.join(output_path,'video_images')
if not os.path.exists(output_path):
os.mkdir(output_path)
if gray == 'y':
video_stream_widget = VideoToGrayImage(video_src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
else:
video_stream_widget = VideoToImage(video_src, output_path=output_path, extension=extension, prefix=prefix, padding=padding)
if plot == 'y':
        print('stop conversion by pressing q')
for _ in tqdm(range(video_stream_widget.n_frames)):
if video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
else:
video_stream_widget.close()
video_stream_widget.close()
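
# Example invocation (hypothetical path): extract grayscale PNG frames into
# <video_dir>/video_images without showing a preview window:
#
#     run('/path/to/clip.avi', extension='.png', plot='n', padding=6)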
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert to gray avi videos.')
parser.add_argument('--target', metavar='target', type=str,
help='target avi video full path')
parser.add_argument('--output', metavar='output', type=str,
help='output path where the images are saved')
parser.add_argument('--plot', metavar='plot', type=str, default='y',
                        help='show video during conversion flag (y (default) or n)')
parser.add_argument('--extension', metavar='extension', type=str, default='.jpg',
                        help='extension of the image output (default: .jpg)')
args = parser.parse_args()
video_src = args.target
print(video_src)
video_stream_widget = VideoToGrayImage(video_src, output_path = args.output, extension = args.extension)
    print('stop conversion by pressing q')
while video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if args.plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
| [
"[email protected]"
] | |
dab5a55c04a4f4242ed5725c95704470f8d27791 | aa30891b324f86fe9c6a3eeeb6a9b8ae64b7d81d | /ex043.py | 3f7ab5d16e10be268d9e4f0765ca04086af2ad88 | [] | no_license | JoamirS/Exercicios-Python | 0055c5f73b9d0fb2d5d780c620bb0c4840c7d1b8 | 09b74babdfdf7142254a8d14132859e52f7b52b6 | refs/heads/master | 2023-08-31T13:21:03.310332 | 2023-08-26T03:42:58 | 2023-08-26T03:42:58 | 178,745,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | '''
Desenvolva uma lógica que leia o peso e a altura de uma pessoa, calcule seu IMC e mostre seu status, de acordo
com a tabela abaixo:
- Abaixo de 18.5: Abaixo do Peso | - Entre 18.5 e 25: Peso ideal | 25 até 30: Sobrepeso | 30 até 40: Obesidade
- Acima de 40: Obesidade Morbida
'''
#Declarando as variáveis
print('\033[31mExemplo: KG 70\033[0;0m')
weight = float(input('Digite seu peso: KG '))
print('\033[31mExemplo: M 1.85\033[0;0m')
height = float(input('Digite sua altura: M '))
imc = weight / (height ** 2)
print('O IMC desta pessoa é {:.1f}'.format(imc))
#Declarando as condições
if imc < 18.5:
print('Você está abaixo do peso')
elif 18.5 <= imc < 25:
print('Você está na faixa de peso ideal')
elif 25 <= imc < 30:
print('Sobrepeso')
elif 30 <= imc < 40:
print('Obesidade')
elif imc >= 40:
print('Obesidade Mórbida')
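# Worked example: weight 70 kg, height 1.85 m -> BMI = 70 / 1.85**2 = 70 / 3.4225
# which is roughly 20.5, inside the 18.5-25 band, so the ideal-weight message prints.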
| [
"[email protected]"
] | |
caa5d7f22e33db8b41abcb461289fd84c5a814ee | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/78/24413/submittedfiles/main.py | eab508f3756a8f0f59276fbd4bed79017c152c6b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # -*- coding: utf-8 -*-
from __future__ import division
import funcoes
# START HERE
m = int(input('Enter the value of m: '))
e = input('Enter the value of epsilon: ')
m=funcoes.absoluto(m)
pi=funcoes.pi(m)
cosseno=funcoes.cosseno(pi/5,e)
razaoaurea=funcoes.razaoaurea(m,e)
print('%.15f' %pi)
print('%.15f' %razaoaurea)
| [
"[email protected]"
] | |
b0b53b387467c7290b49d7c01a16691b782d9100 | 951b605ea41da28dccba6d3de63fb9211b7ad5b1 | /Mains/main.py | f3cabc1b8e650a5af81217b1b118a57e8a7327f4 | [
"MIT"
] | permissive | tiangeluo/DefectiveCNN | 99296f7a86efd3c4d044701f4e94388989cbd66a | fdbf5235adffa846630fadb4ff910de50870c077 | refs/heads/master | 2022-01-29T14:23:10.198712 | 2022-01-08T22:20:54 | 2022-01-08T22:20:54 | 222,830,775 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,648 | py | '''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
#from models import *
from resnet import ResNet18
#from resnet_drop import ResNet18
from utils import progress_bar
from torch.optim.lr_scheduler import MultiStepLR
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
#net = VGG('VGG19')
#net = ResNet18()
# net = PreActResNet18()
#net = GoogLeNet()
#net = DenseNet121()
#net = ResNet50()
#net = ResNeXt29_2x64d()
# net = MobileNet()
#net = MobileNetV2()
#net = DPN92()
# net = ShuffleNetG2()
#net = SENet18()
net = ResNet18()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t7')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[150,250], gamma=0.1)
# Training
def train(epoch):
scheduler.step()
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt7.t7')
best_acc = acc
for epoch in range(start_epoch, start_epoch+350):
train(epoch)
if epoch % 5 == 0:
test(epoch)
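
# Typical invocations (assumed; not documented in this repo):
#   python main.py --lr 0.1     # train from scratch
#   python main.py --resume     # continue from ./checkpoint/ckpt.t7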
| [
"[email protected]"
] | |
e47a993956b0cf5a138d15c01e3ad44563245394 | 023167de90034d0ac4e3695db5d0fc419e298247 | /flash_examples/serve/translation/inference_server.py | f8f9c8dbce012db0f244cc5b81026cd00ef424f8 | [
"Apache-2.0"
] | permissive | dlangerm/lightning-flash | 9e66e90f86d597d362e5c307e391b623f509c092 | 892f7594fff40ebc4333b62a5c4e73b02549fb82 | refs/heads/master | 2023-08-06T22:11:56.467679 | 2021-09-29T11:00:22 | 2021-09-29T11:00:22 | 406,909,766 | 0 | 0 | Apache-2.0 | 2021-09-15T20:04:16 | 2021-09-15T20:04:15 | null | UTF-8 | Python | false | false | 754 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flash.text import TranslationTask
model = TranslationTask.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/translation_model_en_ro.pt")
model.serve()
| [
"[email protected]"
] | |
13b2b9c390f93b4c58274db5a361c530327c3a2b | bbe74f172bf1f1cca1c77bd249c6f9a97ca897a4 | /probs11-20/prob13.py | 0e84fcee2846a2e45db91e51d3eefd773b8d39cf | [] | no_license | kruthar/euler | 5b32b7780502ff82e855c0c9670c91aff3938c5d | 18a59531f2108074de3a7db29a77017663753abc | refs/heads/master | 2021-01-13T13:19:54.723543 | 2016-02-22T14:53:45 | 2016-02-22T14:53:45 | 52,280,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,357 | py | __author__ = 'kruthar'
'''
Large Sum
Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690
'''
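# Python integers have arbitrary precision, so the one hundred 50-digit numbers
# can be summed directly; slicing the decimal string of the total yields the
# first ten digits.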
f = open('../data/data-prob13.txt', 'r')
total = 0
for line in f.readlines():
    total += int(line)
f.close()
print str(total)[0:10]
"[email protected]"
] | |
19a5eb94d0a3c8ccb52b085d6825e08f5a8062ca | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /atcoder/soundhound2018-summer-qual/c.py | 93091c2550ea9792540a7ddf7fe97eb7d9c2060f | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 296 | py | #!/usr/bin/env python3
# https://soundhound2018-summer-qual.contest.atcoder.jp/tasks/soundhound2018_summer_qual_c
n, m, d = map(int, input().split())
if d == 0: print('{:.10f}'.format((m - 1) / n))
else:
t = n * (n - 1) // 2
print('{:.10f}'.format((m - 1) * (n - 1) * (n - d) / (t * n)))
| [
"[email protected]"
] | |
8a2eb862ad50edda68a729d3dc9f11fc97df64e8 | ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb | /examples/docs_snippets/docs_snippets_tests/concepts_tests/resources_tests/test_resources.py | 5e07b899452a7f25971b2a9d834e8dd7bb8a8a0f | [
"Apache-2.0"
] | permissive | dagster-io/dagster | 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | fe21995e0402878437a828c6a4244025eac8c43b | refs/heads/master | 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 | Apache-2.0 | 2023-09-14T21:57:37 | 2018-04-30T16:30:04 | Python | UTF-8 | Python | false | false | 1,581 | py | from dagster import build_init_resource_context, build_op_context
from docs_snippets.concepts.resources.resources import (
cereal_fetcher,
connect,
db_connection,
db_resource,
do_database_stuff_dev,
do_database_stuff_job,
do_database_stuff_prod,
op_requires_resources,
test_cm_resource,
test_my_resource,
test_my_resource_with_context,
use_db_connection,
uses_db_connection,
)
def test_cereal_fetcher():
assert cereal_fetcher(None)
def test_database_resource():
class BasicDatabase:
def execute_query(self, query):
pass
op_requires_resources(build_op_context(resources={"database": BasicDatabase()}))
def test_resource_testing_examples():
test_my_resource()
test_my_resource_with_context()
test_cm_resource()
def test_resource_deps_job():
result = connect.execute_in_process()
assert result.success
def test_resource_config_example():
dbconn = db_resource(build_init_resource_context(config={"connection": "foo"}))
assert dbconn.connection == "foo"
def test_jobs():
assert do_database_stuff_job.execute_in_process().success
assert do_database_stuff_dev.execute_in_process().success
assert do_database_stuff_prod.execute_in_process().success
def test_cm_resource_example():
with db_connection() as db_conn:
assert db_conn
def test_cm_resource_op():
with build_op_context(resources={"db_connection": db_connection}) as context:
use_db_connection(context)
def test_build_resources_example():
uses_db_connection()
| [
"[email protected]"
] | |
1856c7c864ac34d62c6c9bc7de93fbbd76a236f0 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2009.1/x11/terminal/xterm/actions.py | 0bc996b345bff3ffad1468eeeffc1e93bc0c3d83 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.export("CC", get.CC())
autotools.configure(" \
--disable-full-tgetent \
--with-app-defaults=/usr/share/X11/app-defaults \
--disable-desktop \
--with-utempter \
--with-tty-group=tty \
--enable-256-color \
--enable-exec-xterm \
--enable-freetype \
--enable-luit \
--enable-wide-chars \
--enable-warnings \
")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.removeDir("/usr/share/pixmaps")
pisitools.dodoc("README.i18n", "xterm.log.html", "ctlseqs.txt", "16colors.txt")
| [
"[email protected]"
] | |
0a53ab68989d286f013da079bf2fa922a9c6acde | 8dd000d05a29cece1460fd48c4f6b12c56281ca1 | /ugly/default_settings.py | e3dea2571f19db51dc2da821e5e99b015d7ad1a8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | imclab/ugly | 3e2717982b6e999c99d3e884d7f4d48d08fc7609 | bc09834849184552619ee926d7563ed37630accb | refs/heads/master | 2021-01-15T19:35:38.835572 | 2014-01-08T20:24:56 | 2014-01-08T20:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Flask stuff.
DEBUG = False
TESTING = False
SECRET_KEY = "development key"
# App stuff.
ADMIN_EMAIL = "Ugly Reader <[email protected]>"
BASE_MAILBOX = "[Ugly Reader]"
AES_KEY = b"test AES key... change this in production"
MAX_FEEDS = 100
# Database stuff.
SQLALCHEMY_DATABASE_URI = "postgresql://localhost/ugly"
# Google OAuth stuff.
GOOGLE_OAUTH2_CLIENT_ID = None
GOOGLE_OAUTH2_CLIENT_SECRET = None
| [
"[email protected]"
] | |
8f55e7fc73404cd650b20ca669fd313db96f1b3c | 4c67112b8e4c1ed7fd2f636a0dcee4972eeb79e6 | /deployment/GPT2/encoder.py | f6508e866e80f4de9aaa34474e404aae72cbb3bd | [
"MIT"
] | permissive | t04glovern/gpt2-k8s-cloud-run | 700cc8da97e8b42ca39fb0aed9a26f7edebb090b | 687a20f76c3e53f917ea9553e569be52deb323d6 | refs/heads/master | 2023-06-04T14:07:50.532901 | 2022-09-03T12:58:48 | 2022-09-03T12:58:48 | 180,802,919 | 8 | 1 | MIT | 2023-05-22T21:56:35 | 2019-04-11T13:53:44 | Python | UTF-8 | Python | false | false | 4,156 | py | """Byte pair encoding utilities"""
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def get_encoder():
with open('./GPT2/encoder.json', 'r') as f:
encoder = json.load(f)
with open('./GPT2/vocab.bpe', 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
) | [
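
# Minimal roundtrip sketch (assumes ./GPT2/encoder.json and ./GPT2/vocab.bpe
# exist, as get_encoder() expects):
#
#     enc = get_encoder()
#     ids = enc.encode("Hello world")   # list of BPE token ids
#     assert enc.decode(ids) == "Hello world"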
"[email protected]"
] | |
dfea14f587580d86c76f3dbc73c65587e1154af8 | faaf12ab18978082233c09628b815a69e73868e4 | /leetcode/algorithms/easy/keep_multiplying_found_values_by_two.py | 9d03b8b29a25813877514664235bcbeb70bc846b | [
"WTFPL"
] | permissive | ferhatelmas/algo | 6826bcf0be782cb102c1ee20dce8d4345e1fd6d2 | 7b867f6d2c8a9fb896f464168b50dfc115617e56 | refs/heads/master | 2023-08-18T19:59:58.435696 | 2023-08-14T10:16:00 | 2023-08-14T10:16:00 | 3,813,734 | 27 | 16 | WTFPL | 2020-10-25T23:00:16 | 2012-03-23T23:43:31 | Java | UTF-8 | Python | false | false | 212 | py | from typing import List
class Solution:
def findFinalValue(self, nums: List[int], original: int) -> int:
s = set(nums)
o = original
while o in s:
o *= 2
return o
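
# Example: Solution().findFinalValue([5, 3, 6, 1, 12], 3) returns 24
# (3 -> 6 -> 12 -> 24; 24 is not in nums, so the loop stops there).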
| [
"[email protected]"
] | |
5a43f55a19e3c63e780c242dc3f5a1013c94a070 | a951ccc03e99ae61178ab85f6db0fd5968709280 | /prefix_sums/genomic_range.py | 04f8f6028e9ab8f7e8919e44da513188dc5cd481 | [] | no_license | mmanishh/codilitysolution | 37142e66c25f786ef7bedaebbe0b164e50ff7804 | d3487be50e52861cc59d3651e996d4d23cb32613 | refs/heads/master | 2021-07-07T12:58:07.651699 | 2020-08-07T10:00:21 | 2020-08-07T10:00:21 | 163,286,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py |
def genomic_range(S,P,Q):
S = list(S)
new_s = []
result = []
impact = {'A':1,'C':2,'G':3,'T':4}
for s in S:
new_s.append(impact[s])
for i in range(len(P)):
l ,r = P[i] , Q[i]
sliced = new_s[l:r+1]
result.append(min(sliced))
return result
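

# The version above rescans each slice, which is O(N*M) overall. A prefix-sums
# variant (this folder's theme) answers each query in O(1) after O(N) setup by
# counting, per impact factor 1..4, how many occurrences fall inside [l, r].
def genomic_range_prefix(S, P, Q):
    impact = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
    n = len(S)
    # counts[k][i] = occurrences of impact factor k in S[0:i]
    counts = [[0] * (n + 1) for _ in range(5)]
    for i, ch in enumerate(S):
        for k in range(1, 5):
            counts[k][i + 1] = counts[k][i] + (1 if impact[ch] == k else 0)
    result = []
    for l, r in zip(P, Q):
        for k in range(1, 5):
            if counts[k][r + 1] - counts[k][l] > 0:
                result.append(k)
                break
    return result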
if __name__ == '__main__':
S = 'CAGCCTA'
P = [2,5,0]
Q = [4,5,6]
print(genomic_range(S,P,Q))
| [
"[email protected]"
] | |
0c3685cd9f60cf9fab17887921f148cea4932610 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortCalendarWindow.py | 96f439b6351f27ea524c3190daac96e5559db5f9 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 11,877 | py | # 2017.05.04 15:23:16 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortCalendarWindow.py
import BigWorld
from collections import defaultdict
from helpers import time_utils
from helpers.i18n import makeString as _ms
from gui import makeHtmlString
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortCalendarWindowMeta import FortCalendarWindowMeta
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS
from gui.Scaleform.locale.MENU import MENU
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.shared.utils import toLower
from gui.shared.fortifications.fort_seqs import BATTLE_ITEM_TYPE
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.fort_formatters import getDivisionIcon
class FortCalendarWindow(FortViewHelper, FortCalendarWindowMeta):
class TIME_LIMITS:
LOW = FORTIFICATION_ALIASES.ACTIVE_EVENTS_PAST_LIMIT * time_utils.ONE_DAY
HIGH = FORTIFICATION_ALIASES.ACTIVE_EVENTS_FUTURE_LIMIT * time_utils.ONE_DAY
def __init__(self, ctx):
super(FortCalendarWindow, self).__init__()
self.__selectedDate = ctx.get('dateSelected') or time_utils.getCurrentTimestamp()
def getCalendar(self):
return self.components.get(VIEW_ALIAS.CALENDAR)
def startCalendarListening(self):
calendar = self.getCalendar()
if calendar is not None:
calendar.onMonthChangedEvent += self.onMonthChanged
calendar.onDateSelectedEvent += self.onDateSelected
return
def stopCalendarListening(self):
calendar = self.getCalendar()
if calendar is not None:
calendar.onMonthChangedEvent -= self.onMonthChanged
calendar.onDateSelectedEvent -= self.onDateSelected
return
def onMonthChanged(self, timestamp):
self.__selectedDate = timestamp
self._populateMonthEvents()
self._populateCalendarMessage()
def onDateSelected(self, timestamp):
self.__selectedDate = timestamp
self._populatePreviewBlock()
def onWindowClose(self):
self.destroy()
def onFortBattleChanged(self, cache, item, battleItem):
self._update()
def onFortBattleRemoved(self, cache, battleID):
self._update()
def _populateMonthEvents(self):
calendar = self.getCalendar()
if calendar is not None:
result = []
for dayStartTimestamp, battles in self._getBattlesByDay().iteritems():
if time_utils.isFuture(dayStartTimestamp):
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_HEADER, count=len(battles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTUREBG
elif time_utils.isToday(dayStartTimestamp):
finishedBattles = [ b for b in battles if b.isEnded() ]
upcomingBattles = [ b for b in battles if b.isPlanned() ]
if not upcomingBattles:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_HEADER, count=len(finishedBattles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPASTBG
else:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_HEADER, count=len(upcomingBattles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTUREBG
else:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_HEADER, count=len(battles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPASTBG
result.append({'tooltipHeader': tooltipHead,
'tooltipBody': tooltipBody,
'iconSource': iconSource,
'rawDate': dayStartTimestamp})
calendar.as_updateMonthEventsS(result)
return
def _populatePreviewBlock(self):
fort = self.fortCtrl.getFort()
localDateTime = time_utils.getDateTimeInLocal(self.__selectedDate)
targetDayStartTimestamp, _ = time_utils.getDayTimeBoundsForLocal(self.__selectedDate)
eventItems, dateInfo, noEventsText = [], None, None
dateString = _ms(MENU.DATETIME_SHORTDATEFORMATWITHOUTYEAR, weekDay=_ms('#menu:dateTime/weekDays/full/%d' % localDateTime.isoweekday()), monthDay=localDateTime.day, month=toLower(_ms('#menu:dateTime/months/full/%d' % localDateTime.month)))
if not self._isValidTime(self.__selectedDate):
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_NOTAVAILABLE)
else:
for dayStartTimestamp, battles in self._getBattlesByDay().iteritems():
if dayStartTimestamp == targetDayStartTimestamp:
for battle in sorted(battles):
startTimestamp = battle.getStartTime()
battleHasEnded = battle.isEnded()
opponentsClanInfo = battle.getOpponentClanInfo()
if battle.getType() == BATTLE_ITEM_TYPE.ATTACK:
if battleHasEnded:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_OFFENCEPAST
else:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_OFFENCEFUTURE
titleTpl = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TITLE_OFFENCE)
else:
if battleHasEnded:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPAST
else:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTURE
titleTpl = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TITLE_DEFENCE)
tankIconVO = getDivisionIcon(battle.defenderFortLevel, battle.attackerFortLevel, determineAlert=battle.getType() == BATTLE_ITEM_TYPE.ATTACK)
if battle.isWin():
background = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_BATTLEFORTVICTORY
resultLabel = 'win'
elif battle.isLose():
background = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_BATTLEFORTDEFEAT
resultLabel = 'lose'
else:
background, resultLabel = (None, None)
eventItem = {'icon': icon,
'title': titleTpl % {'clanName': '[%s]' % opponentsClanInfo[1]},
'clanID': opponentsClanInfo[0],
'direction': _ms(FORTIFICATIONS.GENERAL_DIRECTION, value=_ms('#fortifications:General/directionName%d' % battle.getDirection())),
'timeInfo': _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TIMEINFO) % {'startTime': BigWorld.wg_getShortTimeFormat(startTimestamp),
'endTime': BigWorld.wg_getShortTimeFormat(startTimestamp + time_utils.ONE_HOUR)},
'background': background,
'tankIconVO': tankIconVO,
'showTankIcon': not battleHasEnded}
if battleHasEnded and resultLabel:
resultText = makeHtmlString('html_templates:lobby/fortifications', 'battleResult', {'result': _ms(MENU.finalstatistic_commonstats_resultlabel(resultLabel))})
eventItem.update({'result': resultText})
eventItems.append(eventItem)
if not len(eventItems):
if fort.isOnVacationAt(self.__selectedDate):
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_VACATION, date=fort.getVacationDateStr())
else:
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_NOEVENTS)
if len(eventItems) > 0:
dateInfo = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_INFO_BATTLESCOUNT, eventsCount=len(eventItems))
self.as_updatePreviewDataS({'dateString': dateString,
'dateInfo': dateInfo,
'noEventsText': noEventsText,
'events': eventItems})
return
def _populateCalendarMessage(self):
calendar = self.getCalendar()
if calendar is not None:
fort, message = self.fortCtrl.getFort(), ''
vacationStart, vacationEnd = fort.getVacationDate()
if self._isValidTime(vacationStart, self.__selectedDate) or self._isValidTime(vacationEnd, self.__selectedDate):
message = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_MESSAGE_VACATION, date=fort.getVacationDateStr())
calendar.as_setCalendarMessageS(message)
return
def _populate(self):
super(FortCalendarWindow, self)._populate()
self.startFortListening()
self.startCalendarListening()
self._update()
def _dispose(self):
self.stopFortListening()
self.stopCalendarListening()
super(FortCalendarWindow, self)._dispose()
def _update(self):
calendar = self.getCalendar()
if calendar is not None:
lowerTimeBound = time_utils.getCurrentLocalServerTimestamp() - self.TIME_LIMITS.LOW
higherTimeBound = time_utils.getCurrentLocalServerTimestamp() + self.TIME_LIMITS.HIGH
calendar.as_setMinAvailableDateS(lowerTimeBound)
calendar.as_setMaxAvailableDateS(higherTimeBound)
calendar.as_openMonthS(self.__selectedDate)
calendar.as_selectDateS(self.__selectedDate)
self._populateMonthEvents()
self._populatePreviewBlock()
self._populateCalendarMessage()
return
@classmethod
def _isValidTime(cls, timestampToCheck, rootTimestamp = None):
rootTimestamp = rootTimestamp or time_utils.getCurrentTimestamp()
minLimit = rootTimestamp - cls.TIME_LIMITS.LOW
dayStart, _ = time_utils.getDayTimeBoundsForLocal(minLimit)
minLimit = dayStart
maxLimit = rootTimestamp + cls.TIME_LIMITS.HIGH
_, dayEnd = time_utils.getDayTimeBoundsForLocal(maxLimit)
maxLimit = dayEnd
return minLimit < timestampToCheck < maxLimit
def _getBattlesByDay(self):
result, fort = defaultdict(list), self.fortCtrl.getFort()
for battle in fort.getAttacks() + fort.getDefences():
startTimestamp = battle.getStartTime()
if self._isValidTime(startTimestamp):
dayStartTimestamp, _ = time_utils.getDayTimeBoundsForLocal(startTimestamp)
result[dayStartTimestamp].append(battle)
return result
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\fortifications\FortCalendarWindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:23:17 Central European Summer Time
| [
"[email protected]"
] | |
b53210f45388c5820faf0c133ad6ef73039b955b | 9a034b12c845d01f36aff2e5fdbf8486a9e8a642 | /faketrudy/trudy_api/migrations/0005_child_tweets.py | 58e5ce1a352b4425e107065b667d213e62e02fbe | [] | no_license | piyush6191996/Django-Rest-Framework | 2d1cd89de700e7aa68f93f9104418c05c70e800a | 3950a72bed52fd4bcbec3de439fe9f1130df10f9 | refs/heads/master | 2020-03-15T06:00:31.362680 | 2018-05-07T19:09:17 | 2018-05-07T19:09:17 | 131,998,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | # Generated by Django 2.0.2 on 2018-04-10 08:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('trudy_api', '0004_auto_20180410_1229'),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('twitter_token', models.CharField(blank=True, max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tweets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tweets', models.TextField()),
('sentiment', models.CharField(max_length=255)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trudy_api.Child')),
],
),
]
| [
"[email protected]"
] | |
fac948d696d4a82b62dca8ce6557a6b4e27a4e6e | 0ecb1763b4cab08a1fb80234639e46afc8921e2f | /further/routing_1.py | 882cf1231be2c220621e4dd32a8a4aea3cdd9566 | [] | no_license | mach8686devops/pyside6-demo | 4eed3713288ec21b0ec4b8561290f87925693b89 | 848302ff9c1536034cf5f225fa953944d011c2a4 | refs/heads/main | 2023-05-05T11:12:20.711846 | 2021-05-28T13:44:41 | 2021-05-28T13:44:41 | 371,714,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | import sys
from PySide6.QtCore import QSize, Qt
from PySide6.QtWidgets import QApplication, QLabel, QMainWindow
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.label = QLabel("Click in this window")
self.status = self.statusBar()
self.setFixedSize(QSize(200, 100))
self.setCentralWidget(self.label)
def mouseMoveEvent(self, e):
self.label.setText("mouseMoveEvent")
def mousePressEvent(self, e):
button = e.button()
if button == Qt.LeftButton:
self.label.setText("mousePressEvent LEFT")
if e.x() < 100:
self.status.showMessage("Left click on left")
self.move(self.x() - 10, self.y())
else:
self.status.showMessage("Left click on right")
self.move(self.x() + 10, self.y())
elif button == Qt.MiddleButton:
self.label.setText("mousePressEvent MIDDLE")
elif button == Qt.RightButton:
self.label.setText("mousePressEvent RIGHT")
if e.x() < 100:
self.status.showMessage("Right click on left")
print("Something else here.")
self.move(10, 10)
else:
self.status.showMessage("Right click on right")
self.move(400, 400)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
| [
"[email protected]"
] | |
f9a3bff56e5ed0ba4f874a6571ecf9e908e79f95 | de1f9d660cfb738afdb66e4a2d63a4577c07d9c6 | /xcube/webapi/defaults.py | e2f0580e213aeaa838812aab943976b33b2c918e | [
"MIT"
] | permissive | rabaneda/xcube | db47eb416db85df891a924063482a7943cae9d4f | 0d38ca513987184dbc4a37da1616e4076964d0f1 | refs/heads/master | 2020-11-24T00:11:17.107630 | 2020-02-11T10:11:34 | 2020-02-11T10:11:34 | 227,877,138 | 0 | 0 | MIT | 2019-12-13T16:14:51 | 2019-12-13T16:14:50 | null | UTF-8 | Python | false | false | 1,831 | py | # The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
SERVER_NAME = 'xcube Server'
SERVER_DESCRIPTION = f'WMTS, catalogue, data access, tile, feature, time-series services for' \
' xarray-enabled data cubes'
DEFAULT_ADDRESS = 'localhost'
DEFAULT_PORT = 8080
DEFAULT_TILE_CACHE_SIZE = "512M"
DEFAULT_UPDATE_PERIOD = 2.
DEFAULT_LOG_PREFIX = 'xcube-serve.log'
DEFAULT_TILE_COMP_MODE = 0
DEFAULT_TRACE_PERF = False
DEFAULT_CMAP_NAME = 'viridis'
DEFAULT_CMAP_VMIN = 0.
DEFAULT_CMAP_VMAX = 1.
DEFAULT_CMAP_WIDTH = 1
DEFAULT_CMAP_HEIGHT = 5
_GIGAS = 1000 * 1000 * 1000
FILE_TILE_CACHE_CAPACITY = 20 * _GIGAS
FILE_TILE_CACHE_ENABLED = False
FILE_TILE_CACHE_PATH = './image-cache'
MEM_TILE_CACHE_CAPACITY = 2 * _GIGAS
| [
"[email protected]"
] | |
16788fb6c4d87a3d199099337d60a972ac10c1d0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631989306621952_1/Python/gvalli/2016-1A-A-lastword.py | becda986965852bb63622f5a8164983cb9663cf1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #! /#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
fh = open(sys.argv[1], 'r')
T = int(fh.readline()) # number of test cases
for t in range(T):
S = fh.readline().split()[0] # string of letters
res = ''
oldval = -1
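    # Greedy rule: prepend the letter when it is >= the current first letter
    # (tracked in oldval); otherwise append it to the end.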
for c in S:
val = ord(c)
if val >= oldval:
res = c + res
oldval = ord(c)
else:
res = res + c
print('Case #{:d}: {}'.format(t + 1, res))
| [
"[email protected]"
] | |
def6c18b46463b5c3cd481ceefdafb7b8c4e49d6 | 98a936d5372294ed892a9bf9cf98646c72af515c | /usage/lab/explorer_usage.py | fd4a3b0be9636dbe6d5abd61ffe6a45858e3c81c | [
"MIT"
] | permissive | edublancas/pipeline | f6d22ad07b134be98c139d1de6ca7d8321072ba8 | 5bef04d77fdadc1dc4ec22b9b346f0a062cca1ce | refs/heads/master | 2021-05-15T01:09:50.072378 | 2016-12-29T05:45:48 | 2016-12-29T05:45:48 | 59,692,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | from pipeline import ExperimentExplorer
# load everything
explorer = ExperimentExplorer()
# just load results from my_experiment_a
explorer = ExperimentExplorer('my_experiment_a')
# load results from my_experiment_a and my_experiment_b
explorer = ExperimentExplorer(['my_experiment_a', 'my_experiment_b'])
# compute new metric for every model
explorer.apply(lambda m: m.compute_new_metric)
# store this new metric for every model affected
explorer.save()
# after plotting, analyzing results, I want to get the
# trained model
model = explorer.get('some_id')
metric = model.compute_metric()
print 'metric is {}'.format(metric)
# the problem is: should I pickle models? I should NOT pickle everything,
# but if the logger is smart enough I may be able to pickle just the top models
# another option is to just re-train the model...
# independent of the option chosen, the API should be transparent to the user,
# who does not need to know the mechanism and should just be able to recover the object
# - problem with re-training: I need the data. Assuming the data is still the
# same I can do that, but if the numbers have changed while the columns
# are named the same I'm going to have a bad time
"[email protected]"
] | |
c5a4840e2abacff143dd7d855e796d90b83c83fe | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/metis_transf_20180709124830.py | 42a9fae8f327a0df02f62926b8ffe1d5dacf3f19 | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 2,331 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import graph_tool.all as gt
import metis
from pathlib import Path
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph100.gt"))
name = graph.new_vertex_property('string')
color = graph.new_vertex_property('string')
adjlist_pop = []
nodew_pop = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist_pop.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
    weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew_pop.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist_pop, nodew=nodew_pop)
objval, parts = metis.part_graph(metis_graph, nparts=4)
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_init.png'))
adjlist = []
nodew = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist, nodew=nodew)
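# tpwgts supplies one tuple per partition with the target fraction of each
# vertex-weight constraint -- here (population share, partisan-ratio share)
# for the 4 districts; each constraint's fractions sum to 1.0.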
objval, parts = metis.part_graph(metis_graph, nparts=4, tpwgts=[(0.25,0.50),(0.25,0.10),(0.25, 0.30),(0.25, 0.10)])
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_fin.png')) | [
"[email protected]"
] | |
cde74c8664798c8237fa5329c575a705974c6f41 | 34c5a03855ab0aca39acea941be520157f7d0b74 | /lib/ansible/modules/cloud/alicloud/ali_slb_vsg_info.py | 72e35f09e490e814c2cd95556da2fa6bd18f6359 | [
"Apache-2.0"
] | permissive | lixue323/ansible-provider | 1260d1bc17a2fa7bf4c0f387a33dd942059850ed | aae2658532afcbcdf471609fae0e2108fb57af3b | refs/heads/master | 2020-08-11T21:44:37.685788 | 2019-12-13T03:11:23 | 2019-12-13T04:00:45 | 214,633,323 | 0 | 1 | Apache-2.0 | 2019-10-12T11:12:07 | 2019-10-12T11:12:07 | null | UTF-8 | Python | false | false | 6,239 | py | #!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg_info
version_added: "2.8"
short_description: Gather facts on virtual server group of Alibaba Cloud SLB.
description:
- This module fetches virtual server groups data from the Open API in Alibaba Cloud.
options:
load_balancer_id:
description:
- ID of server load balancer.
required: true
aliases: ["lb_id"]
vserver_group_ids:
description:
- A list of SLB vserver group ids.
required: false
aliases: ["group_ids", "ids"]
name_prefix:
description:
- Use a vritual server group name prefix to filter vserver groups.
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Retrieving vsgs using slb id
ali_slb_vsg_info:
lb_id: '{{item}}'
with_items: '{{slbs.ids}}'
- name: Filter vsg using name_regex
ali_slb_vsg_info:
name_prefix: 'ansible-foo'
lb_id: 'lb-cn3cn34'
'''
RETURN = '''
ids:
description: List ids of being fetched virtual server group.
returned: when success
type: list
sample: ["rsp-2zehblhcv", "rsp-f22c4lhcv"]
names:
description: List name of being fetched virtual server group.
returned: when success
type: list
sample: ["ansible-1", "ansible-2"]
vserver_groups:
description:
- info about the virtual server group that was created or deleted.
returned: on present
type: complex
contains:
address:
description: The IP address of the loal balancer
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
description: The ID of the virtual server group was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
description: The ID of the virtual server group was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
load_balancer_id=dict(type='str', aliases=['lb_id'], required=True),
vserver_group_ids=dict(type='list', aliases=['group_ids', 'ids']),
name_prefix=dict(type='str')
))
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for this module.")
vsg_ids = module.params['vserver_group_ids']
name_prefix = module.params['name_prefix']
ids = []
vsgs = []
names = []
try:
slb = slb_connect(module)
groups = slb.describe_vserver_groups(**{'load_balancer_id': module.params['load_balancer_id']})
if groups:
for group in groups:
if vsg_ids and group.id not in vsg_ids:
continue
if name_prefix and not str(group.name).startswith(name_prefix):
continue
vsgs.append(group.read())
ids.append(group.id)
names.append(group.name)
module.exit_json(changed=False, vserver_groups=vsgs, ids=ids, names=names)
except Exception as e:
module.fail_json(msg=str("Unable to describe slb vserver groups, error:{0}".format(e)))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4067eaa4a5851aa47554afb318aa9f0825522d89 | c9490d7bb9c3add1a5e71b06c9180260ffc1fff5 | /web_dynamic/2-hbnb.py | 2eadee48a25ba93f32aa643310baf9dfb56b7b2c | [
"MIT"
] | permissive | PierreBeaujuge/AirBnB_clone_v4 | 54a255023587e6e291f41410f124da8089f2a5b7 | f93bb1f22660f4497fb942abe120a5e69815affc | refs/heads/master | 2021-01-04T15:00:01.541582 | 2020-10-08T09:04:29 | 2020-10-08T09:04:29 | 240,601,631 | 0 | 1 | MIT | 2020-02-18T02:25:15 | 2020-02-14T21:28:36 | HTML | UTF-8 | Python | false | false | 1,351 | py | #!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/2-hbnb/')
def hbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = uuid.uuid4()
return render_template('2-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=cache_id)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
| [
"[email protected]"
] | |
4c20e6b6769d1680490e49efd35daee18df732f1 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/plotly/py2/plotly/graph_objs/sankey/__init__.py | 951083a2bed5804da50a572a9104aeb1dea14990 | [
"MIT",
"Apache-2.0"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 95,548 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
        it is available on the system on which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
            apply a font if it is available on the system on which
            it operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Textfont object
Sets the font for node labels
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
            apply a font if it is available on the system on which
            it operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["size"] = v_textfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
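

# Illustrative usage sketch (not generated code): constructing a Textfont and
# attaching it to a Sankey trace; the concrete font values are arbitrary
# example assumptions.
#
#   import plotly.graph_objs as go
#   font = go.sankey.Textfont(family="Arial", size=12, color="black")
#   fig = go.Figure(go.Sankey(textfont=font))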
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
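

# Illustrative usage sketch (not generated code): a stream configuration for a
# Sankey trace. The token below is a placeholder assumption; real tokens come
# from https://chart-studio.plotly.com/settings.
#
#   import plotly.graph_objs as go
#   stream = go.sankey.Stream(token="your-stream-token", maxpoints=50)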
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Node(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the `node` color. It can be a single value, or an array
for specifying color for each `node`. If `node.color` is
omitted, then the default `Plotly` color palette will be cycled
through to have a variety of colors. These defaults are not
fully opaque, to allow some visibility of what is beneath the
node.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each node.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# groups
# ------
@property
def groups(self):
"""
Groups of nodes. Each group is defined by an array with the
indices of the nodes it contains. Multiple groups can be
specified.
The 'groups' property is an info array that may be specified as:
* a 2D list where:
The 'groups[i][j]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["groups"]
@groups.setter
def groups(self, val):
self["groups"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears when hovering nodes.
If `none` or `skip` are set, no information is displayed upon
hovering. But, if `none` is set, click and hover events are
still fired.
The 'hoverinfo' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'none', 'skip']
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.node.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                    only if the hover label text spans two or
                    more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.sankey.node.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
        data. Additionally, every attribute that can be specified per-
        point (the ones that are `arrayOk: true`) is available, as are
        the variables `value` and `label`. Anything contained in tag
`<extra>` is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# label
# -----
@property
def label(self):
"""
The shown name of the node.
The 'label' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
# labelsrc
# --------
@property
def labelsrc(self):
"""
Sets the source reference on Chart Studio Cloud for label .
The 'labelsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelsrc"]
@labelsrc.setter
def labelsrc(self, val):
self["labelsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.node.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the `line` around each
`node`.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
width
Sets the width (in px) of the `line` around
each `node`.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.sankey.node.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# pad
# ---
@property
def pad(self):
"""
Sets the padding (in px) between the `nodes`.
The 'pad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the `nodes`.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# x
# -
@property
def x(self):
"""
The normalized horizontal position of the node.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
The normalized vertical position of the node.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the `node` color. It can be a single value, or an
array for specifying color for each `node`. If
`node.color` is omitted, then the default `Plotly`
color palette will be cycled through to have a variety
of colors. These defaults are not fully opaque, to
allow some visibility of what is beneath the node.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each node.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
groups
Groups of nodes. Each group is defined by an array with
the indices of the nodes it contains. Multiple groups
can be specified.
hoverinfo
            Determines which trace information appears when hovering
nodes. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.node.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `value` and `label`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the node.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.node.Line` instance
or dict with compatible properties
pad
Sets the padding (in px) between the `nodes`.
thickness
Sets the thickness (in px) of the `nodes`.
x
The normalized horizontal position of the node.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
The normalized vertical position of the node.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
customdata=None,
customdatasrc=None,
groups=None,
hoverinfo=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
label=None,
labelsrc=None,
line=None,
pad=None,
thickness=None,
x=None,
xsrc=None,
y=None,
ysrc=None,
**kwargs
):
"""
Construct a new Node object
The nodes of the Sankey plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Node`
color
Sets the `node` color. It can be a single value, or an
array for specifying color for each `node`. If
`node.color` is omitted, then the default `Plotly`
color palette will be cycled through to have a variety
of colors. These defaults are not fully opaque, to
allow some visibility of what is beneath the node.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each node.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
groups
Groups of nodes. Each group is defined by an array with
the indices of the nodes it contains. Multiple groups
can be specified.
hoverinfo
            Determines which trace information appears when hovering
nodes. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.node.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `value` and `label`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the node.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.node.Line` instance
or dict with compatible properties
pad
Sets the padding (in px) between the `nodes`.
thickness
Sets the thickness (in px) of the `nodes`.
x
The normalized horizontal position of the node.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
The normalized vertical position of the node.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
Returns
-------
Node
"""
super(Node, self).__init__("node")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Node
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Node`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import node as v_node
# Initialize validators
# ---------------------
self._validators["color"] = v_node.ColorValidator()
self._validators["colorsrc"] = v_node.ColorsrcValidator()
self._validators["customdata"] = v_node.CustomdataValidator()
self._validators["customdatasrc"] = v_node.CustomdatasrcValidator()
self._validators["groups"] = v_node.GroupsValidator()
self._validators["hoverinfo"] = v_node.HoverinfoValidator()
self._validators["hoverlabel"] = v_node.HoverlabelValidator()
self._validators["hovertemplate"] = v_node.HovertemplateValidator()
self._validators["hovertemplatesrc"] = v_node.HovertemplatesrcValidator()
self._validators["label"] = v_node.LabelValidator()
self._validators["labelsrc"] = v_node.LabelsrcValidator()
self._validators["line"] = v_node.LineValidator()
self._validators["pad"] = v_node.PadValidator()
self._validators["thickness"] = v_node.ThicknessValidator()
self._validators["x"] = v_node.XValidator()
self._validators["xsrc"] = v_node.XsrcValidator()
self._validators["y"] = v_node.YValidator()
self._validators["ysrc"] = v_node.YsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("customdata", None)
self["customdata"] = customdata if customdata is not None else _v
_v = arg.pop("customdatasrc", None)
self["customdatasrc"] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop("groups", None)
self["groups"] = groups if groups is not None else _v
_v = arg.pop("hoverinfo", None)
self["hoverinfo"] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop("hoverlabel", None)
self["hoverlabel"] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop("hovertemplate", None)
self["hovertemplate"] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop("hovertemplatesrc", None)
self["hovertemplatesrc"] = (
hovertemplatesrc if hovertemplatesrc is not None else _v
)
_v = arg.pop("label", None)
self["label"] = label if label is not None else _v
_v = arg.pop("labelsrc", None)
self["labelsrc"] = labelsrc if labelsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("xsrc", None)
self["xsrc"] = xsrc if xsrc is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("ysrc", None)
self["ysrc"] = ysrc if ysrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
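

# Illustrative usage sketch (not generated code): a minimal Node definition;
# the labels and sizes below are arbitrary example values.
#
#   import plotly.graph_objs as go
#   node = go.sankey.Node(label=["A", "B", "C"], pad=15, thickness=20)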
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Link(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the `link` color. It can be a single value, or an array
for specifying color for each `link`. If `link.color` is
omitted, then by default, a translucent grey link will be used.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorscales
# -----------
@property
def colorscales(self):
"""
The 'colorscales' property is a tuple of instances of
Colorscale that may be specified as:
- A list or tuple of instances of plotly.graph_objs.sankey.link.Colorscale
- A list or tuple of dicts of string/value properties that
will be passed to the Colorscale constructor
Supported dict properties:
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
                    color string. At minimum, a mapping for the
                    lowest (0) and highest (1) values is required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and
`cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Grey
s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
,Electric,Viridis,Cividis.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
Returns
-------
tuple[plotly.graph_objs.sankey.link.Colorscale]
"""
return self["colorscales"]
@colorscales.setter
def colorscales(self, val):
self["colorscales"] = val
# colorscaledefaults
# ------------------
@property
def colorscaledefaults(self):
"""
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults), sets the
default property values to use for elements of
sankey.link.colorscales
The 'colorscaledefaults' property is an instance of Colorscale
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Colorscale`
- A dict of string/value properties that will be passed
to the Colorscale constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.sankey.link.Colorscale
"""
return self["colorscaledefaults"]
@colorscaledefaults.setter
def colorscaledefaults(self, val):
self["colorscaledefaults"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each link.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears when hovering links.
If `none` or `skip` are set, no information is displayed upon
hovering. But, if `none` is set, click and hover events are
still fired.
The 'hoverinfo' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'none', 'skip']
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                    only if the hover label text spans two or
                    more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.sankey.link.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
        data. Additionally, every attribute that can be specified per-
        point (the ones that are `arrayOk: true`) is available, as are
        the variables `value` and `label`. Anything contained in tag
`<extra>` is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# label
# -----
@property
def label(self):
"""
The shown name of the link.
The 'label' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
# labelsrc
# --------
@property
def labelsrc(self):
"""
Sets the source reference on Chart Studio Cloud for label .
The 'labelsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelsrc"]
@labelsrc.setter
def labelsrc(self, val):
self["labelsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the `line` around each
`link`.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
width
Sets the width (in px) of the `line` around
each `link`.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.sankey.link.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# source
# ------
@property
def source(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
source node.
The 'source' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
# sourcesrc
# ---------
@property
def sourcesrc(self):
"""
Sets the source reference on Chart Studio Cloud for source .
The 'sourcesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sourcesrc"]
@sourcesrc.setter
def sourcesrc(self, val):
self["sourcesrc"] = val
# target
# ------
@property
def target(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
target node.
The 'target' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["target"]
@target.setter
def target(self, val):
self["target"] = val
# targetsrc
# ---------
@property
def targetsrc(self):
"""
Sets the source reference on Chart Studio Cloud for target .
The 'targetsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["targetsrc"]
@targetsrc.setter
def targetsrc(self, val):
self["targetsrc"] = val
# value
# -----
@property
def value(self):
"""
A numeric value representing the flow volume value.
The 'value' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valuesrc
# --------
@property
def valuesrc(self):
"""
Sets the source reference on Chart Studio Cloud for value .
The 'valuesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuesrc"]
@valuesrc.setter
def valuesrc(self, val):
self["valuesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
            Determines which trace information appears when hovering
links. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `value` and `label`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
source .
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
target .
value
A numeric value representing the flow volume value.
valuesrc
Sets the source reference on Chart Studio Cloud for
value .
"""
def __init__(
self,
arg=None,
color=None,
colorscales=None,
colorscaledefaults=None,
colorsrc=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
label=None,
labelsrc=None,
line=None,
source=None,
sourcesrc=None,
target=None,
targetsrc=None,
value=None,
valuesrc=None,
**kwargs
):
"""
Construct a new Link object
The links of the Sankey plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Link`
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
            Determines which trace information appears when hovering
links. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `value` and `label`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
source .
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
target .
value
A numeric value representing the flow volume value.
valuesrc
Sets the source reference on Chart Studio Cloud for
value .
Returns
-------
Link
"""
super(Link, self).__init__("link")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Link
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Link`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import link as v_link
# Initialize validators
# ---------------------
self._validators["color"] = v_link.ColorValidator()
self._validators["colorscales"] = v_link.ColorscalesValidator()
self._validators["colorscaledefaults"] = v_link.ColorscaleValidator()
self._validators["colorsrc"] = v_link.ColorsrcValidator()
self._validators["customdata"] = v_link.CustomdataValidator()
self._validators["customdatasrc"] = v_link.CustomdatasrcValidator()
self._validators["hoverinfo"] = v_link.HoverinfoValidator()
self._validators["hoverlabel"] = v_link.HoverlabelValidator()
self._validators["hovertemplate"] = v_link.HovertemplateValidator()
self._validators["hovertemplatesrc"] = v_link.HovertemplatesrcValidator()
self._validators["label"] = v_link.LabelValidator()
self._validators["labelsrc"] = v_link.LabelsrcValidator()
self._validators["line"] = v_link.LineValidator()
self._validators["source"] = v_link.SourceValidator()
self._validators["sourcesrc"] = v_link.SourcesrcValidator()
self._validators["target"] = v_link.TargetValidator()
self._validators["targetsrc"] = v_link.TargetsrcValidator()
self._validators["value"] = v_link.ValueValidator()
self._validators["valuesrc"] = v_link.ValuesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorscales", None)
self["colorscales"] = colorscales if colorscales is not None else _v
_v = arg.pop("colorscaledefaults", None)
self["colorscaledefaults"] = (
colorscaledefaults if colorscaledefaults is not None else _v
)
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("customdata", None)
self["customdata"] = customdata if customdata is not None else _v
_v = arg.pop("customdatasrc", None)
self["customdatasrc"] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop("hoverinfo", None)
self["hoverinfo"] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop("hoverlabel", None)
self["hoverlabel"] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop("hovertemplate", None)
self["hovertemplate"] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop("hovertemplatesrc", None)
self["hovertemplatesrc"] = (
hovertemplatesrc if hovertemplatesrc is not None else _v
)
_v = arg.pop("label", None)
self["label"] = label if label is not None else _v
_v = arg.pop("labelsrc", None)
self["labelsrc"] = labelsrc if labelsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("source", None)
self["source"] = source if source is not None else _v
_v = arg.pop("sourcesrc", None)
self["sourcesrc"] = sourcesrc if sourcesrc is not None else _v
_v = arg.pop("target", None)
self["target"] = target if target is not None else _v
_v = arg.pop("targetsrc", None)
self["targetsrc"] = targetsrc if targetsrc is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valuesrc", None)
self["valuesrc"] = valuesrc if valuesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
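

# Illustrative usage sketch (not generated code): `source` and `target` index
# into `node.label`, so the links below connect A->B and A->C; the values are
# arbitrary flow volumes.
#
#   import plotly.graph_objs as go
#   fig = go.Figure(go.Sankey(
#       node=go.sankey.Node(label=["A", "B", "C"]),
#       link=go.sankey.Link(source=[0, 0], target=[1, 2], value=[8, 4]),
#   ))
#   fig.show()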
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for bgcolor.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
        bordercolor.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
                    for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
                    for family.
size
sizesrc
Sets the source reference on Chart Studio Cloud
                    for size.
Returns
-------
plotly.graph_objs.sankey.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for namelength.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
            align.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
            bgcolor.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
            bordercolor.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
            namelength.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Hoverlabel`
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
            align.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
            bgcolor.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
            bordercolor.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
            namelength.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
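# --- Illustrative usage (editor's addition, not generated code) -----------
# A hedged sketch of the Hoverlabel defined above: white labels with a grey
# border, left-aligned text, and untruncated trace names (namelength=-1).
if __name__ == "__main__":
    import plotly.graph_objects as go  # assumes plotly is installed

    hl = go.sankey.Hoverlabel(bgcolor="white", bordercolor="#444",
                              align="left", namelength=-1)
    print(hl)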
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
        the grid for this sankey trace.
        The 'column' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
        grid for this sankey trace.
        The 'row' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this sankey trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this sankey trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
            column in the grid for this sankey trace.
row
If there is a layout grid, use the domain for this row
            in the grid for this sankey trace.
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Domain`
column
If there is a layout grid, use the domain for this
            column in the grid for this sankey trace.
row
If there is a layout grid, use the domain for this row
            in the grid for this sankey trace.
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import domain as v_domain
# Initialize validators
# ---------------------
self._validators["column"] = v_domain.ColumnValidator()
self._validators["row"] = v_domain.RowValidator()
self._validators["x"] = v_domain.XValidator()
self._validators["y"] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
self["column"] = column if column is not None else _v
_v = arg.pop("row", None)
self["row"] = row if row is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
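# --- Illustrative usage (editor's addition, not generated code) -----------
# A hedged sketch of the Domain defined above: confine a sankey trace to
# the left half of the figure.
if __name__ == "__main__":
    import plotly.graph_objects as go  # assumes plotly is installed

    dom = go.sankey.Domain(x=[0, 0.5], y=[0, 1])
    print(dom)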
__all__ = [
"Domain",
"Hoverlabel",
"Link",
"Node",
"Stream",
"Textfont",
"hoverlabel",
"link",
"node",
]
from plotly.graph_objs.sankey import node
from plotly.graph_objs.sankey import link
from plotly.graph_objs.sankey import hoverlabel
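# --- Illustrative usage (editor's addition, not generated code) -----------
# A minimal end-to-end sketch combining the classes exported above.
if __name__ == "__main__":
    import plotly.graph_objects as go  # assumes plotly is installed

    fig = go.Figure(go.Sankey(
        domain=dict(x=[0, 1], y=[0, 1]),
        node=dict(label=["A", "B", "C"]),
        link=dict(source=[0, 0], target=[1, 2], value=[8, 4]),
    ))
    fig.show()  # opens in a browser when the file is run directly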
| [
"[email protected]"
] | |
85723fbe5a2fd93ee074528b9234f24cb86ed9e2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/n7e.py | 33f3429b1e2c4c1ad76ee80ffc4f1c7f76064da3 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
			data = line.split()
			if not data:
				continue  # editor's fix: a blank line would crash on data[0]
			if data[0] == 'n7E':
printFunction(data[1:])
else:
print 'ERROR'
return
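# Illustrative usage (editor's addition): tokens are whitespace-separated and
# the quote marks must stand alone, so a script file containing the line
#
#     n7E " hello world "
#
# prints "hello world"; any line whose first token is not 'n7E' prints
# ERROR and stops.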
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e99b6fff29c79ce050c083f47a26f60657b1e4de | 55a947cddcac5188c557e175aec98df19485f623 | /tests/integration/test_customer.py | 811fcaa2b0bc6edf8f9dbe8c3288e900ea02b3db | [
"MIT"
] | permissive | pfrantz/braintree_python | d02c1691049df68d87f7738e53d489682db94a7e | 055e7400dd70a79ec18e5a30476dc77827bc465d | refs/heads/master | 2021-01-16T20:07:31.664653 | 2013-10-30T00:19:16 | 2013-10-30T00:19:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,730 | py | from tests.test_helper import *
import braintree.test.venmo_sdk as venmo_sdk
class TestCustomer(unittest.TestCase):
def test_all(self):
collection = Customer.all()
self.assertTrue(collection.maximum_size > 100)
customer_ids = [c.id for c in collection.items]
self.assertEquals(collection.maximum_size, len(TestHelper.unique(customer_ids)))
self.assertEquals(Customer, type(collection.first))
def test_create(self):
result = Customer.create({
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "[email protected]",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Bill", customer.first_name)
self.assertEqual("Gates", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("[email protected]", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
self.assertNotEqual(None, re.search("\A\d{6,7}\Z", customer.id))
def test_create_with_device_session_id(self):
result = Customer.create({
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "[email protected]",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100",
"device_session_id": "abc123"
}
})
self.assertTrue(result.is_success)
def test_create_with_unicode(self):
result = Customer.create({
"first_name": u"Bill<&>",
"last_name": u"G\u1F00t\u1F18s",
"company": "Microsoft",
"email": "[email protected]",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual(u"Bill<&>", customer.first_name)
self.assertEqual(u"G\u1f00t\u1F18s", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("[email protected]", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
self.assertNotEqual(None, re.search("\A\d{6,7}\Z", customer.id))
found_customer = Customer.find(customer.id)
self.assertEqual(u"G\u1f00t\u1F18s", found_customer.last_name)
def test_create_with_no_attributes(self):
result = Customer.create()
self.assertTrue(result.is_success)
self.assertNotEqual(None, result.customer.id)
def test_create_with_special_chars(self):
result = Customer.create({"first_name": "XML Chars <>&'\""})
self.assertTrue(result.is_success)
self.assertEqual("XML Chars <>&'\"", result.customer.first_name)
def test_create_returns_an_error_response_if_invalid(self):
result = Customer.create({
"email": "@invalid.com",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"billing_address": {
"country_code_alpha2": "MX",
"country_code_alpha3": "USA"
}
}
})
self.assertFalse(result.is_success)
self.assertEquals(2, result.errors.size)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
self.assertEquals(
ErrorCodes.Address.InconsistentCountry,
result.errors.for_object("customer").for_object("credit_card").for_object("billing_address").on("base")[0].code
)
def test_create_customer_and_payment_method_at_the_same_time(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Mike", customer.first_name)
self.assertEqual("Jones", customer.last_name)
credit_card = customer.credit_cards[0]
self.assertEqual("411111", credit_card.bin)
self.assertEqual("1111", credit_card.last_4)
self.assertEqual("05/2010", credit_card.expiration_date)
def test_create_customer_and_verify_payment_method(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4000111111111115",
"expiration_date": "05/2010",
"cvv": "100",
"options": {"verify_card": True}
}
})
self.assertFalse(result.is_success)
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, result.credit_card_verification.status)
def test_create_customer_with_check_duplicate_payment_method(self):
attributes = {
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4000111111111115",
"expiration_date": "05/2010",
"cvv": "100",
"options": {"fail_on_duplicate_payment_method": True}
}
}
Customer.create(attributes)
result = Customer.create(attributes)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.CreditCard.DuplicateCardExists, result.errors.for_object("customer").for_object("credit_card").on("number")[0].code)
self.assertEquals("Duplicate card exists in the vault.", result.message)
def test_create_customer_with_payment_method_and_billing_address(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100",
"billing_address": {
"street_address": "123 Abc Way",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60622",
"country_code_alpha2": "US",
"country_code_alpha3": "USA",
"country_code_numeric": "840",
"country_name": "United States of America"
}
}
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Mike", customer.first_name)
self.assertEqual("Jones", customer.last_name)
address = customer.credit_cards[0].billing_address
self.assertEqual("123 Abc Way", address.street_address)
self.assertEqual("Chicago", address.locality)
self.assertEqual("Illinois", address.region)
self.assertEqual("60622", address.postal_code)
self.assertEqual("US", address.country_code_alpha2)
self.assertEqual("USA", address.country_code_alpha3)
self.assertEqual("840", address.country_code_numeric)
self.assertEqual("United States of America", address.country_name)
def test_create_with_customer_fields(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"custom_fields": {
"store_me": "custom value"
}
})
self.assertTrue(result.is_success)
self.assertEquals("custom value", result.customer.custom_fields["store_me"])
def test_create_returns_nested_errors(self):
result = Customer.create({
"email": "invalid",
"credit_card": {
"number": "invalid",
"billing_address": {
"country_name": "invalid"
}
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Customer.EmailIsInvalid,
result.errors.for_object("customer").on("email")[0].code
)
self.assertEquals(
ErrorCodes.CreditCard.NumberHasInvalidLength,
result.errors.for_object("customer").for_object("credit_card").on("number")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryNameIsNotAccepted,
result.errors.for_object("customer").for_object("credit_card").for_object("billing_address").on("country_name")[0].code
)
def test_create_returns_errors_if_custom_fields_are_not_registered(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"custom_fields": {
"spouse_name": "Jacqueline"
}
})
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.CustomFieldIsInvalid, result.errors.for_object("customer").on("custom_fields")[0].code)
def test_create_with_venmo_sdk_session(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"options": {
"venmo_sdk_session": venmo_sdk.Session
}
}
})
self.assertTrue(result.is_success)
self.assertTrue(result.customer.credit_cards[0].venmo_sdk)
def test_create_with_venmo_sdk_payment_method_code(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"credit_card": {
"venmo_sdk_payment_method_code": venmo_sdk.generate_test_payment_method_code("4111111111111111")
}
})
self.assertTrue(result.is_success)
self.assertEquals("411111", result.customer.credit_cards[0].bin)
def test_delete_with_valid_customer(self):
customer = Customer.create().customer
result = Customer.delete(customer.id)
self.assertTrue(result.is_success)
@raises(NotFoundError)
def test_delete_with_invalid_customer(self):
customer = Customer.create().customer
Customer.delete(customer.id)
Customer.delete(customer.id)
def test_find_with_valid_customer(self):
customer = Customer.create({
"first_name": "Joe",
"last_name": "Cool"
}).customer
found_customer = Customer.find(customer.id)
self.assertEquals(customer.id, found_customer.id)
self.assertEquals(customer.first_name, found_customer.first_name)
self.assertEquals(customer.last_name, found_customer.last_name)
def test_find_with_invalid_customer(self):
try:
Customer.find("badid")
self.assertTrue(False)
except NotFoundError, e:
self.assertEquals("customer with id badid not found", str(e))
def test_update_with_valid_options(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"company": "Apple",
"email": "[email protected]",
"phone": "312.555.5555",
"fax": "614.555.5555",
"website": "www.apple.com"
}).customer
result = Customer.update(customer.id, {
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "[email protected]",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Bill", customer.first_name)
self.assertEqual("Gates", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("[email protected]", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
self.assertNotEqual(None, re.search("\A\d{6,7}\Z", customer.id))
def test_update_with_nested_values(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "10/10",
"billing_address": {
"postal_code": "11111"
}
}
}).customer
credit_card = customer.credit_cards[0]
address = credit_card.billing_address
updated_customer = Customer.update(customer.id, {
"first_name": "Bill",
"last_name": "Gates",
"credit_card": {
"expiration_date": "12/12",
"options": {
"update_existing_token": credit_card.token
},
"billing_address": {
"postal_code": "44444",
"country_code_alpha2": "US",
"country_code_alpha3": "USA",
"country_code_numeric": "840",
"country_name": "United States of America",
"options": {
"update_existing": True
}
}
}
}).customer
updated_credit_card = CreditCard.find(credit_card.token)
updated_address = Address.find(customer.id, address.id)
self.assertEqual("Bill", updated_customer.first_name)
self.assertEqual("Gates", updated_customer.last_name)
self.assertEqual("12/2012", updated_credit_card.expiration_date)
self.assertEqual("44444", updated_address.postal_code)
self.assertEqual("US", updated_address.country_code_alpha2)
self.assertEqual("USA", updated_address.country_code_alpha3)
self.assertEqual("840", updated_address.country_code_numeric)
self.assertEqual("United States of America", updated_address.country_name)
def test_update_with_nested_billing_address_id(self):
customer = Customer.create().customer
address = Address.create({
"customer_id": customer.id,
"postal_code": "11111"
}).address
updated_customer = Customer.update(customer.id, {
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/12",
"billing_address_id": address.id
}
}).customer
credit_card = updated_customer.credit_cards[0]
self.assertEqual(address.id, credit_card.billing_address.id)
self.assertEqual("11111", credit_card.billing_address.postal_code)
def test_update_with_invalid_options(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"company": "Apple",
"email": "[email protected]",
"phone": "312.555.5555",
"fax": "614.555.5555",
"website": "www.apple.com"
}).customer
result = Customer.update(customer.id, {
"email": "@microsoft.com",
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Customer.EmailIsInvalid,
result.errors.for_object("customer").on("email")[0].code
)
def test_create_from_transparent_redirect_with_successful_result(self):
tr_data = {
"customer": {
"first_name": "John",
"last_name": "Doe",
"company": "Doe Co",
}
}
post_params = {
"tr_data": Customer.tr_data_for_create(tr_data, "http://example.com/path"),
"customer[email]": "[email protected]",
"customer[phone]": "312.555.2323",
"customer[fax]": "614.555.5656",
"customer[website]": "www.johndoe.com",
"customer[credit_card][number]": "4111111111111111",
"customer[credit_card][expiration_date]": "05/2012",
"customer[credit_card][billing_address][country_code_alpha2]": "MX",
"customer[credit_card][billing_address][country_code_alpha3]": "MEX",
"customer[credit_card][billing_address][country_code_numeric]": "484",
"customer[credit_card][billing_address][country_name]": "Mexico",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_create_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
customer = result.customer
self.assertEquals("John", customer.first_name)
self.assertEquals("Doe", customer.last_name)
self.assertEquals("Doe Co", customer.company)
self.assertEquals("[email protected]", customer.email)
self.assertEquals("312.555.2323", customer.phone)
self.assertEquals("614.555.5656", customer.fax)
self.assertEquals("www.johndoe.com", customer.website)
self.assertEquals("05/2012", customer.credit_cards[0].expiration_date)
self.assertEquals("MX", customer.credit_cards[0].billing_address.country_code_alpha2)
self.assertEquals("MEX", customer.credit_cards[0].billing_address.country_code_alpha3)
self.assertEquals("484", customer.credit_cards[0].billing_address.country_code_numeric)
self.assertEquals("Mexico", customer.credit_cards[0].billing_address.country_name)
def test_create_from_transparent_redirect_with_error_result(self):
tr_data = {
"customer": {
"company": "Doe Co",
}
}
post_params = {
"tr_data": Customer.tr_data_for_create(tr_data, "http://example.com/path"),
"customer[email]": "john#doe.com",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_create_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
def test_update_from_transparent_redirect_with_successful_result(self):
customer = Customer.create({
"first_name": "Jane",
}).customer
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "John",
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
"customer[email]": "[email protected]",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
customer = result.customer
self.assertEquals("John", customer.first_name)
self.assertEquals("[email protected]", customer.email)
def test_update_with_nested_values_via_transparent_redirect(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "10/10",
"billing_address": {
"postal_code": "11111"
}
}
}).customer
credit_card = customer.credit_cards[0]
address = credit_card.billing_address
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "Bill",
"last_name": "Gates",
"credit_card": {
"expiration_date": "12/12",
"options": {
"update_existing_token": credit_card.token
},
"billing_address": {
"postal_code": "44444",
"options": {
"update_existing": True
}
}
}
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
updated_customer = Customer.confirm_transparent_redirect(query_string).customer
updated_credit_card = CreditCard.find(credit_card.token)
updated_address = Address.find(customer.id, address.id)
self.assertEqual("Bill", updated_customer.first_name)
self.assertEqual("Gates", updated_customer.last_name)
self.assertEqual("12/2012", updated_credit_card.expiration_date)
self.assertEqual("44444", updated_address.postal_code)
def test_update_from_transparent_redirect_with_error_result(self):
customer = Customer.create({
"first_name": "Jane",
}).customer
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "John",
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
"customer[email]": "john#doe.com",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
| [
"[email protected]"
] | |
d891da04d501abe4b1f6da6ca84babc9ccac723d | d7fb8eacd8a1aae8fe6eb49111f93090b7e87ce0 | /backend/tstcr2020102701_dev_14091/settings.py | e74bd1f3e5a9f57d885d8b38f60ca2550b592ad3 | [] | no_license | crowdbotics-apps/tstcr2020102701-dev-14091 | 4d5bcfc2b0aa29e67cebcd8948258b75e8ad9c6b | cc6ba4999444c7e93943f76af75c2506048bf2b6 | refs/heads/master | 2023-01-03T05:09:02.457778 | 2020-10-28T21:59:17 | 2020-10-28T21:59:17 | 307,772,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,125 | py | """
Django settings for tstcr2020102701_dev_14091 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tstcr2020102701_dev_14091.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tstcr2020102701_dev_14091.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
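# Illustrative (editor's addition): the settings above are driven by env
# vars, so a hypothetical local run might look like:
#
#   SECRET_KEY=dev-secret DEBUG=True DATABASE_URL=postgres://localhost/app \
#       python manage.py runserver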
| [
"[email protected]"
] | |
70b49f18abf43bf5e67f80ab9e45eba1399e8cd0 | e8274f167fd219ef78241ba8ea89e5d5875ed794 | /cloud/quantum/quantum/openstack/common/lockutils.py | 9f4eddf57c95bd92e8539b8e2c97039dc97bb433 | [
"Apache-2.0"
] | permissive | virt2x/folsomCloud | 02db0147f7e0f2ab0375faf4f36ca08272084152 | e6fd612dd77f35a72739cf4d4750e9795c0fa508 | refs/heads/master | 2021-01-01T17:26:28.405651 | 2013-10-17T12:36:04 | 2013-10-17T12:36:04 | 13,647,787 | 0 | 1 | null | 2020-07-24T08:25:22 | 2013-10-17T12:10:24 | Python | UTF-8 | Python | false | false | 8,455 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from quantum.openstack.common import cfg
from quantum.openstack.common import fileutils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
    ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
a method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
'for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the locks left behind by unit tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
return retval
return inner
return wrap
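# --- Illustrative usage (editor's addition, not part of the original) -----
# Serializes concurrent calls within a process; with external=True it also
# locks across processes via a file lock.  The lock name and prefix below
# are made up for the demo.
if __name__ == '__main__':
    @synchronized('demo', 'quantum-', external=False)
    def critical_section():
        print 'running under the "demo" lock'

    critical_section()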
| [
"[email protected]"
] | |
96e74a51787d9206d2e4ddd5c9531473c08384c5 | 593dff0c5746603268417a702a00cd3355f47f3a | /hq_extracter.py | 0637fa456196047256f299fb62689b0330057cc7 | [] | no_license | vc2309/Blue-sky-tools | e508f2cb0fd240a95b812ed53f2ac6ed3ea1cd64 | 055b06cc9865808f3d0665dc9c95aba6b401fe69 | refs/heads/master | 2021-09-10T18:55:52.616954 | 2018-03-31T07:14:01 | 2018-03-31T07:14:01 | 104,192,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import pandas as pd
ifiles=['sjs_all_feb_report_m.csv']
floors=['HQ - G/F'
,'HQ - 2S'
,'HQ - 2N'
,'HQ - 3S'
,'HQ - 3N'
,'HQ - 4N'
,'HQ - 5S'
,'HQ - 5N'
,'HQ - 6S'
,'HQ - 6N'
,'HQ - 7S'
,'HQ - 7N'
,'HQ - 8S'
,'HQ - 8N'
,'HQ - 9S'
,'HQ - 9N'
,'HQ - AC'
,'HQ - 11'
,'HQ - 12'
,'HQ - 13'
,'HQ - Lift'
,'HQ - 10']
def extract_hq(file):
print("here")
df=pd.read_csv(file)
hq_df=pd.DataFrame()
floor=[]
for f in floors:
floor.append(df[df['location']==f])
hq_df=pd.concat(floor)
print(hq_df.head())
hq_df.to_csv('hq_jan.csv')
def main():
for file in ifiles:
extract_hq(file)
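# Note (editor's addition): the per-floor loop in extract_hq is equivalent
# to a single vectorised filter, e.g.
#
#     hq_df = df[df['location'].isin(floors)]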
if __name__ == '__main__':
print("ok")
main() | [
"[email protected]"
] | |
1c90deae299ed6a990528539c555580748edee2a | bc441bb06b8948288f110af63feda4e798f30225 | /tuna_service_sdk/model/pipeline/build_pb2.pyi | b2d4c34548e7bc31341d04a0ced2cc56bb0cfe4a | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,876 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from tuna_service_sdk.model.pipeline.build_status_pb2 import (
BuildStatus as tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus,
)
from tuna_service_sdk.model.pipeline.git_meta_pb2 import (
GitMeta as tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Build(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Artifact(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
packageName = ... # type: typing___Text
versionName = ... # type: typing___Text
ctime = ... # type: typing___Text
packageId = ... # type: typing___Text
versionId = ... # type: typing___Text
def __init__(self,
*,
packageName : typing___Optional[typing___Text] = None,
versionName : typing___Optional[typing___Text] = None,
ctime : typing___Optional[typing___Text] = None,
packageId : typing___Optional[typing___Text] = None,
versionId : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Build.Artifact: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build.Artifact: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"ctime",b"ctime",u"packageId",b"packageId",u"packageName",b"packageName",u"versionId",b"versionId",u"versionName",b"versionName"]) -> None: ...
id = ... # type: typing___Text
sender = ... # type: typing___Text
created = ... # type: builtin___int
yaml_string = ... # type: typing___Text
number = ... # type: typing___Text
events = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
@property
def git_meta(self) -> tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta: ...
@property
def artifact(self) -> Build.Artifact: ...
@property
def status(self) -> tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
git_meta : typing___Optional[tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta] = None,
sender : typing___Optional[typing___Text] = None,
artifact : typing___Optional[Build.Artifact] = None,
created : typing___Optional[builtin___int] = None,
yaml_string : typing___Optional[typing___Text] = None,
status : typing___Optional[tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus] = None,
number : typing___Optional[typing___Text] = None,
events : typing___Optional[typing___Iterable[typing___Text]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Build: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"git_meta",b"git_meta",u"status",b"status"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"created",b"created",u"events",b"events",u"git_meta",b"git_meta",u"id",b"id",u"number",b"number",u"sender",b"sender",u"status",b"status",u"yaml_string",b"yaml_string"]) -> None: ...
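# Illustrative usage (editor's addition): constructing the message this stub
# types, assuming the generated build_pb2 module is importable at runtime.
#
#     from tuna_service_sdk.model.pipeline import build_pb2
#     b = build_pb2.Build(id="42", sender="ci-bot", number="7")
#     b.artifact.packageName = "demo-pkg"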
| [
"[email protected]"
] | |
fc62026ad385c261dc340d5914e1490389de7b69 | 16abd82b9523f0fc7ae6df0aac11fd03e2e3d9f3 | /boards/tests/test_views.py | c6631a2dcbefbde8dc9659cd11ccf5750f89b5e0 | [] | no_license | msm3858/projektforum | cf5255a5781f3536db56cf1b680557ca876f8221 | c6a0abda9f147d3578e430012780bda3eb4f20b5 | refs/heads/master | 2021-09-10T10:03:32.962523 | 2018-03-24T06:26:18 | 2018-03-24T06:26:18 | 124,791,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,560 | py | from django.test import TestCase
from django.urls import reverse, resolve
from ..views import home, board_topics, new_topic
from ..models import Board, Topic, Post, User
from ..forms import NewTopicForm
# Create your tests here.
#########################
# TEST HOME
#########################
class HomeTests(TestCase):
def setUp(self):
self.board = Board.objects.create(
name='Django', description='Django board.')
url = reverse('boards:home')
self.response = self.client.get(url)
def test_home_view_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_home_url_resolves_home_view(self):
view = resolve('/')
self.assertEquals(view.func, home)
def test_home_view_contains_link_to_topics_page(self):
board_topics_url = reverse(
'boards:board_topics', kwargs={'pk': self.board.pk})
self.assertContains(
self.response, 'href="{0}"'.format(board_topics_url))
#########################
# TEST BOARD
#########################
class BoardTopicsTests(TestCase):
def setUp(self):
Board.objects.create(
name='Django', description='Django board.')
def test_board_topics_view_success_status_code(self):
url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
def test_board_topics_view_not_found_status_code(self):
url = reverse('boards:board_topics', kwargs={'pk': 99})
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_board_topics_url_resolves_board_topics_view(self):
view = resolve('/boards/1/')
self.assertEquals(view.func, board_topics)
def test_board_topics_view_contains_link_back_to_homepage(self):
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(board_topics_url)
homepage_url = reverse('boards:home')
self.assertContains(response, 'href="{0}"'.format(homepage_url))
def test_board_topics_view_contains_navigation_links(self):
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
homepage_url = reverse('boards:home')
new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(board_topics_url)
self.assertContains(response, 'href="{0}"'.format(homepage_url))
self.assertContains(response, 'href="{0}"'.format(new_topic_url))
#########################
# TEST NEW TOPIC
#########################
class NewTopicTests(TestCase):
def setUp(self):
Board.objects.create(name='Django', description='Django board.')
User.objects.create_user(
username='marcin', email='[email protected]', password='123')
def test_new_topic_view_success_status_code(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
    def test_new_topic_view_not_found_status_code(self):
url = reverse('boards:new_topic', kwargs={'pk': 99})
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
    def test_new_topic_view_resolves_board_topics_view(self):
view = resolve('/boards/1/new/')
self.assertEquals(view.func, new_topic)
def test_new_topic_view_contains_link_back_to_board_topics_view(self):
new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
response = self.client.get(new_topic_url)
self.assertContains(response, 'href="{0}"'.format(board_topics_url))
def test_csrf(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
self.assertContains(response, 'csrfmiddlewaretoken')
def test_new_topic_valid_post_data(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
data = {
'subject': 'Test title',
'message': 'Lorem ipsum dolor sit amet'
}
response = self.client.post(url, data)
self.assertTrue(Topic.objects.exists())
self.assertTrue(Post.objects.exists())
def test_new_topic_invalid_post_data(self):
'''
Invalid post data should not redirect
The expected behaviour is to show the form again with validation errors
'''
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.post(url, {})
form = response.context.get('form')
self.assertEquals(response.status_code, 200)
self.assertTrue(form.errors)
def test_new_topic_invalid_post_data_empty_fields(self):
'''
Invalid post data should not redirect
The expected behaviour is to show the form again with validation errors
'''
url = reverse('boards:new_topic', kwargs={'pk': 1})
data = {
'subject': '',
'message': ''
}
response = self.client.post(url, data)
self.assertEquals(response.status_code, 200)
self.assertFalse(Topic.objects.exists())
self.assertFalse(Post.objects.exists())
def test_contains_form(self):
url = reverse('boards:new_topic', kwargs={'pk': 1})
response = self.client.get(url)
form = response.context.get('form')
self.assertIsInstance(form, NewTopicForm)
| [
"="
] | = |
87416760e8d527e89eda7274e938fa35d0f5862c | ec551303265c269bf1855fe1a30fdffe9bc894b6 | /topic12_backtrack/T37_solveSudoku/interview.py | aa39e66a9273588c348549634ece2fa51180ca9a | [] | no_license | GongFuXiong/leetcode | 27dbda7a5ced630ae2ae65e19d418ebbc65ae167 | f831fd9603592ae5bee3679924f962a3ebce381c | refs/heads/master | 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | '''
37. Sudoku Solver
Write a program to solve a Sudoku puzzle by filling in the empty cells.
A sudoku solution must satisfy the following rules:
Each of the digits 1-9 must occur exactly once in each row.
Each of the digits 1-9 must occur exactly once in each column.
Each of the digits 1-9 must occur exactly once in each of the nine 3x3 boxes separated by bold lines.
Empty cells are indicated by the character '.'.
'''
class Solution:
def solveSudoku(self, board):
"""
Do not return anything, modify board in-place instead.
"""
        # find every position that does not yet have a digit filled in
all_points = []
for i in range(9):
for j in range(9):
if board[i][j] == ".":
all_points.append([i, j])
        # check tests whether digit k can legally be placed at position point
def check(point, k):
row_i = point[0]
col_j = point[1]
for i in range(9):
                # check the column (same column, other rows)
                if i != row_i and board[i][col_j] == k:
                    return False
                # check the row (same row, other columns)
                if i != col_j and board[row_i][i] == k:
                    return False
            # check the 3x3 box
for i in range(row_i//3*3 , row_i//3*3+3):
for j in range(col_j//3*3, col_j//3*3+3):
if i != row_i and j != col_j and board[i][j] == k:
return False
return True
def backtrack(i):
            # termination: every empty cell has been filled
if i == len(all_points):
return True
for j in range(1, 10):
                # check whether digit j fits at this position
                if check(all_points[i], str(j)):
                    # it fits, so write it into the board
                    board[all_points[i][0]][all_points[i][1]] = str(j)
                    if backtrack(i + 1):  # recurse to the next empty cell
                        return True
                    board[all_points[i][0]][all_points[i][1]] = "."  # on failure, restore the cell
return False
backtrack(0)
print(f"board:{board}")
if __name__ == "__main__":
solution = Solution()
while 1:
str1 = input()
if str1 != "":
nums = [[c for c in s.split(",")] for s in str1.split(";")]
print(f"nums:{nums}")
            solution.solveSudoku(nums)
else:
break
| [
"[email protected]"
] | |
6fead26c5691ec527b0a25f5b1bb51407b45423b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03433/s234156745.py | 8af8f4093c0e131eec273745a1b4cdfd8539bffb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | n = int(input().rstrip())
a = int(input().rstrip())
if n % 500 <= a:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
300c13b7d14b8eeb64fe0620787ba963d4b4a22d | 3c03ecb8e066f2d4eac73a469a75e5906734c66c | /_2019_2020/Classworks/_21_08_02_2020/_4.py | bb2dfe36b4e2eba4992e43a24a81bc1310665095 | [] | no_license | waldisjr/JuniorIT | af1648095ec36535cc52770b114539444db4cd0b | 6a67e713708622ae13db6d17b48e43e3d10611f2 | refs/heads/master | 2023-03-26T06:29:06.423163 | 2021-03-27T06:27:34 | 2021-03-27T06:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | file = open('1(4.py)', 'w')
for i in range(1000):
file.write(f"{i}\n")
file.close() | [
"[email protected]"
] | |
4e1efa5fc68c6cf783d434aebf74d1157be0268f | 6c1b28fce483c873f627104c8c58c90af54ef22a | /approach_3_solution_2.py | cbb82862b98488db1ddca6a2e88b15cc2ed1fb8c | [] | no_license | rajkan01/hands_on_code_review | dc873857a7d73f75c9d2caa5bba3fa93ba56a4a2 | ac28dabd6eb0d46345714208741ff57345f95149 | refs/heads/master | 2023-09-04T00:20:16.741717 | 2021-10-23T15:45:34 | 2021-10-25T10:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # Approach 3 - Solution 2
from string import ascii_lowercase as lowercase_letters
def is_pangram(sentence):
actual_bits = 0
expected_bits = 0b11111111111111111111111111
for i, char in enumerate(sentence):
if char.isalpha():
letter_index = ord(char.lower()) - ord("a")
bit_shift = 1 << letter_index
actual_bits = actual_bits | bit_shift
return expected_bits == actual_bits
# Approach 3 - Solution 2 intentionally doesn't contain any comments.
# As discussed in the course, this is a practice problem for you: apply Approach 3 - study the code of others -- to this solution.
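# A minimal usage sketch (the sentences below are illustrative, not from the course material):
if __name__ == "__main__":
    print(is_pangram("The quick brown fox jumps over the lazy dog"))  # expected: True
    print(is_pangram("Hello, world"))                                 # expected: False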
| [
"[email protected]"
] | |
bd29a3919e9e554eae311ed596991eb065b7db1f | b210903908d418d471e0df3b93c5f290ec1c05a9 | /gluon2pytorch/gluon2pytorch.py | ced44d371fe483100a99ec280b38330ca6939d3d | [
"MIT"
] | permissive | chipper1/gluon2pytorch | d7bcf71900172484f1e26c46ba6f051aa1e7d773 | e0fd770a28b1a8bf4d0aa352f360bf5765e8347d | refs/heads/master | 2020-04-19T07:49:38.974250 | 2019-01-22T13:17:23 | 2019-01-22T13:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import mxnet as mx
import numpy as np
# Import converters
from .layers import CONVERTERS
# Import PyTorch model template
from .pytorch_model_template import pytorch_model_template
def eval_model(pytorch_source, pytorch_dict, module_name):
# Tricky code
torch
nn
F
exec(pytorch_source)
globals()[module_name] = locals()[module_name]
pytorch_model = locals()[module_name]()
pytorch_model.load_state_dict(pytorch_dict)
return pytorch_model
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name):
"""
Render model.
"""
inits = [i for i in inits if len(i) > 0]
output = pytorch_model_template.format(**{
'module_name': pytorch_module_name,
'module_name_lower': pytorch_module_name.lower(),
'inits': '\n'.join(inits),
'inputs': ', '.join(['x' + str(i) for i in inputs]),
'calls': '\n'.join(calls),
'outputs': ', '.join(['x' + str(i) for i in outputs]),
})
if dst_dir is not None:
import os
import errno
try:
os.makedirs(dst_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(os.path.join(dst_dir, pytorch_module_name.lower() + '.py'), 'w+') as f:
f.write(output)
f.close()
torch.save(pytorch_dict, os.path.join(dst_dir, pytorch_module_name.lower() + '.pt'))
return output
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True):
"""
Function to convert a model.
"""
x = [mx.nd.array(np.ones(i)) for i in args]
x = net(*x)
# Get network params
params = net.collect_params()
# Create a symbol to trace net
# x = mx.sym.var('data')
x = [mx.sym.var('__input__' + str(i)) for i in range(len(args))]
sym = net(*x)
if len(sym) > 1:
group = mx.sym.Group(sym)
else:
group = sym
# Get JSON-definition of the model
json_model = json.loads(group.tojson())['nodes']
# Create empty accumulators
nodes = []
is_skipped = []
pytorch_dict = {}
inits = []
calls = []
inputs = []
outputs = [i[0] for i in json.loads(group.tojson())['heads']]
last = 0
# Trace model
for i, node in enumerate(json_model):
# If the node has 'null' op, it means, that it's not a real op, but only parameter
# TODO: convert constants
if node['op'] == 'null':
if node['name'].find('__input__') == 0:
inputs.append(int(node['name'][9:]))
is_skipped.append(1)
continue
# It's not 'null'
is_skipped.append(0)
# Create dict with necessary node parameters
op = {
'name': node['name'][:-4],
'type': node['op'],
}
print(op, node)
if len(node['inputs']) > 0:
            original_inputs = [i for i in np.array(node['inputs'])[:, 0] if i in inputs]
            op['inputs'] = [i for i in np.array(node['inputs'])[:, 0] if is_skipped[i] != 1 or i in original_inputs]
else:
print(json_model)
op['inputs'] = []
try:
# Not all nodes have 'attrs'
op['attrs'] = node['attrs']
except KeyError:
op['attrs'] = {}
# Debug output
if debug:
print(op)
print('__')
# Append new node to list
nodes.append(op)
# If operation is in available convertors, convert it
if op['type'] in CONVERTERS:
init_str, call_str = CONVERTERS[op['type']](i, op, nodes, params, pytorch_dict)
inits.append(init_str)
calls.append(call_str)
else:
raise AttributeError('Layer isn\'t supported')
pytorch_source = render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name)
return eval_model(pytorch_source, pytorch_dict, pytorch_module_name)
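# A minimal usage sketch (hypothetical model and input shape; assumes the network is a
# Gluon HybridBlock so it can be traced with mx.sym.var inputs, as this function requires):
#
#     net = mx.gluon.model_zoo.vision.resnet18_v1(pretrained=True)
#     pytorch_model = gluon2pytorch(net, args=[(1, 3, 224, 224)], dst_dir=None,
#                                   pytorch_module_name='ResNet18', debug=False)
#     pytorch_model.eval()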
| [
"[email protected]"
] | |
52f8d22f90a6a6870ff064d288a72be4c6ab50de | 7d78a18fcb8f34cc84e9439bd19cf491e3e0ec49 | /Code/Particle_Identification/msc-hpc/hpc-mini-1/model8.py | 7fca90d0b9552dd533fb15cee80aeff0c4a24a33 | [] | no_license | PsycheShaman/MSc-thesis | 62767951b67b922ce5a21cad5bdb258998b7d2ea | 34504499df64c7d6cc7c89af9618cd58d6378e8e | refs/heads/master | 2022-03-12T07:17:57.309357 | 2019-12-10T21:17:39 | 2019-12-10T21:17:39 | 151,471,442 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,794 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 18:47:05 2019
@author: gerhard
"""
import glob
import numpy as np
#P_files = glob.glob("C:/Users/gerhard/Documents/msc-thesis-data/P_*.pkl", recursive=True)
x_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/x_*.pkl")
y_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/y_*.pkl")
#x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\x_*.pkl")
#y_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\y_*.pkl")
import pickle
print("loading first x pickle........................................................................................")
with open(x_files[0], 'rb') as x_file0:
x = pickle.load(x_file0)
print("loading first y pickle........................................................................................")
with open(y_files[0], 'rb') as y_file0:
y = pickle.load(y_file0)
#with open(P_files[0], 'rb') as P_file0:
# P = pickle.load(P_file0)
x.shape = (x.shape[1],x.shape[2],x.shape[3])
print("x.shape")
print(x.shape)
print("recursively adding x pickles........................................................................................")
for i in x_files[1:]:
with open(i,'rb') as x_file:
print(i)
xi = pickle.load(x_file)
xi.shape = (xi.shape[1],xi.shape[2],xi.shape[3])
print("xi.shape")
print(xi.shape)
x = np.concatenate((x,xi),axis=0)
print("recursively adding y pickles........................................................................................")
for i in y_files[1:]:
with open(i,'rb') as y_file:
yi = pickle.load(y_file)
y = np.concatenate((y,yi),axis=None)
#for i in P_files[1:]:
# with open(i,'rb') as P_file:
# Pi = pickle.load(P_file)
# P = np.concatenate((P,Pi),axis=None)
#x_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/x_*.npy")
#y_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/y_*.npy")
#
#print("recursively adding x numpys........................................................................................")
#
#for i in x_files[0:]:
# with open(i,'rb') as x_file:
# print(i)
# xi = np.load(x_file)
# x = np.concatenate((x,xi),axis=0)
#
#print("recursively adding y numpys........................................................................................")
#
#for i in y_files[0:]:
# with open(i,'rb') as y_file:
# yi = np.load(y_file)
# y = np.concatenate((y,yi),axis=None)
nz = np.array([np.count_nonzero(i) for i in x])
zeros = np.where(nz==0)
x = np.delete(x,zeros,axis=0)
y = np.delete(y,zeros)
#P = np.delete(P,zeros)
x.shape = (x.shape[0],x.shape[1],x.shape[2],1)
#x.shape = (x.shape[0],x.shape[2],x.shape[1])
print("x.shape after reshape for lstm")
print(x.shape)
#GeV_range2 = np.where(P>=1.8 and P<=2.2)
#
#x = x[GeV_range2,:,:,:]
#y = y[GeV_range2]
electrons = np.where(y==1)
electrons = electrons[0]
pions = np.where(y==0)
pions = pions[0]
pions = pions[0:electrons.shape[0]]
x_1 = x[electrons,:,:]
x_2 = x[pions,:,:]
x = np.vstack((x_1,x_2))
y_1 = y[electrons]
y_2 = y[pions]
y = np.concatenate((y_1,y_2),axis=None)
ma = np.max(x)
x = x/ma
#ma = np.amax(x,axis=2)
#
#x = np.divide(x,ma)
# check the division above before running!
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=123456)
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, LSTM, Bidirectional, TimeDistributed
model = Sequential()
model.add(Conv2D(32,(6,6),input_shape=(17,24,1),padding="same",activation="relu"))
model.add(Conv2D(64,(6,6),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64,(4,4),padding="same",activation="relu"))
model.add(Conv2D(128,(4,4),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128,(3,3),padding="same",activation="relu"))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(Conv2D(512,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(1024,activation="relu"))
model.add(Dense(1024,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(64,activation="relu"))
model.add(Dense(32,activation="relu"))
model.add(Dense(2,activation="softmax"))
adam = tensorflow.keras.optimizers.Adam()
# Train the model using the Adam optimizer
model.compile(loss='binary_crossentropy',
optimizer=adam,
metrics=['accuracy'])
batch_size=32
epochs=50
history=model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
shuffle=True)#,
#class_weight=class_weights)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history1.png', bbox_inches='tight')
# summarize history for loss
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history2.png', bbox_inches='tight')
model.probs = model.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/hpc-mini/model8_results.csv", np.array(model.probs), fmt="%s")
np.savetxt("/home/vljchr004/hpc-mini/model8_y_test.csv", np.array(y_test), fmt="%s")
model.save('/home/vljchr004/hpc-mini/model8_.h5') # creates a HDF5 file 'my_model.h5'
del model
print("<-----------------------------done------------------------------------------>")
| [
"[email protected]"
] | |
fa852b15b22790660899f828bd2b36acf41ab473 | 2b477700384af7ceb67f97908f1bd5899d984596 | /mxonline/second_day/mxonline/mxonline/settings.py | 0c86916a2d658b263215bc8d182ed18fe7d4a103 | [] | no_license | ZhiqiKou/django | 58b743f962e0f7d85b3610e9d09a0e1db32ba9bb | e3d35c981e6b91130472114b121b65fd7d5cacf8 | refs/heads/master | 2020-03-28T20:44:56.286125 | 2018-09-07T02:21:29 | 2018-09-07T02:21:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | """
Django settings for mxonline project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import sys
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hw$ull9#yd)%((n32%_jx_cy+!kcr@u8-ywc_r4pg6kjmzx(f6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'organization',
'operation',
'courses',
]
# Override the default user model here so that UserProfile takes effect
AUTH_USER_MODEL = "users.UserProfile"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mxonline.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mxonline.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mxonline3',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) | [
"[email protected]"
] | |
a4b4a0a3244cac402cda7f3b4ed5278efc2fa651 | c4b47ba53d40e861571c82f8a968a989974dc433 | /fireball/blobs/admin.py | 454a72b4217a2e674b995a6f5a635ca10bde368e | [] | no_license | underlost/fireball | 4be3e441a82f6a0fbb603b33be8493f03019392e | 3cf312fa88860e9f2e9f34479b5b1962dae09f55 | refs/heads/master | 2016-09-01T18:45:18.059628 | 2013-06-03T16:26:12 | 2013-06-03T16:26:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from django.contrib import admin
from fireball.blobs.models import Blob
class BlobAdmin(admin.ModelAdmin):
list_filter = ('user',)
search_fields = ['description','url',]
list_display = ('user', 'url',)
admin.site.register(Blob,BlobAdmin)
| [
"[email protected]"
] | |
596411c05f2c94b4b357beb48a6cac370bb39083 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/seamless_cube_map.py | 25e63a7d3f689d0ff11cc0c81f81b889b4c44394 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | '''OpenGL extension ARB.seamless_cube_map
This module customises the behaviour of the
OpenGL.raw.GL.ARB.seamless_cube_map to provide a more
Python-friendly API
Overview (from the spec)
When sampling from cube map textures, a three-dimensional texture
coordinate is used to select one of the cube map faces and generate
a two dimensional texture coordinate ( s t ), at which a texel is
sampled from the determined face of the cube map texture. Each face
of the texture is treated as an independent two-dimensional texture,
and the generated ( s t ) coordinate is subjected to the same
clamping and wrapping rules as for any other two dimensional texture
fetch.
Although it is unlikely that the generated ( s t ) coordinate lies
significantly outside the determined cube map face, it is often the
case that the locations of the individual elements required during a
linear sampling do not lie within the determined face, and their
coordinates will therefore be modified by the selected clamping and
wrapping rules. This often has the effect of producing seams or
other discontinuities in the sampled texture.
This extension allows implementations to take samples from adjacent
cube map faces, providing the ability to create seamless cube maps.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/seamless_cube_map.txt
'''
from OpenGL.raw.GL.ARB.seamless_cube_map import _EXTENSION_NAME
def glInitSeamlessCubeMapARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
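# A minimal usage sketch (assumes a current GL context already exists, e.g. one created
# with GLUT or GLFW; GL_TEXTURE_CUBE_MAP_SEAMLESS is the enable token for this feature):
#
#     from OpenGL.GL import glEnable, GL_TEXTURE_CUBE_MAP_SEAMLESS
#     if glInitSeamlessCubeMapARB():
#         glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)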
### END AUTOGENERATED SECTION
| [
"[email protected]"
] | |
d485cfa23c7f446ebfa1be31d86428513cf3a031 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_38775.py | aa5ba685bb6ffb0c7e77e41ce4af889ae20a5bd0 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,853 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((576.874, 540.822, 485.573), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((598.091, 565.823, 546.833), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((623.318, 587.057, 622.151), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((611.306, 453.071, 585.762), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((686.143, 680.092, 779.238), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((583.516, 558.339, 528.007), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((582.521, 557.977, 526.767), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((563.861, 574.065, 540.474), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((550.902, 557.517, 559.175), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((542.229, 538.937, 539.879), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((514.423, 537.738, 535.439), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((493.423, 550.371, 549.158), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((588.124, 560.504, 499.948), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((397.241, 545.752, 593.753), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((511.625, 649.589, 724.084), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((511.625, 649.589, 724.084), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((523.133, 647.98, 698.582), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((525.208, 637.211, 672.704), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((532.957, 627.817, 647.087), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((546.267, 619.066, 623.377), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((562.643, 609.598, 601.974), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((572.556, 598.08, 577.717), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((386.539, 560.102, 751.952), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((758.573, 634.065, 399.435), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((605.732, 621.954, 593.966), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((605.732, 621.954, 593.966), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((624.194, 600.594, 591.035), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((642.628, 579.779, 597.448), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((636.927, 556.397, 613.563), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((608.734, 497.014, 507.327), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((661.84, 608.133, 724.55), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((606.707, 539.554, 539.754), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((606.921, 539.122, 539.763), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((602.149, 532.701, 513.098), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((585.556, 539.87, 491.818), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((559.419, 549.009, 495.526), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((544.538, 565.047, 513.163), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((523.277, 578.335, 526.055), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((497.067, 580.776, 536.171), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((565.717, 630.616, 525.687), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((429.722, 525.801, 547.642), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((611.519, 627.195, 530.498), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((614.858, 606.49, 544.807), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((624.743, 560.967, 576.711), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((637.906, 513.521, 608.447), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((663.455, 481.934, 537.566), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((637.242, 473.186, 705.227), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((579.913, 519.906, 554.457), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((602.611, 526.699, 570.855), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((631.482, 529.241, 573.589), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((653.721, 545.788, 584.54), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((672.791, 567.689, 591.808), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((675.915, 594.142, 605.603), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((625.733, 581.142, 545.283), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((726.761, 616.193, 667.856), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
fcd0b3996dcc8bf3891d3ed563e44c660b62677b | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/D/dmsilv/facebook_fans.py | 3fc1f0e56bfce614a8af5c9b37936e98b95a0c94 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,020 | py | # Blank Python
import scraperwiki
from BeautifulSoup import BeautifulSoup
#define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['Page Name', 'Fans'])
#scrape the fan section
def scrape_fans(soup):
data_table = soup.find("table",{ "class" : "uiGrid"}) #find the pages with most fans section
rows= data_table.findAll("tr") #find all the table rows
for row in rows: #loop through the rows
cells = row.findAll("td") #find all the cells
for cell in cells: #loop through the cells
#setup the data record
record={}
print cell
            table_cells = cell.findAll("p")  # find all the p items
if table_cells: #if the item exists store it
record['Page Name'] = table_cells[0].text
record['Fans'] = table_cells[1].text[:-5]
scraperwiki.datastore.save(["Page Name"], record)
def scrape_page(url):
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
#print soup.prettify()
link_table=soup.find("div", {"class" : "alphabet_list clearfix"})
#next_link=soup.findAll("a")
for link in link_table:
next_url=link['href']
#print next_url
html1 = scraperwiki.scrape(next_url)
soup1 = BeautifulSoup(html1)
scrape_fans(soup1)
#setup the base url
base_url = 'http://facebook.com/directory/pages/'
#setup the startup url
#call the scraping function
scrape_page(base_url)
| [
"[email protected]"
] | |
982fb6dfb5536e8d2ea0d6d461feb007703ab20d | 4cdcd0e06497bdeb793abcd98c870db414700bdd | /pyblp/utilities/basics.py | bd2060c78e628c77393492763db5863b5c7a861b | [
"MIT"
] | permissive | markpham/pyblp | bc1d22e6820c0d28905615aec4e8deb79ee03efa | 59714a0d7c46afa10dcd7575bace21026ebb00c7 | refs/heads/master | 2020-07-16T04:14:00.834907 | 2019-08-31T23:05:00 | 2019-08-31T23:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,063 | py | """Basic functionality."""
import contextlib
import functools
import inspect
import multiprocessing.pool
import re
import time
import traceback
from typing import (
Any, Callable, Container, Dict, Hashable, Iterable, Iterator, List, Mapping, Optional, Set, Sequence, Type, Tuple,
Union
)
import numpy as np
from .. import options
# define common types
Array = Any
RecArray = Any
Data = Dict[str, Array]
Options = Dict[str, Any]
Bounds = Tuple[Array, Array]
# define a pool managed by parallel and used by generate_items
pool = None
@contextlib.contextmanager
def parallel(processes: int) -> Iterator[None]:
r"""Context manager used for parallel processing in a ``with`` statement context.
This manager creates a context in which a pool of Python processes will be used by any method that requires
market-by-market computation. These methods will distribute their work among the processes. After the context
created by the ``with`` statement ends, all worker processes in the pool will be terminated. Outside of this
context, such methods will not use multiprocessing.
Importantly, multiprocessing will only improve speed if gains from parallelization outweigh overhead from
serializing and passing data between processes. For example, if computation for a single market is very fast and
there is a lot of data in each market that must be serialized and passed between processes, using multiprocessing
may reduce overall speed.
Arguments
---------
processes : `int`
Number of Python processes that will be created and used by any method that supports parallel processing.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/parallel.ipynb
.. raw:: latex
\end{examplenotebook}
"""
# validate the number of processes
if not isinstance(processes, int):
raise TypeError("processes must be an int.")
if processes < 2:
raise ValueError("processes must be at least 2.")
# start the process pool, wait for work to be done, and then terminate it
output(f"Starting a pool of {processes} processes ...")
start_time = time.time()
global pool
try:
with multiprocessing.pool.Pool(processes) as pool:
output(f"Started the process pool after {format_seconds(time.time() - start_time)}.")
yield
output(f"Terminating the pool of {processes} processes ...")
terminate_time = time.time()
finally:
pool = None
output(f"Terminated the process pool after {format_seconds(time.time() - terminate_time)}.")
def generate_items(keys: Iterable, factory: Callable[[Any], tuple], method: Callable) -> Iterator:
"""Generate (key, method(*factory(key))) tuples for each key. The first element returned by factory is an instance
of the class to which method is attached. If a process pool has been initialized, use multiprocessing; otherwise,
use serial processing.
"""
if pool is None:
return (generate_items_worker((k, factory(k), method)) for k in keys)
return pool.imap_unordered(generate_items_worker, ((k, factory(k), method) for k in keys))
def generate_items_worker(args: Tuple[Any, tuple, Callable]) -> Tuple[Any, Any]:
"""Call the the specified method of a class instance with any additional arguments. Return the associated key along
with the returned object.
"""
key, (instance, *method_args), method = args
return key, method(instance, *method_args)
def structure_matrices(mapping: Mapping) -> RecArray:
"""Structure a mapping of keys to (array or None, type) tuples as a record array in which each sub-array is
guaranteed to be at least two-dimensional.
"""
# determine the number of rows in all matrices
size = next(a.shape[0] for a, _ in mapping.values() if a is not None)
# collect matrices and data types
matrices: List[Array] = []
dtypes: List[Tuple[Union[str, Tuple[Hashable, str]], Any, Tuple[int]]] = []
for key, (array, dtype) in mapping.items():
matrix = np.zeros((size, 0)) if array is None else np.c_[array]
dtypes.append((key, dtype, (matrix.shape[1],)))
matrices.append(matrix)
# build the record array
structured = np.recarray(size, dtypes)
for dtype, matrix in zip(dtypes, matrices):
structured[dtype[0] if isinstance(dtype[0], str) else dtype[0][1]] = matrix
return structured
def update_matrices(matrices: RecArray, update_mapping: Dict) -> RecArray:
"""Update fields in a record array created by structure_matrices by re-structuring the matrices."""
mapping = update_mapping.copy()
for key in matrices.dtype.names:
if key not in mapping:
if len(matrices.dtype.fields[key]) > 2:
mapping[(matrices.dtype.fields[key][2], key)] = (matrices[key], matrices[key].dtype)
else:
mapping[key] = (matrices[key], matrices[key].dtype)
return structure_matrices(mapping)
def extract_matrix(structured_array_like: Mapping, key: Any) -> Optional[Array]:
"""Attempt to extract a field from a structured array-like object or horizontally stack field0, field1, and so on,
into a full matrix. The extracted array will have at least two dimensions.
"""
try:
matrix = np.c_[structured_array_like[key]]
return matrix if matrix.size > 0 else None
except Exception:
index = 0
parts: List[Array] = []
while True:
try:
part = np.c_[structured_array_like[f'{key}{index}']]
except Exception:
break
index += 1
if part.size > 0:
parts.append(part)
return np.hstack(parts) if parts else None
def extract_size(structured_array_like: Mapping) -> int:
"""Attempt to extract the number of rows from a structured array-like object."""
size = 0
getters = [
lambda m: m.shape[0],
lambda m: next(iter(structured_array_like.values())).shape[0],
lambda m: len(next(iter(structured_array_like.values()))),
lambda m: len(m)
]
for get in getters:
try:
size = get(structured_array_like)
break
except Exception:
pass
if size > 0:
return size
raise TypeError(
f"Failed to get the number of rows in the structured array-like object of type {type(structured_array_like)}. "
f"Try using a dictionary, a NumPy structured array, a Pandas DataFrame, or any other standard type."
)
def interact_ids(*columns: Array) -> Array:
"""Create interactions of ID columns."""
interacted = columns[0].flatten().astype(np.object)
if len(columns) > 1:
interacted[:] = list(zip(*columns))
return interacted
def output(message: Any) -> None:
"""Print a message if verbosity is turned on."""
if options.verbose:
if not callable(options.verbose_output):
raise TypeError("options.verbose_output should be callable.")
options.verbose_output(str(message))
def output_progress(iterable: Iterable, length: int, start_time: float) -> Iterator:
"""Yield results from an iterable while outputting progress updates at most every minute."""
elapsed = time.time() - start_time
next_minute = int(elapsed / 60) + 1
for index, iterated in enumerate(iterable):
yield iterated
elapsed = time.time() - start_time
if elapsed > 60 * next_minute:
output(f"Finished {index + 1} out of {length} after {format_seconds(elapsed)}.")
next_minute = int(elapsed / 60) + 1
def format_seconds(seconds: float) -> str:
"""Prepare a number of seconds to be displayed as a string."""
hours, remainder = divmod(int(round(seconds)), 60**2)
minutes, seconds = divmod(remainder, 60)
return f'{hours:02}:{minutes:02}:{seconds:02}'
def format_number(number: Any) -> str:
"""Prepare a number to be displayed as a string."""
if not isinstance(options.digits, int):
raise TypeError("options.digits must be an int.")
template = f"{{:^+{options.digits + 6}.{options.digits - 1}E}}"
formatted = template.format(float(number))
if "NAN" in formatted:
formatted = formatted.replace("+", " ")
return formatted
def format_se(se: Any) -> str:
"""Prepare a standard error to be displayed as a string."""
formatted = format_number(se)
for string in ["NAN", "-INF", "+INF"]:
if string in formatted:
return formatted.replace(string, f"({string})")
return f"({formatted})"
def format_options(mapping: Options) -> str:
"""Prepare a mapping of options to be displayed as a string."""
strings: List[str] = []
for key, value in mapping.items():
if callable(value):
value = f'{value.__module__}.{value.__qualname__}'
elif isinstance(value, float):
value = format_number(value)
strings.append(f'{key}: {value}')
joined = ', '.join(strings)
return f'{{{joined}}}'
def format_table(
header: Sequence[Union[str, Sequence[str]]], *data: Sequence, title: Optional[str] = None,
include_border: bool = True, include_header: bool = True, line_indices: Container[int] = ()) -> str:
"""Format table information as a string, which has fixed widths, vertical lines after any specified indices, and
optionally a title, border, and header.
"""
# construct the header rows
row_index = -1
header_rows: List[List[str]] = []
header = [[c] if isinstance(c, str) else c for c in header]
while True:
header_row = ["" if len(c) < -row_index else c[row_index] for c in header]
if not any(header_row):
break
header_rows.insert(0, header_row)
row_index -= 1
# construct the data rows
data_rows = [[str(c) for c in r] + [""] * (len(header) - len(r)) for r in data]
# compute column widths
widths = []
for column_index in range(len(header)):
widths.append(max(len(r[column_index]) for r in header_rows + data_rows))
# build the template
template = " " .join("{{:^{}}}{}".format(w, " |" if i in line_indices else "") for i, w in enumerate(widths))
# build the table
lines = []
if title is not None:
lines.append(f"{title}:")
if include_border:
lines.append("=" * len(template.format(*[""] * len(widths))))
if include_header:
lines.extend([template.format(*r) for r in header_rows])
lines.append(template.format(*("-" * w for w in widths)))
lines.extend([template.format(*r) for r in data_rows])
if include_border:
lines.append("=" * len(template.format(*[""] * len(widths))))
return "\n".join(lines)
def get_indices(ids: Array) -> Dict[Hashable, Array]:
"""get_indices takes a one-dimensional array input and returns a
dictionary such that the keys are the unique values of the array
and the values are the indices where the key appears in the array.
Examples
--------
>>> ids = np.array([1, 2, 1, 2, 3, 3, 1, 2])
>>> get_indices(ids)
{1: array([0, 2, 6]), 2: array([1, 3, 7]), 3: array([4, 5])}
"""
flat = ids.flatten()
sort_indices = flat.argsort(kind='mergesort')
sorted_ids = flat[sort_indices]
    changes = np.ones(flat.shape, np.bool_)  # np.bool_ remains valid on NumPy >= 1.24, where np.bool was removed
changes[1:] = sorted_ids[1:] != sorted_ids[:-1]
reduce_indices = np.nonzero(changes)[0]
return dict(zip(sorted_ids[reduce_indices], np.split(sort_indices, reduce_indices)[1:]))
class SolverStats(object):
"""Structured statistics returned by a generic numerical solver."""
converged: bool
iterations: int
evaluations: int
def __init__(self, converged: bool = True, iterations: int = 0, evaluations: int = 0) -> None:
"""Structure the statistics."""
self.converged = converged
self.iterations = iterations
self.evaluations = evaluations
class StringRepresentation(object):
"""Object that defers to its string representation."""
def __repr__(self) -> str:
"""Defer to the string representation."""
return str(self)
class Groups(object):
"""Computation of grouped statistics."""
sort_indices: Array
reduce_indices: Array
unique: Array
codes: Array
counts: Array
group_count: int
def __init__(self, ids: Array) -> None:
"""Sort and index IDs that define groups."""
# sort the IDs
flat = ids.flatten()
self.sort_indices = flat.argsort()
sorted_ids = flat[self.sort_indices]
# identify groups
        changes = np.ones(flat.shape, np.bool_)  # see note in get_indices about np.bool removal
changes[1:] = sorted_ids[1:] != sorted_ids[:-1]
self.reduce_indices = np.nonzero(changes)[0]
self.unique = sorted_ids[self.reduce_indices]
# encode the groups
sorted_codes = np.cumsum(changes) - 1
self.codes = sorted_codes[self.sort_indices.argsort()]
# compute counts
self.group_count = self.reduce_indices.size
self.counts = np.diff(np.append(self.reduce_indices, self.codes.size))
def sum(self, matrix: Array) -> Array:
"""Compute the sum of each group."""
return np.add.reduceat(matrix[self.sort_indices], self.reduce_indices)
def mean(self, matrix: Array) -> Array:
"""Compute the mean of each group."""
return self.sum(matrix) / self.counts[:, None]
def expand(self, statistics: Array) -> Array:
"""Expand statistics for each group to the size of the original matrix."""
return statistics[self.codes]
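# A minimal usage sketch of Groups (illustrative values, not from the package docs):
#
#     groups = Groups(np.array([0, 1, 0, 1]))
#     groups.sum(np.array([[1.0], [2.0], [3.0], [4.0]]))   # -> [[4.], [6.]]
#     groups.expand(np.array([[10.0], [20.0]]))            # -> [[10.], [20.], [10.], [20.]]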
class Error(Exception):
"""Errors that are indistinguishable from others with the same message, which is parsed from the docstring."""
stack: Optional[str]
def __init__(self) -> None:
"""Optionally store the full current traceback for debugging purposes."""
if options.verbose_tracebacks:
self.stack = ''.join(traceback.format_stack())
else:
self.stack = None
def __eq__(self, other: Any) -> bool:
"""Defer to hashes."""
return hash(self) == hash(other)
def __hash__(self) -> int:
"""Hash this instance such that in collections it is indistinguishable from others with the same message."""
return hash((type(self).__name__, str(self)))
def __repr__(self) -> str:
"""Defer to the string representation."""
return str(self)
def __str__(self) -> str:
"""Replace docstring markdown with simple text."""
doc = inspect.getdoc(self)
# normalize LaTeX
while True:
match = re.search(r':math:`([^`]+)`', doc)
if match is None:
break
start, end = match.span()
doc = doc[:start] + re.sub(r'\s+', ' ', re.sub(r'[\\{}]', ' ', match.group(1))).lower() + doc[end:]
# normalize references
while True:
match = re.search(r':ref:`[a-zA-Z0-9]+:([^`]+)`', doc)
if match is None:
break
start, end = match.span()
doc = doc[:start] + re.sub(r'<[^>]+>', '', match.group(1)) + doc[end:]
# remove all remaining domains and compress whitespace
doc = re.sub(r'[\s\n]+', ' ', re.sub(r':[a-z\-]+:|`', '', doc))
# optionally add the full traceback
if self.stack is not None:
doc = f"{doc} Traceback:\n\n{self.stack}\n"
return doc
class NumericalError(Error):
"""Floating point issues."""
_messages: Set[str]
def __init__(self) -> None:
super().__init__()
self._messages: Set[str] = set()
def __str__(self) -> str:
"""Supplement the error with the messages."""
combined = ", ".join(sorted(self._messages))
return f"{super().__str__()} Errors encountered: {combined}."
class MultipleReversionError(Error):
"""Reversion of problematic elements."""
_bad: int
_total: int
def __init__(self, bad_indices: Array) -> None:
"""Store element counts."""
super().__init__()
self._bad = bad_indices.sum()
self._total = bad_indices.size
def __str__(self) -> str:
"""Supplement the error with the counts."""
return f"{super().__str__()} Number of reverted elements: {self._bad} out of {self._total}."
class InversionError(Error):
"""Problems with inverting a matrix."""
_condition: float
def __init__(self, matrix: Array) -> None:
"""Compute condition number of the matrix."""
super().__init__()
from .algebra import compute_condition_number
self._condition = compute_condition_number(matrix)
def __str__(self) -> str:
"""Supplement the error with the condition number."""
return f"{super().__str__()} Condition number: {format_number(self._condition)}."
class InversionReplacementError(InversionError):
"""Problems with inverting a matrix led to the use of a replacement such as an approximation."""
_replacement: str
def __init__(self, matrix: Array, replacement: str) -> None:
"""Store the replacement description."""
super().__init__(matrix)
self._replacement = replacement
def __str__(self) -> str:
"""Supplement the error with the description."""
return f"{super().__str__()} The inverse was replaced with {self._replacement}."
class NumericalErrorHandler(object):
"""Decorator that appends errors to a function's returned list when numerical errors are encountered."""
error: Type[NumericalError]
def __init__(self, error: Type[NumericalError]) -> None:
"""Store the error class."""
self.error = error
def __call__(self, decorated: Callable) -> Callable:
"""Decorate the function."""
@functools.wraps(decorated)
def wrapper(*args: Any, **kwargs: Any) -> Any:
"""Configure NumPy to detect numerical errors."""
detector = NumericalErrorDetector(self.error)
with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
np.seterrcall(detector)
returned = decorated(*args, **kwargs)
if detector.detected is not None:
returned[-1].append(detector.detected)
return returned
return wrapper
class NumericalErrorDetector(object):
"""Error detector to be passed to NumPy's error call function."""
error: Type[NumericalError]
detected: Optional[NumericalError]
def __init__(self, error: Type[NumericalError]) -> None:
"""By default no error is detected."""
self.error = error
self.detected = None
def __call__(self, message: str, _: int) -> None:
"""Initialize the error and store the error message."""
if self.detected is None:
self.detected = self.error()
self.detected._messages.add(message)
| [
"[email protected]"
] | |
7b47c1b415e3ad729bdce1cdb26e32be6031bda6 | ef66e297a49d04098d98a711ca3fda7b8a9a657c | /snippets/ziroom/detail.py | 32e2f1c346b125a3b9d7882a7320a3f98a252f9a | [] | no_license | breezy1812/MyCodes | 34940357954dad35ddcf39aa6c9bc9e5cd1748eb | 9e3d117d17025b3b587c5a80638cb8b3de754195 | refs/heads/master | 2020-07-19T13:36:05.270908 | 2018-12-15T08:54:30 | 2018-12-15T08:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import os
from datetime import datetime
from time import sleep
from random import choice
import requests
from agents import AGENTS
url = 'http://www.ziroom.com/detail/info'
params = {
'id': '61155405',
'house_id': '60185997',
}
headers = {
'User-Agent': choice(AGENTS),
}
while True:
resp = requests.get(url, params=params, headers=headers)
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if resp.status_code != 200:
print(now, 'Failed')
sleep(5)
continue
try:
data = resp.json()['data']
status = data['status']
price = data['price']
print(now, status, price)
if status != 'tzpzz':
break
except Exception:
        print(resp.text)  # resp.json() failed, so dump the raw payload instead of an undefined name
sleep(10)
cmd = os.system('zsh -c "while true;do;afplay /System/Library/Sounds/Ping.aiff -v 30;done"')
| [
"[email protected]"
] | |
f588bf0d916ba7a047741568bb2946f4fd4c309d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/53/usersdata/89/22195/submittedfiles/matriz2.py | cd1384dfef6761b0fbf48eaf1aa1f3eaef0a4bc4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def somaDiagonalPrincipal(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,i]
return soma
def somaDiagonalSecundaria(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,a.shape[0]-i-1]
return soma
def somaLinhas(a):
s=[]
for i in range(0,a.shape[0],1):
soma=0
for j in range(0,a.shape[1],1):
soma=soma+a[i,j]
s.append(soma)
return s
def somaColunas(a):
r=[]
for j in range(0,a.shape[1],1):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,j]
r.append(soma)
return r
def quadradoMagico(a):
sdP=somaDiagonalPrincipal(a)
sdS=somaDiagonalSecundaria(a)
somaL=somaLinhas(a)
somaC=somaColunas(a)
contador=0
for i in range(0,len(somaL),1):
if sdP==sdS==somaL[i]==somaC[i]:
contador=contador+1
if contador==len(somaL):
return True
else:
return False
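# A minimal non-interactive check (illustrative 3x3 magic square, not part of the exercise):
#
#     sample = np.array([[2., 7., 6.], [9., 5., 1.], [4., 3., 8.]])
#     print(quadradoMagico(sample))  # expected: True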
# main program
n=input('enter the number of rows of the matrix:')
#n=input('enter the number of columns of the matrix:')
matriz=np.zeros((n,n))
for i in range(0,matriz.shape[0],1):
for j in range(0,matriz.shape[1],1):
        matriz[i,j]=input('enter a matrix element:')
if quadradoMagico(matriz):
print('S')
else:
    print('N')
| [
"[email protected]"
] |