| Column | Type |
| --- | --- |
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3 to 616) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0 to 112) |
| license_type | string (2 classes) |
| repo_name | string (length 5 to 115) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (777 classes) |
| visit_date | timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38) |
| revision_date | timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00) |
| committer_date | timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06) |
| github_id | int64 (4.92k to 681M, nullable) |
| star_events_count | int64 (0 to 209k) |
| fork_events_count | int64 (0 to 110k) |
| gha_license_id | string (22 classes) |
| gha_event_created_at | timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) |
| gha_created_at | timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) |
| gha_language | string (149 classes) |
| src_encoding | string (26 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (3 to 10.2M) |
| extension | string (188 classes) |
| content | string (length 3 to 10.2M) |
| authors | sequence (length 1) |
| author_id | string (length 1 to 132) |

Each data row below lists these fields in order, pipe-separated, with the full source file in the `content` field.
f4a65671dde5f682aab0a747b66e2ba2cdc09f88 | c9d4d4c78703d009da11999e4e59b6a168a454a2 | /examples/Machine Learning In Action/reducer.py | 67e8edacbbd3347055d42c21781c0208b8451281 | [
"MIT"
] | permissive | AkiraKane/Python | 23df49d7f7ae0f375e0b4ccfe4e1b6a077b1a52b | 12e2dcb9a61e9ab0fc5706e4a902c48e6aeada30 | refs/heads/master | 2020-12-11T07:20:01.524438 | 2015-11-07T12:42:22 | 2015-11-07T12:42:22 | 47,440,128 | 1 | 0 | null | 2015-12-05T03:15:52 | 2015-12-05T03:15:51 | null | UTF-8 | Python | false | false | 1,139 | py | '''
-------------------------------------------------------------------------
Book: Machine Learning In Action
# Lesson: MapReduce - reducer
# Author: Kelly Chan
# Date: Feb 3 2014
-------------------------------------------------------------------------
'''
import sys
from numpy import mat, mean, power
def dataLoad(dataFile):
for line in dataFile:
yield line.rstrip()
# creating a generator of stripped lines from dataFile
data = dataLoad(sys.stdin)
# splitting data lines into separate items and storing in a list of lists
mapperOut = [line.split('\t') for line in data]
# accumulating total number of samples, overall sum and overall sum squared
accumulateN = 0.0
accumulateSum = 0.0
accumulateSumSquared = 0.0
for instance in mapperOut:
thisN = float(instance[0])
accumulateN += thisN
accumulateSum += thisN * float(instance[1])
accumulateSumSquared += thisN * float(instance[2])
# calculating means
mean = accumulateSum / accumulateN
meanSq = accumulateSumSquared / accumulateN
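# Combination identity used above (assuming each mapper line has the form
# "n<TAB>mean<TAB>meanSq"): global mean = sum(n_i * mean_i) / sum(n_i), and
# the (biased) variance can later be recovered as meanSq - mean**2.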
# printing size, mean, mean squared
print "%d\t%f\t%f" % (accumulateN, mean, meanSq)
print >> sys.stderr, "report: still alive"
| [
"[email protected]"
] | |
574db95533b5d034f658f9a51a1f0232059e0a1b | 7c1b599c5b5be0ec5ad142e52e5c15a7c9c8ea12 | /venv/Lib/site-packages/tensorflow/contrib/quantize/python/fold_batch_norms.py | 61c90dba7d4a688497fdac4d4ba0332fa1d94cd3 | [] | no_license | namtran98/NSTAR---MuddHacks | 88d602a0847bb923088c7f0be6d5c2980b11a36d | cbc04873e1f02cb6b62a7b77c5c44eb4e9422ab8 | refs/heads/master | 2020-04-21T19:41:49.889253 | 2019-02-09T01:02:37 | 2019-02-09T01:02:37 | 169,816,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,924 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to fold batch norm into preceding convolution or FC layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
def FoldBatchNorms(graph, is_training, freeze_batch_norm_delay=None):
"""Finds batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization. This value is used
only when is_training is True.
Raises:
ValueError: When batch norm folding fails.
"""
_FoldFusedBatchNorms(
graph, is_training, freeze_batch_norm_delay=freeze_batch_norm_delay)
_FoldUnfusedBatchNorms(
graph,
is_training=is_training,
freeze_batch_norm_delay=freeze_batch_norm_delay)
def _FoldFusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds fused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF creates a unique scope whose name starts with
# `scope`.
with graph.as_default(), graph.name_scope(scope + sep):
with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
# new weights = old weights * gamma / sqrt(variance + epsilon)
# new biases = -mean * gamma / sqrt(variance + epsilon) + beta
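        # Sketch of the identity being applied (per-channel broadcasting
        # assumed):
        #   BN(W*x) = gamma * (W*x - mean) / sqrt(var + eps) + beta
        #           = (W * gamma / sqrt(var + eps)) * x
        #             + (beta - mean * gamma / sqrt(var + eps))
        # so the scale folds into the weights and the remainder becomes a bias.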
multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
match.variance_tensor + match.bn_op.get_attr('epsilon'))
bias_tensor = math_ops.subtract(
match.beta_tensor,
match.mean_tensor * multiplier_tensor,
name='bias')
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context='',
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay,
fused_batch_norm=True))
# The shape of depthwise weights is different, so we need to reshape the
# multiplier_tensor to ensure that the scaled_weight_tensor has the
# expected shape.
weights = match.weight_tensor
if match.layer_op.type == 'DepthwiseConv2dNative':
new_shape = [
match.weight_tensor.get_shape().as_list()[2],
match.weight_tensor.get_shape().as_list()[3]
]
multiplier_tensor = array_ops.reshape(
multiplier_tensor, new_shape, name='scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(
correction_scale, new_shape, name='correction_reshape')
if correction_scale is not None:
weights = math_ops.multiply(
correction_scale, weights, name='correction_mult')
scaled_weight_tensor = math_ops.multiply(
weights, multiplier_tensor, name='mul_fold')
new_layer_tensor = _CloneWithNewOperands(
match.layer_op, match.input_tensor, scaled_weight_tensor,
match.batch_to_space_op)
if correction_recip is not None:
new_layer_tensor = math_ops.multiply(
correction_recip, new_layer_tensor, name='post_conv_mul')
new_layer_tensor = math_ops.add(new_layer_tensor, (correction_offset),
'correction_add')
bias_add_tensor = math_ops.add(
new_layer_tensor, bias_tensor, name='add_fold')
nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,
match.output_tensor)
if nodes_modified_count == 0:
raise ValueError('Folding batch norms failed, %s had no outputs.' %
match.output_tensor.name)
def _FindFusedBatchNorms(graph):
"""Finds all ops and tensors related to found FusedBatchNorms.
Args:
graph: Graph to inspect.
Yields:
_FusedBatchNormMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
# In practice, the weight pattern can match a Variable or a SpaceToBatchND
# operation that follows a variable for atrous convolutions.
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matcher.OpTypePattern('*')
moving_average_pattern = graph_matcher.OpTypePattern('*')
bn_decay_pattern = graph_matcher.OpTypePattern('*')
layer_pattern = graph_matcher.OpTypePattern(
'Conv2D|DepthwiseConv2dNative|MatMul',
inputs=[input_pattern, weight_pattern])
batch_to_space_pattern = graph_matcher.OpTypePattern(
'BatchToSpaceND',
inputs=[
layer_pattern,
graph_matcher.OpTypePattern('*'),
graph_matcher.OpTypePattern('*')
])
layer_output_pattern = graph_matcher.OneofPattern(
[layer_pattern, batch_to_space_pattern])
# MatMul has a Reshape between it and FusedBatchNorm.
matmul_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape',
inputs=[layer_output_pattern,
graph_matcher.OpTypePattern('*')])
batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm',
inputs=[
graph_matcher.OneofPattern(
[matmul_reshape_pattern, layer_output_pattern]), gamma_pattern,
beta_pattern, mean_pattern, variance_pattern
])
matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[batch_norm_pattern,
graph_matcher.OpTypePattern('*')])
bn_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern(
[matmul_bn_output_reshape_pattern, batch_norm_pattern]))
moving_average_sub_pattern = graph_matcher.OpTypePattern(
'Sub', inputs=[moving_average_pattern, batch_norm_pattern])
moving_average_mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[moving_average_sub_pattern, bn_decay_pattern])
moving_avg_mul_matcher = graph_matcher.GraphMatcher(
moving_average_mul_pattern)
for match_result in bn_matcher.match_graph(graph):
moving_mean_tensor = None
moving_variance_tensor = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
batch_to_space_op = None
layer_op = match_result.get_op(layer_pattern)
layer_tensor = match_result.get_tensor(layer_pattern)
bn_op = match_result.get_op(batch_norm_pattern)
batch_epsilon = bn_op.get_attr('epsilon')
# In the MatMul case, the output of batch norm is reshaped back into a
# 2D tensor, so the output_tensor is the output of the Reshape op.
output_tensor = bn_op.outputs[0]
if layer_op.type == 'MatMul':
output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)
# If the matcher didn't match matmul_bn_output_reshape, there will be
# another match for this 'MatMul' later, so we can skip this one.
if output_reshape_op is None:
continue
output_tensor = output_reshape_op.outputs[0]
# Ensure that the output tensor has consumers, otherwise this is a dangling
# node and not a match.
if not output_tensor.consumers():
continue
batch_to_space_op = match_result.get_op(batch_to_space_pattern)
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in training is different from that in inference. It takes
# empty 'mean' and empty 'variance', and produces the mean and the variance
# of the batch. Therefore, when is_training is true, mean_tensor and
# variance_tensor point to 1st and 2nd (0-based) output of bn_op,
# respectively; when is_training is false, they point to bn_op's inputs.
is_training = bn_op.get_attr('is_training')
if is_training:
# FusedBatchNormGrad doesn't compute gradients of the batch_mean and
# batch_variance outputs, so we need to substitute our own custom
# gradient.
# TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
# pylint: disable=protected-access
bn_op._set_attr(
'_gradient_op_type',
attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
# pylint: enable=protected-access
mean_tensor = bn_op.outputs[1]
# The batch variance used during forward and backward prop is biased,
# i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average
# calculation, the variance is corrected by the term N/N-1 (Bessel's
# correction). The variance tensor read from FuseBatchNorm has Bessel's
# correction applied, so we undo it here.
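      # Concretely: biased_var = unbiased_var * (n - 1) / n, which is the
      # multiplication performed below.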
scope, sep, _ = bn_op.name.rpartition('/')
g = ops.get_default_graph()
with g.as_default(), g.name_scope(scope + sep):
n = math_ops.cast(
array_ops.size(layer_tensor) / array_ops.size(mean_tensor),
dtypes.float32)
variance_tensor = math_ops.multiply(
bn_op.outputs[2], (n - 1) / n, name='Undo_Bessel_Correction')
# TODO(suharshs): Find a way to get rid of this inner match.
for mul_match_result in moving_avg_mul_matcher.match_graph(graph):
sub_op = mul_match_result.get_op(moving_average_sub_pattern)
if sub_op.inputs[1].name == bn_op.outputs[1].name:
# During training: Batch Mean is bn_op.outputs[1]
moving_mean_tensor = sub_op.inputs[0]
bn_decay_mean_tensor = mul_match_result.get_tensor(bn_decay_pattern)
if sub_op.inputs[1].name == bn_op.outputs[2].name:
# During training: Batch Var is bn_op.outputs[2]
moving_variance_tensor = sub_op.inputs[0]
bn_decay_var_tensor = mul_match_result.get_tensor(bn_decay_pattern)
else:
mean_tensor = match_result.get_tensor(mean_pattern)
variance_tensor = match_result.get_tensor(variance_pattern)
yield _BatchNormMatch(
layer_op=layer_op,
bn_op=bn_op,
output_tensor=output_tensor,
input_tensor=input_tensor,
weight_tensor=weight_tensor,
gamma_tensor=gamma_tensor,
beta_tensor=beta_tensor,
mean_tensor=mean_tensor,
variance_tensor=variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon,
batch_to_space_op=batch_to_space_op)
def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
fused_batch_norm):
"""Computes batch norm correction params.
Before batch normalization is frozen:
We use batch statistics for batch norm.
correction_scale = sigma_b/sigma_mv
correction_recip = 1/correction_scale
correction_offset = 0
After batch normalization is frozen:
correction_scale = sigma_b/sigma_mv
correction_recip = 1
correction_offset = gamma*(mu_b/sigma_b-mu_mv/sigma_mv).
Batch norm is frozen if global_step > bn_freeze_delay.
The corrections ensure that:
  a) The weights are quantized after scaling by gamma/sigma_mv. This enables
     smoother training as the scaling on the weights changes slowly, rather
     than jumping across mini-batches.
  b) Changing the values of the corrections allows one to switch from using
     batch statistics to using the moving mean and variance, without requiring
     changes to batch_norm.
Args:
context: The scope under which we look for batch norm params
match: Object containing required batch norm tensors for correction
computation.
freeze_batch_norm_delay: Delay in steps at which computation switches
from regular batch norm to frozen mean and variance.
fused_batch_norm: Bool, true if fused batch norm is used.
Returns:
A tuple of correction_scale, correction_recip, correction_offset
"""
g = ops.get_default_graph()
prefix = '' if not context else context + '/'
with g.name_scope(prefix + 'batch_norm_correction'):
recip_sigma_mv = math_ops.rsqrt(
match.moving_variance_tensor + match.batch_epsilon)
recip_sigma = math_ops.rsqrt(match.variance_tensor + match.batch_epsilon)
correction_scale = math_ops.divide(
recip_sigma_mv, recip_sigma, name='scale_compute')
correction_scale = array_ops.identity(
correction_scale, name='correction_scale')
correction_recip = math_ops.reciprocal(
correction_scale, name='reciprocal_compute')
correction_offset = math_ops.multiply(
match.gamma_tensor,
match.mean_tensor * recip_sigma -
match.moving_mean_tensor * recip_sigma_mv,
name='offset_compute')
if freeze_batch_norm_delay is not None:
use_mv_avg = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
freeze_batch_norm_delay,
name='use_moving_average')
else:
use_mv_avg = False
bn_decay_zero = 0.0
bn_decay_mean_consumers = list(match.bn_decay_mean_tensor.consumers())
    bn_decay_var_consumers = list(match.bn_decay_var_tensor.consumers())
bn_decay_mean_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_mean_tensor,
name='freeze_moving_mean')
graph_editor.reroute_ts(
[bn_decay_mean_out], [match.bn_decay_mean_tensor],
can_modify=bn_decay_mean_consumers)
bn_decay_var_consumers = list(match.bn_decay_var_tensor.consumers())
bn_decay_var_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_var_tensor,
name='freeze_moving_var')
graph_editor.reroute_ts(
[bn_decay_var_out], [match.bn_decay_var_tensor],
can_modify=bn_decay_var_consumers)
correction_recip = utils.smart_cond(
use_mv_avg,
lambda: array_ops.ones(correction_scale.shape),
lambda: correction_recip,
name='correction_recip')
correction_offset = utils.smart_cond(
use_mv_avg,
lambda: correction_offset,
lambda: array_ops.zeros(correction_offset.shape),
name='correction_offset')
return correction_scale, correction_recip, correction_offset
def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor,
batch_to_space_op):
"""Clones layer_op with input_tensor and weight_tensor as new inputs."""
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),
data_format=layer_op.get_attr('data_format'),
name=new_layer_name)
elif layer_op.type == 'MatMul':
return math_ops.matmul(
input_tensor,
weight_tensor,
transpose_a=layer_op.get_attr('transpose_a'),
transpose_b=layer_op.get_attr('transpose_b'),
name=new_layer_name)
elif layer_op.type == 'DepthwiseConv2dNative':
conv = nn.depthwise_conv2d(
input_tensor,
weight_tensor,
rate=layer_op.get_attr('dilations'),
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
name=new_layer_name)
    # Copy the batch to space operation if we have an atrous convolution.
if batch_to_space_op:
batch_to_space_op = layer_op.outputs[0].consumers()[0]
# TODO(suharshs): It's hard to make this name match with the unfused name.
# Restructure this code to not rely on scope at all.
new_batch_to_space_name = batch_to_space_op.name.split('/')[-1] + '_Fold'
conv = array_ops.batch_to_space_nd(
conv,
batch_to_space_op.inputs[1],
batch_to_space_op.inputs[2],
name=new_batch_to_space_name)
return conv
else:
raise ValueError('Cannot handle operation of type: %s' % layer_op.type)
@ops.RegisterGradient('FoldFusedBatchNormGrad')
def _FoldFusedBatchNormGrad(op, unused_grad_y, grad_mean, grad_var, unused_1,
unused_2):
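  # Gradient sketch: with n input elements per statistic, d(batch_mean)/dx
  # is 1/n and, with Bessel's correction, d(batch_var)/dx is
  # 2*(x - batch_mean)/(n - 1); op.outputs[1] holds the batch mean.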
x = op.inputs[0]
n = math_ops.cast(
array_ops.size(x) / array_ops.size(grad_mean), dtypes.float32)
dmean_dx = grad_mean / n
dvar_dx = 2 * grad_var * (x - op.outputs[1]) / (n - 1)
return (dmean_dx + dvar_dx), None, None, None, None
def _FoldUnfusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds unfused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, True if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
for bn in common.BatchNormGroups(graph):
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
if not _IsValidUnfusedBatchNorm(graph, bn):
continue
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(
graph,
bn,
has_scaling=has_scaling,
freeze_batch_norm_delay=freeze_batch_norm_delay,
is_training=is_training)
activation = common.GetEndpointActivationOp(graph, bn)
if activation:
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[activation])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % activation.name)
continue
# Treat consumer ops in bypass modules differently since they have Add
# operations instead of Relu* above.
add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)
add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[add_bypass])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)
def _IsValidUnfusedBatchNorm(graph, context):
"""Checks that the output of the unfused batch norm has consumers."""
add_shift = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/add_1')
# Ensure that the output tensor of batch norm has consumers, otherwise this
# is a dangling node and not a match.
return bool(add_shift.outputs[0].consumers())
def _FindMatchingTensor(graph, match_pattern, scope):
"""Finds best match of ops matching match_pattern with scope.
Example: _FindMatchingTensor(graph,'/BatchNorm/moments/Squeeze',
'MobilenetV1/MobilenetV1/Conv2d_0/') returns:
Tensor('MobilenetV1/Conv2d_0/BatchNorm/moments/Squeeze')
Args:
graph: Graph to inspect.
match_pattern: Part of the name of the op that we need to match, should
be present in the op's name
scope: The scope of the op. All the elements of the scope need not be
present in the op's name.
Returns:
Tensor from graph that provides the best match to the match_pattern and
scope
"""
oplist = graph.get_operations()
split_context = set(scope.split('/'))
match_dict = {}
for op in oplist:
if op.name.endswith(match_pattern):
split_name = op.name.split('/')
num_matches = len(set(split_name) & split_context)
if num_matches > 0:
match_dict[op.name] = num_matches
# match_dict contains matching op names from graph with values being
# number of matches to scope. We pick the key with the most matches
if match_dict:
max_key = max(match_dict, key=match_dict.get)
return graph.get_tensor_by_name(max_key + ':0')
else:
return None
def _GetBatchNormParams(graph, context, has_scaling):
"""Extracts relevant tensors for folding batch norms.
Args:
graph: Graph to inspect.
context: The scope under which we look for batch norm params
has_scaling: Bool that specifies if scaling is done as part of batch norm.
Returns:
_BatchNormMatch containing all required batch norm parameters.
"""
gamma_tensor = None
batch_mean_tensor = None
batch_variance_tensor = None
moving_mean_tensor = None
moving_variance_tensor = None
batch_epsilon = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
# TODO(raghuramank) This code relies on string matching and needs to be
# updated if unfused batch norm continues to be widely used
# Matching variable names is brittle and relies on scoping
# conventions. Fused batch norm folding is more robust. Support for unfused
# batch norms will be deprecated as we move forward. Fused batch norms allow
# for faster training and should be used whenever possible.
# context contains part of the names of the tensors we are interested in:
# For MobilenetV1, the context has repetitions:
# MobilenetV1/MobilenetV1/Conv2d_3_depthwise
# when the moving_mean tensor has the name:
# MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean/read
# To pick the correct variable name, it is necessary to ignore the repeating
# header.
# For MobilenetV2, this problem does not exist:
# The context is: MobilenetV2/expanded_conv_3/depthwise
# and the names of the tensors start with a single MobilenetV2
# The moving mean for example, has the name:
# MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
# We identify the best match for an op by checking for
# 1. The suffix of the op is exactly matched
# 2. Maximum number of matches with the context.The matching
# score is given by the number of parts of context (split by /) that
# are present in the parts of the tensor name (again split by /).
# For example: scope= MobilenetV2/MobilenetV2/expanded_conv_3 and
# op.name = MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
# will have 2 matches,scope with a different conv layer will have one match.
op_suffix_mean = '/BatchNorm/moments/Squeeze'
op_suffix_variance = '/BatchNorm/moments/Squeeze_1'
op_suffix_epsilon = '/BatchNorm/batchnorm_1/add/y'
op_suffix_bn_decay_mean = '/BatchNorm/AssignMovingAvg/decay'
op_suffix_bn_decay_var = '/BatchNorm/AssignMovingAvg_1/decay'
if variable_scope.get_variable_scope().use_resource:
op_suffix_gamma = '/BatchNorm/gamma/Read/ReadVariableOp'
op_suffix_moving_variance = (
'/BatchNorm/moving_variance/Read/ReadVariableOp')
op_suffix_moving_mean = ('/BatchNorm/moving_mean/Read/ReadVariableOp')
else:
op_suffix_gamma = '/BatchNorm/gamma'
op_suffix_moving_variance = '/BatchNorm/moving_variance/read'
op_suffix_moving_mean = '/BatchNorm/moving_mean/read'
# Parse through list of ops to find relevant ops
batch_mean_tensor = _FindMatchingTensor(graph, op_suffix_mean, context)
batch_variance_tensor = _FindMatchingTensor(graph, op_suffix_variance,
context)
moving_mean_tensor = _FindMatchingTensor(graph, op_suffix_moving_mean,
context)
moving_variance_tensor = _FindMatchingTensor(graph, op_suffix_moving_variance,
context)
batch_epsilon = _FindMatchingTensor(graph, op_suffix_epsilon, context)
bn_decay_mean_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_mean,
context)
bn_decay_var_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_var,
context)
if batch_mean_tensor is None and moving_mean_tensor is None:
    raise ValueError('Error folding unfused batch norms')
if has_scaling:
gamma_tensor = _FindMatchingTensor(graph, op_suffix_gamma, context)
if not has_scaling:
gamma_tensor = array_ops.ones(moving_mean_tensor.shape)
return _BatchNormMatch(
layer_op=None,
bn_op=None,
output_tensor=None,
input_tensor=None,
weight_tensor=None,
gamma_tensor=gamma_tensor,
beta_tensor=None,
mean_tensor=batch_mean_tensor,
variance_tensor=batch_variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon,
batch_to_space_op=None)
def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
is_training):
"""Folds in batch norm layer into preceding convolution or FC layer.
Creates 3 new nodes, connects their inputs and adds them to the graph:
mul is cloned into mul_fold, Conv2D or MatMul, or DepthwiseConv2d is cloned
into respective *_Fold, add is cloned into add_fold.
Args:
graph: Graph to modify.
context: String, batch norm context, i.e. node into which BatchNorm is
nested.
has_scaling: Whether the batch norm has scaling enabled.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
is_training: Bool, true if training.
Raises:
ValueError: When operation type is not supported, or input and output tensor
shapes mismatch for created operations: mul_fold, add_fold.
Returns:
A pair of Operations, the first is the original consumer node of the batch
norm (../BatchNorm/batchnorm_1/add_1), the second is the consumer node of
the folded graph (add_fold).
"""
mul_scale_name = 'mul_1' if has_scaling else 'mul'
mul_scale = graph.get_operation_by_name(context +
'/BatchNorm/batchnorm_1/' +
mul_scale_name)
op_below = mul_scale.inputs[0].op
# Skip over the BatchToSpace operation in the case of atrous convolutions.
batch_to_space_op = None
if op_below.type == 'BatchToSpaceND':
batch_to_space_op = op_below
op_below = op_below.inputs[0].op
weights = op_below.inputs[1]
match = _GetBatchNormParams(
graph=graph, context=context, has_scaling=has_scaling)
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context=context,
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay,
fused_batch_norm=False))
# Special handling for weights of depthwise convolution.
if op_below.type == 'DepthwiseConv2dNative':
new_shape = [
weights.get_shape().as_list()[2],
weights.get_shape().as_list()[3]
]
scale_name = 'mul' if has_scaling else 'Rsqrt'
scale = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/' + scale_name)
scale = array_ops.reshape(scale.outputs[0], new_shape,
context + '/scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(correction_scale, new_shape,
context + '/correction_reshape')
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + '/correction_mult')
mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights),
(1, scale)])
elif op_below.type in ['Conv2D', 'MatMul']:
if correction_scale is not None:
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + '/correction_mult')
mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights)])
else:
raise ValueError('Cannot handle operation of type: %s' % op_below.type)
_AssertShapesMatch('mul_fold', mul_fold.inputs[0], mul_fold.outputs[0])
conv_or_fc_folded = _CloneOp(op_below, op_below.name + '_Fold',
[(1, mul_fold.outputs[0])])
add_shift = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/add_1')
corrected_output = conv_or_fc_folded.outputs[0]
  # Copy the batch to space operation if we have an atrous convolution.
if batch_to_space_op:
corrected_output = array_ops.batch_to_space_nd(
corrected_output,
batch_to_space_op.inputs[1],
batch_to_space_op.inputs[2],
name=batch_to_space_op.name + '_Fold')
if correction_offset is not None:
with ops.device(conv_or_fc_folded.device):
corrected_output = math_ops.multiply(correction_recip, corrected_output,
context + '/post_conv_mul')
corrected_output = math_ops.add(corrected_output, (correction_offset),
context + '/correction_add')
add_fold = _CloneOp(add_shift, context + '/add_fold', [(0, corrected_output)])
_AssertShapesMatch('add_fold', add_fold.inputs[0], add_fold.outputs[0])
return add_shift, add_fold
def _CloneOp(op, new_name, new_inputs):
"""Clones a given op, replaces its name and some of its inputs.
Args:
op: Operation to modify.
new_name: String, a new name to set on cloned op.
new_inputs: A list of tuples (idx, tensor), each input with corresponding
index will be replaced by the given Tensor in the cloned op.
Returns:
Operation, the cloned op.
Raises:
TypeError: When Operation type is not supported.
ValueError: When input shapes are incompatible.
"""
inputs = list(op.inputs)
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name)
class _OpCloner(object):
"""Helper class that clones tf.Operations based on their type."""
def __init__(self):
self.op_type_to_action = {
'Mul': self._CloneMul,
'Add': self._CloneAdd,
'Conv2D': self._CloneConv2d,
'DepthwiseConv2dNative': self._CloneDepthwiseConv2d,
'MatMul': self._CloneMatMul,
}
def _CloneMul(self, op, inputs, new_name):
del op # Unused.
return math_ops.multiply(inputs[0], inputs[1], name=new_name).op
def _CloneAdd(self, op, inputs, new_name):
del op # Unused.
return math_ops.add(inputs[0], inputs[1], name=new_name).op
def _CloneConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn_ops.conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'),
data_format=op.get_attr('data_format'),
name=new_name).op
def _CloneDepthwiseConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn.depthwise_conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
name=new_name).op
def _CloneMatMul(self, op, inputs, new_name):
weights = inputs[0]
input_tensor = inputs[1]
self._AssertFCShapes(op.name, weights, input_tensor)
return math_ops.matmul(
weights,
input_tensor,
transpose_a=op.get_attr('transpose_a'),
transpose_b=op.get_attr('transpose_b'),
name=new_name).op
def Clone(self, op, inputs, new_name):
try:
return self.op_type_to_action[op.type](op, inputs, new_name)
except KeyError:
raise TypeError('Unsupported operation type: %s' % op.type)
def _AssertConvShapes(self, op_name, input_tensor, weights):
"""Makes sure that convolution inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
input_tensor: Input that is convolved.
weights: Weights of the convolution filter.
Raises:
ValueError: When input shapes are incompatible.
"""
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, input_shape, weights_shape))
def _AssertFCShapes(self, op_name, weights, input_tensor):
"""Makes sure that FC layer inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
weights: Weights used in FC layer.
input_tensor: Input into FC layer.
Raises:
ValueError: When input shapes are incompatible.
"""
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, weights_shape, input_shape))
_OP_CLONER = _OpCloner()
def _AssertShapesMatch(op_name, in_tensor, out_tensor):
"""Makes sure that shapes of input and output tensors are compatible.
Args:
op_name: String, operation name, only used in error message.
in_tensor: Tensor, input tensor.
out_tensor: Tensor, output tensor.
Raises:
ValueError: When input and output tensors have different shapes.
"""
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape))
def _HasScaling(graph, input_to_ops_map, bn):
r"""Checks if batch norm has scaling enabled.
Difference between batch norm with scaling and without is that with scaling:
  Rsqrt -> mul -> mul_1
              \-> mul_2
where
mul multiplies gamma by inverse square root of EMA of batch variance,
mul_1 multiplies output of mul with output from the base operation
(convolution, FC or depthwise convolution),
mul_2 multiplies output of mul with EMA of batch mean,
and without scaling:
  Rsqrt -> mul
      \-> mul_1
where
mul multiplies the inverse square root of EMA of batch variance with output
from the base operation,
mul_1 multiplies inverse square root of EMA of batch variance with EMA
of batch mean.
Args:
graph: Graph to inspect.
input_to_ops_map: InputToOps object containing mapping from tensor's name
to ops that take it as input.
bn: Batch norm layer prefix string.
Returns:
A boolean indicating whether this batch norm layer has scaling enabled.
"""
rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm_1/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1
class _BatchNormMatch(object):
"""Contains all information related to a found Fused/UnfusedBatchNorm."""
def __init__(self, layer_op, bn_op, output_tensor, input_tensor,
weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor, moving_mean_tensor, moving_variance_tensor,
bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon,
batch_to_space_op):
self._layer_op = layer_op
self._bn_op = bn_op
self._output_tensor = output_tensor
self._input_tensor = input_tensor
self._weight_tensor = weight_tensor
self._gamma_tensor = gamma_tensor
self._beta_tensor = beta_tensor
self._mean_tensor = mean_tensor
self._variance_tensor = variance_tensor
self._moving_mean_tensor = moving_mean_tensor
self._moving_variance_tensor = moving_variance_tensor
self._bn_decay_mean_tensor = bn_decay_mean_tensor
self._bn_decay_var_tensor = bn_decay_var_tensor
self._batch_epsilon = batch_epsilon
self._batch_to_space_op = batch_to_space_op
@property
def layer_op(self):
return self._layer_op
@property
def bn_op(self):
return self._bn_op
@property
def output_tensor(self):
return self._output_tensor
@property
def input_tensor(self):
return self._input_tensor
@property
def weight_tensor(self):
return self._weight_tensor
@property
def gamma_tensor(self):
return self._gamma_tensor
@property
def beta_tensor(self):
return self._beta_tensor
@property
def mean_tensor(self):
return self._mean_tensor
@property
def variance_tensor(self):
return self._variance_tensor
@property
def moving_mean_tensor(self):
return self._moving_mean_tensor
@property
def moving_variance_tensor(self):
return self._moving_variance_tensor
@property
def batch_epsilon(self):
return self._batch_epsilon
@property
def bn_decay_mean_tensor(self):
return self._bn_decay_mean_tensor
@property
def bn_decay_var_tensor(self):
return self._bn_decay_var_tensor
@property
def batch_to_space_op(self):
return self._batch_to_space_op
| [
"[email protected]"
] | |
4e55efc281a0895900555be7d28d0cb370371a1e | 21c098079d2724ffbd3f6cb01c7919c1f59f7875 | /src/aioquic/about.py | 82cd8f33d65dcbf2397d12c83c651cc899e91556 | [
"BSD-3-Clause"
] | permissive | MattyHsueh/aioquic | f9c54717b3acdb84bc8f963a5e8bd5f969ebeb4b | 2163f2d0940edd2a91a3773fb7cb061031fe87fa | refs/heads/master | 2022-09-01T12:33:23.987233 | 2020-05-29T05:57:37 | 2020-05-29T05:57:37 | 263,517,328 | 1 | 0 | BSD-3-Clause | 2020-05-13T03:39:45 | 2020-05-13T03:39:44 | null | UTF-8 | Python | false | false | 227 | py | __author__ = "Jeremy Lainé"
__email__ = "[email protected]"
__license__ = "BSD"
__summary__ = "An implementation of QUIC and HTTP/3"
__title__ = "aioquic"
__uri__ = "https://github.com/aiortc/aioquic"
__version__ = "0.8.7"
| [
"[email protected]"
] | |
ad83730199dd2c78f435dea6eee07e6fd00b8033 | c8adae98cd1c2614c1bacc59ecf52fb7e45ce481 | /0x1F-pascal_triangle/0-pascal_triangle.py | 92a551bac2bdfd2d51644f8f5a6ef858cc8ca0fc | [] | no_license | OctopusHugz/holbertonschool-interview | a75f1a9fe72227e46db1005796cc98fa10f1fd2f | 546f659ca128118438200ae1515096407bb438de | refs/heads/master | 2023-07-15T07:34:45.713801 | 2021-08-25T20:29:50 | 2021-08-25T20:29:50 | 319,363,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #!/usr/bin/python3
""" This module implements a pascal triangle algorithm """
def pascal_triangle(n):
""" Returns a list of lists of integers representing the Pascal's triangle
of n """
triangle = []
for num in range(n):
row = []
for val in range(num + 1):
if val == 0 or val == num:
row.append(1)
continue
row.append(triangle[num - 1][val - 1] + triangle[num - 1][val])
triangle.append(row)
return triangle
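# Illustrative expected output:
#   pascal_triangle(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#   pascal_triangle(0) -> []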
| [
"[email protected]"
] | |
ac5c1a1bf8e0c61d8642c34763349393db9297ab | 4e8674d7c83254aba7f2d327f16d5ad202a189b6 | /src/select_timeout.py | e4c1fdb5f764751985134229c1ae1f4e1ed59f15 | [] | no_license | raysmith619/dots | 0f5e34b17675cfb0903a20eda86493d37676b500 | c44ff3ebf57ec73c6fd8b7898cbc186668f83915 | refs/heads/master | 2021-06-17T02:34:48.850425 | 2021-04-27T13:54:24 | 2021-04-27T13:54:24 | 205,397,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # select_timeout.py
class SelectTimeout(Exception):
"""Base class for exceptions in this module."""
pass
| [
"[email protected]"
] | |
7da12be20b0cf7f6dfdaf4b6ed1e6a7b1fb4459c | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/6030/195006030.py | daab00096a42ba5ffccaf8a09c37c82a9d3352cd | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,476 | py | from bots.botsconfig import *
from records006030 import recorddefs
syntax = {
'version': '00603',
'functionalgroup': 'LA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'CRC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'NTE', MIN: 0, MAX: 99999},
]},
{ID: 'AMT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'MSG', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 3},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 99999},
{ID: 'NTE', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
]},
{ID: 'CRC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
]},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
]},
]},
{ID: 'PO1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LIE', MIN: 0, MAX: 99999},
]},
{ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LIE', MIN: 0, MAX: 99999},
]},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 3},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'CRC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 3},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
788d714d928080162d37b7236fd6f219b53d2324 | ad59072be6c46c98782d8c04df97023a1cc6161c | /DL12-10-transfer-add-category.py | 356bc4124d6c2eb230a7631f4c8b09aa920c17f7 | [] | no_license | cyrilvincent/ML | 67c6bda2016bc70168bd197fe58eabc8dc3bfb00 | 42d11fad9b8b6ea3aba3d4173cb3bbdf7bbd638f | refs/heads/master | 2023-05-25T00:36:49.561860 | 2023-05-24T14:14:04 | 2023-05-24T14:14:04 | 191,420,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | import tensorflow.keras as keras
model = keras.models.load_model('data/dogsvscats/vgg16model-small.h5')
newModel = keras.models.Sequential()
for layer in model.layers[:-1]:
newModel.add(layer)
layer.trainable = False
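# Freezing the copied VGG16 layers keeps the pretrained features fixed, so
# only the new 3-class softmax head added below is trained; the class count
# presumably matches three category folders under data/dogsvscats/small/train.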
newModel.add(keras.layers.Dense(3, name="dense3"))
newModel.add(keras.layers.Activation('softmax'))
newModel.summary()
newModel.compile(loss='categorical_crossentropy',
optimizer="rmsprop",
metrics=['accuracy'])
trainset = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255, validation_split=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
batchSize = 16
trainGenerator = trainset.flow_from_directory(
'data/dogsvscats/small/train',
target_size=(224, 224),
subset='training',
class_mode="categorical",
batch_size=batchSize)
validationGenerator = trainset.flow_from_directory(
'data/dogsvscats/small/train',
target_size=(224, 224),
class_mode="categorical",
subset = 'validation',
batch_size=batchSize)
newModel.fit(
trainGenerator,
epochs=30,
validation_data=validationGenerator,
)
newModel.save('data/dogsvscats/vgg16model-cows.h5')
| [
"[email protected]"
] | |
72bbbcb0b7231deff3c7aea39bd3d33ec372d704 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /robomaker_write_f/world-export-job_create.py | 9d9bc5bc86ab9a791055102b84c3526c3069944b | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
cancel-world-export-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/robomaker/cancel-world-export-job.html
describe-world-export-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/robomaker/describe-world-export-job.html
list-world-export-jobs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/robomaker/list-world-export-jobs.html
"""
write_parameter("robomaker", "create-world-export-job") | [
"[email protected]"
] | |
cb50763a06562ed90a40194ed8ec0b365a2b6258 | 7278b31ebd6362bebf6986c2f3eca89d87201eb2 | /apgl/graph/test/MatrixGraphTest.py | 9ebf6bb8a71f60542fbfdb6dbe402a64a68a40d8 | [] | no_license | malcolmreynolds/APGL | c19827b1b834d3491d98a751c91838177aedc29e | 1703510cbb51ec6df0efe1de850cd48ef7004b00 | refs/heads/master | 2020-12-25T05:52:45.826947 | 2013-03-26T12:30:00 | 2013-03-26T12:30:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78,752 | py | from apgl.graph.VertexList import VertexList
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.generator.BarabasiAlbertGenerator import BarabasiAlbertGenerator
from apgl.util.PathDefaults import PathDefaults
import numpy
import os
import logging
import pickle
import numpy.testing as nptst
"""
A class which encapsulates common tests for classes that inherit from AbstractMatrixGraph.
"""
class MatrixGraphTest():
def initialise(self):
numpy.set_printoptions(suppress = True)
numpy.random.seed(21)
self.numVertices = 6
self.numFeatures = 1
self.vList = VertexList(self.numVertices, self.numFeatures)
self.graph = self.GraphType(self.vList)
self.graph.addEdge(0, 1, 1)
self.graph.addEdge(1, 3, 1)
self.graph.addEdge(0, 2, 2)
self.graph.addEdge(2, 3, 5)
self.graph.addEdge(0, 4, 1)
self.graph.addEdge(3, 4, 1)
self.graph2 = self.GraphType(self.vList, False)
self.graph2.addEdge(0, 1, 1)
self.graph2.addEdge(1, 3, 1)
self.graph2.addEdge(0, 2, 2)
self.graph2.addEdge(2, 3, 5)
self.graph2.addEdge(0, 4, 1)
self.graph2.addEdge(3, 4, 1)
def testAddEdge(self):
self.graph.addEdge(1, 5, 2)
self.assertEquals(self.graph.getEdge(1,5), 2)
self.assertEquals(self.graph.getEdge(5,1), 2)
self.assertEquals(self.graph.getEdge(2,5), None)
self.assertRaises(ValueError, self.graph.addEdge, 1, 3, 0)
def testAddEdges(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList)
edgeIndexArray = numpy.array([[1,2], [2,3]])
graph.addEdges(edgeIndexArray)
self.assertEquals(graph.getEdge(1, 2), 1)
self.assertEquals(graph.getEdge(3, 2), 1)
self.assertEquals(graph.getEdge(2, 3), 1)
self.assertEquals(graph.getEdge(2, 1), 1)
self.assertEquals(graph.getNumEdges(), 2)
graph = self.GraphType(vList, False)
graph.addEdges(edgeIndexArray)
self.assertEquals(graph.getNumEdges(), 2)
self.assertEquals(graph.getEdge(1, 2), 1)
self.assertEquals(graph.getEdge(2, 3), 1)
edgeValues = numpy.array([0.1, 0.2])
graph.addEdges(edgeIndexArray, edgeValues)
self.assertEquals(graph.getEdge(1, 2), 0.1)
self.assertEquals(graph.getEdge(2, 3), 0.2)
graph = self.GraphType(vList)
graph.addEdges(edgeIndexArray, edgeValues)
self.assertEquals(graph.getEdge(1, 2), 0.1)
self.assertEquals(graph.getEdge(2, 3), 0.2)
self.assertEquals(graph.getEdge(2, 1), 0.1)
self.assertEquals(graph.getEdge(3, 2), 0.2)
edgeValues = numpy.array([0.1, 0.0])
self.assertRaises(ValueError, graph.addEdges, edgeIndexArray, edgeValues)
def testRemoveEdge(self):
self.graph.addEdge(1, 5, 2)
self.assertEquals(self.graph.getEdge(1,5), 2)
self.assertEquals(self.graph.getEdge(5,1), 2)
self.graph.removeEdge(1,5)
self.assertEquals(self.graph.getEdge(1,5), None)
self.assertEquals(self.graph.getEdge(5,2), None)
def testNeighbours(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(1, 5, 2)
graph.addEdge(1, 3, 5)
graph.addEdge(1, 9, 1)
graph.addEdge(2, 3, 2)
self.assertTrue((numpy.sort(graph.neighbours(1)) == numpy.array([3,5,9])).all())
self.assertTrue((graph.neighbours(2) == numpy.array([3])).all())
self.assertTrue((numpy.sort(graph.neighbours(3)) == numpy.array([1,2])).all())
self.assertTrue((graph.neighbours(4) == numpy.array([])).all())
#Test this function for directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(1, 5, 2)
graph.addEdge(1, 3, 5)
graph.addEdge(9, 1, 1)
graph.addEdge(2, 3, 2)
self.assertTrue((numpy.sort(graph.neighbours(1)) == numpy.array([3,5])).all())
self.assertTrue((graph.neighbours(2) == numpy.array([3])).all())
self.assertTrue((graph.neighbours(3) == numpy.array([])).all())
self.assertTrue((graph.neighbours(9) == numpy.array([1])).all())
def testNeighbourOf(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(1, 5, 2)
graph.addEdge(1, 3, 5)
graph.addEdge(1, 9, 1)
graph.addEdge(2, 3, 2)
self.assertTrue((graph.neighbourOf(1) == numpy.array([3,5,9])).all())
self.assertTrue((graph.neighbourOf(2) == numpy.array([3])).all())
self.assertTrue((graph.neighbourOf(3) == numpy.array([1,2])).all())
self.assertTrue((graph.neighbourOf(4) == numpy.array([])).all())
#Test this function for directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(1, 5, 2)
graph.addEdge(1, 3, 5)
graph.addEdge(9, 1, 1)
graph.addEdge(2, 3, 2)
self.assertTrue((graph.neighbourOf(1) == numpy.array([9])).all())
self.assertTrue((graph.neighbourOf(2) == numpy.array([])).all())
self.assertTrue((graph.neighbourOf(3) == numpy.array([1, 2])).all())
self.assertTrue((graph.neighbourOf(9) == numpy.array([])).all())
def testClusteringCoefficient(self):
numVertices = 3
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
#1st graph - take 3 nodes in a line
graph = self.GraphType(vList)
graph.addEdge(0, 1, 2)
graph.addEdge(1, 2, 5)
self.assertEqual(graph.clusteringCoefficient(), 0)
#Now, form a triangle
graph.addEdge(0, 2, 5)
self.assertEqual(graph.clusteringCoefficient(), 1)
#2nd Graph - taken from Newman
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 4, 2)
self.assertEqual(graph.clusteringCoefficient(), float(3)/8)
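        # (One triangle {0, 1, 2} and sum(C(deg, 2)) = 1 + 1 + 6 = 8
        # connected triples, hence 3 * 1 / 8.)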
#3rd graph - has no edges
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertEqual(graph.clusteringCoefficient(), 0.0)
def testDegreeDistribution(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertTrue((graph.degreeDistribution() == numpy.array([])).all())
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 4, 2)
self.assertTrue((graph.degreeDistribution() == numpy.array([0, 2, 2, 0, 1])).all())
#Try empty graph
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertTrue((graph.degreeDistribution() == numpy.array([5])).all())
#Try a star like graph
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 0, 2)
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(0, 3, 2)
graph.addEdge(0, 4, 2)
self.assertTrue((graph.degreeDistribution() == numpy.array([0, 4, 0, 0, 0, 1])).all())
#Test obtaining a subgraph and then the degree distribution
subGraph = graph.subgraph([0,1,2,3])
#logging.debug(subGraph.degreeDistribution())
def testDijkstrasAlgorithm(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(1, 3, 2)
graph.addEdge(2, 4, 2)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm(1) == numpy.array([1, 0, 1, 1, 2])).all())
self.assertTrue((graph.dijkstrasAlgorithm(2) == numpy.array([2, 1, 0, 2, 1])).all())
self.assertTrue((graph.dijkstrasAlgorithm(3) == numpy.array([2, 1, 2, 0, 3])).all())
self.assertTrue((graph.dijkstrasAlgorithm(4) == numpy.array([3, 2, 1, 3, 0])).all())
#Test case which found a bug
self.assertTrue((self.graph.dijkstrasAlgorithm(2, self.graph.adjacencyList()) == numpy.array([2,3,0,4,3, float('inf')])).all())
#Test a graph which has an isolated node
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(1, 3, 2)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, numpy.inf])).all())
#Test a graph in a ring
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(3, 4, 2)
graph.addEdge(4, 0, 2)
self.assertTrue((graph.dijkstrasAlgorithm(0) == numpy.array([0, 1, 2, 2, 1])).all())
def testGeodesicDistance(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(3, 4, 2)
graph.addEdge(4, 0, 2)
P = graph.floydWarshall()
self.assertEquals(graph.geodesicDistance(), 37/15.0)
self.assertEquals(graph.geodesicDistance(P), 37/15.0)
#Test a string of vertices
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
P = graph.floydWarshall()
self.assertEquals(graph.geodesicDistance(), 4.0/3)
self.assertEquals(graph.geodesicDistance(P), 4.0/3)
#Test case with isolated node
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
P = graph.floydWarshall()
self.assertEquals(graph.geodesicDistance(), 2.0/3)
self.assertEquals(graph.geodesicDistance(P), 2.0/3)
#Test directed graph
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
P = graph.floydWarshall()
self.assertEquals(graph.geodesicDistance(), 4.0/25)
self.assertEquals(graph.geodesicDistance(P), 4.0/25)
def testHopCount(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
self.assertTrue((graph.hopCount() == numpy.array([10, 16, 22])).all())
graph.addEdge(0, 4)
self.assertTrue((graph.hopCount() == numpy.array([10, 18, 30])).all())
graph.addEdge(4, 5)
self.assertTrue((graph.hopCount() == numpy.array([10, 20, 34, 40])).all())
#Test case where we pass in P matrix
P = graph.floydWarshall()
self.assertTrue((graph.hopCount(P) == numpy.array([10, 20, 34, 40])).all())
#Test a directed graph
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2, 0.1)
graph.addEdge(0, 3)
self.assertTrue((graph.hopCount() == numpy.array([10, 13])).all())
P = graph.floydWarshall(False)
self.assertTrue((graph.hopCount(P) == numpy.array([10, 13])).all())
#Test empty graph and zero graph
graph = self.GraphType(vList, True)
self.assertTrue((graph.hopCount() == numpy.array([numVertices])).all())
vList = VertexList(0, 0)
graph = self.GraphType(vList, True)
self.assertTrue((graph.hopCount() == numpy.array([])).all())
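#hopCount() returns cumulative counts of ordered vertex pairs reachable within
#0, 1, 2, ... hops: for the first star above, [10, 16, 22] is 10 zero-hop
#self-pairs, plus 6 ordered pairs across the three edges, plus 6 ordered pairs
#at two hops between the leaves.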
def testHarmonicGeodesicDistance(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
graph.addEdge(4, 0, 1)
self.assertEquals(graph.harmonicGeodesicDistance(), 2.0)
P = graph.floydWarshall(True)
self.assertEquals(graph.harmonicGeodesicDistance(P), 2.0)
#Test a string of vertices
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
self.assertAlmostEquals(graph.harmonicGeodesicDistance(), 180/77.0, places=5)
P = graph.floydWarshall(True)
self.assertAlmostEquals(graph.harmonicGeodesicDistance(P), 180/77.0, places=5)
#Test case with isolated node
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
self.assertAlmostEquals(graph.harmonicGeodesicDistance(), 45/13.0, places=5)
P = graph.floydWarshall(True)
self.assertAlmostEquals(graph.harmonicGeodesicDistance(P), 45/13.0, places=5)
#Totally empty graph
graph = self.GraphType(vList)
self.assertEquals(graph.harmonicGeodesicDistance(), float('inf'))
#Test use of indices
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
P = graph.floydWarshall(True)
inds = [0, 4]
self.assertEquals(graph.harmonicGeodesicDistance(vertexInds=inds), 12.0)
#Test directed graph
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(3, 4, 1)
P = graph.floydWarshall(True)
self.assertAlmostEquals(graph.harmonicGeodesicDistance(P), 300/77.0, places=5)
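#These values match the harmonic mean of geodesic distances:
#harmonicGeodesicDistance = (n*(n+1)/2) / sum(1/d(i, j)) over pairs i < j, with
#unreachable pairs (d = inf) contributing 0 to the sum.  The unit 5-ring gives
#15/7.5 = 2, the 5-path gives 15/(77/12) = 180/77, and a graph with no finite
#pairs gives inf; the directed variant uses n**2 = 25, giving 300/77.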
def testGetAllEdges(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 2, 2)
edges = graph.getAllEdges()
self.assertEquals(edges.shape[0], 4)
self.assertTrue((edges[0, :]== numpy.array([1,0])).all())
self.assertTrue((edges[1, :]== numpy.array([2,1])).all())
self.assertTrue((edges[2, :]== numpy.array([2,2])).all())
self.assertTrue((edges[3, :]== numpy.array([3,2])).all())
#Test a directed graph
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 5)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 2, 2)
graph.addEdge(2, 1, 2)
edges = graph.getAllEdges()
self.assertEquals(edges.shape[0], 5)
self.assertTrue((edges[0, :]== numpy.array([0,1])).all())
self.assertTrue((edges[1, :]== numpy.array([1,2])).all())
self.assertTrue((edges[2, :]== numpy.array([2,1])).all())
self.assertTrue((edges[3, :]== numpy.array([2,2])).all())
self.assertTrue((edges[4, :]== numpy.array([2,3])).all())
#Test graph with no edges
graph = self.GraphType(vList)
edges = graph.getAllEdges()
self.assertEquals(edges.shape, (0, 2))
def testGetNumEdges(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 1)
self.assertEquals(graph.getNumEdges(), 1)
graph.addEdge(3, 4, 1)
graph.addEdge(3, 4, 1)
self.assertEquals(graph.getNumEdges(), 2)
graph.addEdge(5, 5, 1)
self.assertEquals(graph.getNumEdges(), 3)
graph.addEdge(8, 8, 1)
graph.addEdge(8, 8, 1)
self.assertEquals(graph.getNumEdges(), 4)
#Now test directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 1)
self.assertEquals(graph.getNumEdges(), 1)
graph.addEdge(3, 4, 1)
graph.addEdge(3, 4, 1)
self.assertEquals(graph.getNumEdges(), 2)
graph.addEdge(5, 5, 1)
self.assertEquals(graph.getNumEdges(), 3)
graph.addEdge(8, 8, 1)
graph.addEdge(8, 8, 1)
self.assertEquals(graph.getNumEdges(), 4)
def testGetNumVertices(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertEquals(graph.getNumVertices(), numVertices)
def testGetEdge(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(2, 5, 1)
graph.addEdge(4, 8, 34)
self.assertEquals(graph.getEdge(2, 5), 1)
self.assertEquals(graph.getEdge(5, 2), 1)
self.assertEquals(graph.getEdge(4, 8), 34)
self.assertEquals(graph.getEdge(8, 4), 34)
self.assertEquals(graph.getEdge(4, 4), None)
def testGetVertex(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.setVertex(1, numpy.array([4, 5, 2]))
self.assertRaises(ValueError, graph.setVertex, -1, numpy.array([4, 5, 2]))
self.assertRaises(ValueError, graph.setVertex, 11, numpy.array([4, 5, 2]))
self.assertRaises(ValueError, graph.setVertex, 2, numpy.array([4, 5, 2, 8]))
self.assertRaises(ValueError, graph.setVertex, 2, numpy.array([4, 5]))
self.assertTrue((graph.getVertex(1) == numpy.array([4, 5, 2])).all())
self.assertTrue((graph.getVertex(0) == numpy.array([0, 0, 0])).all())
def testSetVertex(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.setVertex(1, numpy.array([4, 5, 2]))
self.assertTrue((graph.getVertex(1) == numpy.array([4, 5, 2])).all())
self.assertTrue((graph.getVertex(0) == numpy.array([0, 0, 0])).all())
graph.setVertex(1, numpy.array([8, 3, 1]))
self.assertTrue((graph.getVertex(1) == numpy.array([8, 3, 1])).all())
def testIsUndirected(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
self.assertEquals(graph.isUndirected(), True)
graph = self.GraphType(vList, False)
self.assertEquals(graph.isUndirected(), False)
def testGetAllVertexIds(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertTrue((graph.getAllVertexIds() == numpy.array(list(range(0, numVertices)))).all())
def testSubgraph(self):
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
vertices = numpy.random.rand(numVertices, numFeatures)
vList.setVertices(vertices)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph.addEdge(2, 5)
graph.addEdge(2, 6)
graph.addEdge(6, 9)
subgraph = graph.subgraph([0,1,2,3])
self.assertEquals(subgraph.getNumVertices(), 4)
self.assertEquals(subgraph.getVertexList().getNumFeatures(), numFeatures)
self.assertTrue((subgraph.getVertexList().getVertices(list(range(0, 4))) == vertices[list(range(0,4)), :]).all())
self.assertEquals(subgraph.getNumEdges(), 4)
self.assertTrue(subgraph.getEdge(0, 1) == 1)
self.assertTrue(subgraph.getEdge(0, 2) == 1)
self.assertTrue(subgraph.getEdge(0, 3) == 1)
self.assertTrue(subgraph.getEdge(2, 1) == 1)
subgraph = graph.subgraph([1,2,5,6])
self.assertEquals(subgraph.getNumVertices(), 4)
self.assertEquals(subgraph.getVertexList().getNumFeatures(), numFeatures)
self.assertEquals(subgraph.getNumEdges(), 3)
self.assertTrue((subgraph.getVertexList().getVertices([0,1,2,3]) == vertices[[1,2,5,6], :]).all())
self.assertTrue(subgraph.getEdge(0, 1) == 1)
self.assertTrue(subgraph.getEdge(1, 2) == 1)
self.assertTrue(subgraph.getEdge(1, 3) == 1)
#Test case of directed graph
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
vertices = numpy.random.rand(numVertices, numFeatures)
vList.setVertices(vertices)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph.addEdge(2, 5)
graph.addEdge(2, 6)
graph.addEdge(6, 9)
subgraph = graph.subgraph([0,1,2,3])
self.assertEquals(subgraph.isUndirected(), False)
self.assertEquals(subgraph.getNumVertices(), 4)
self.assertEquals(subgraph.getVertexList().getNumFeatures(), numFeatures)
self.assertTrue((subgraph.getVertexList().getVertices(list(range(0, 4))) == vertices[list(range(0,4)), :]).all())
self.assertEquals(subgraph.getNumEdges(), 4)
self.assertTrue(subgraph.getEdge(0, 1) == 1)
self.assertTrue(subgraph.getEdge(0, 2) == 1)
self.assertTrue(subgraph.getEdge(0, 3) == 1)
self.assertTrue(subgraph.getEdge(2, 1) == 1)
subgraph = graph.subgraph([1,2,5,6])
self.assertEquals(subgraph.getNumVertices(), 4)
self.assertEquals(subgraph.getVertexList().getNumFeatures(), numFeatures)
self.assertEquals(subgraph.getNumEdges(), 3)
self.assertTrue((subgraph.getVertexList().getVertices([0,1,2,3]) == vertices[[1,2,5,6], :]).all())
self.assertTrue(subgraph.getEdge(1, 0) == 1)
self.assertTrue(subgraph.getEdge(1, 2) == 1)
self.assertTrue(subgraph.getEdge(1, 3) == 1)
subgraph = graph.subgraph([])
def testAdd(self):
numVertices = 5
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph2 = self.GraphType(vList, False)
graph2.addEdge(3, 2)
graph2.addEdge(0, 4)
graph2.addEdge(1, 3)
graph2.addEdge(2, 1)
newGraph = graph.add(graph2)
#Check old graph is the same
self.assertEquals(graph.getEdge(0,1) , 1)
self.assertEquals(graph.getEdge(0,2) , 1)
self.assertEquals(graph.getEdge(0,3) , 1)
self.assertEquals(graph.getEdge(2,1) , 1)
self.assertEquals(newGraph.getEdge(0,1) , 1)
self.assertEquals(newGraph.getEdge(0,2) , 1)
self.assertEquals(newGraph.getEdge(3,2) , 1)
self.assertEquals(newGraph.getEdge(2,1) , 2)
#Test edge addition of different sized graphs
vList2 = VertexList(numVertices-1, numFeatures)
graph2 = self.GraphType(vList2, False)
graph2.addEdge(3, 2)
self.assertRaises(ValueError, graph.add, graph2)
def testMultiply(self):
numVertices = 5
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1, 2)
graph2 = self.GraphType(vList, False)
graph2.addEdge(3, 2)
graph2.addEdge(0, 4)
graph2.addEdge(1, 3)
graph2.addEdge(2, 1, 3)
newGraph = graph.multiply(graph2)
#Test old graph is the same
self.assertEquals(graph.getEdge(0,1) , 1)
self.assertEquals(graph.getEdge(0,2) , 1)
self.assertEquals(graph.getEdge(0,3) , 1)
self.assertEquals(graph.getEdge(2,1) , 2)
self.assertEquals(newGraph.getNumEdges() , 1)
self.assertEquals(newGraph.getEdge(0,1) , None)
self.assertEquals(newGraph.getEdge(0,2) , None)
self.assertEquals(newGraph.getEdge(3,2) , None)
self.assertEquals(newGraph.getEdge(2,1) , 6)
#Test edge multiplication of different sized graphs
vList2 = VertexList(numVertices-1, numFeatures)
graph2 = self.GraphType(vList2, False)
graph2.addEdge(3, 2)
self.assertRaises(ValueError, graph.multiply, graph2)
def testCopy(self):
numVertices = 5
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph2 = graph.copy()
graph2.addEdge(3, 4)
self.assertEquals(graph2.getEdge(3, 4), 1)
self.assertEquals(graph.getEdge(3, 4), None)
def testDensity(self):
numVertices = 5
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
self.assertEquals(graph.density(), 0)
graph.addEdge(3, 4)
self.assertEquals(graph.density(), float(1)/20)
graph = self.GraphType(vList, True)
self.assertEquals(graph.density(), 0)
graph.addEdge(3, 4)
self.assertEquals(graph.density(), float(1)/10)
def testDepthFirstSearch(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
graph.addEdge(2, 6)
graph.addEdge(4, 5)
self.assertEquals(graph.depthFirstSearch(0), [0,1,2,6,3])
self.assertEquals(graph.depthFirstSearch(1), [1,0,2,6,3])
self.assertEquals(graph.depthFirstSearch(6), [6,2,1,0,3])
self.assertEquals(graph.depthFirstSearch(4), [4, 5])
self.assertEquals(graph.depthFirstSearch(5), [5, 4])
self.assertEquals(graph.depthFirstSearch(7), [7])
def testBreadthFirstSearch(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 7)
graph.addEdge(7, 8)
graph.addEdge(7, 9)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
graph.addEdge(2, 6)
graph.addEdge(4, 5)
self.assertEquals(graph.breadthFirstSearch(0), [0,1, 7,2,3,8,9,6])
self.assertEquals(graph.breadthFirstSearch(1), [1,0,2,3,7,6,8,9])
self.assertEquals(graph.breadthFirstSearch(6), [6, 2,1,0,3,7,8,9])
self.assertEquals(graph.breadthFirstSearch(4), [4, 5])
self.assertEquals(graph.breadthFirstSearch(5), [5, 4])
self.assertEquals(graph.breadthFirstSearch(7), [7, 0, 8, 9, 1, 2, 3, 6])
def testDiameter(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
self.assertEquals(graph.diameter(), 2)
graph.addEdge(3, 2)
self.assertEquals(graph.diameter(), 2)
graph.addEdge(3, 4)
self.assertEquals(graph.diameter(), 3)
graph.addEdge(4, 5)
self.assertEquals(graph.diameter(), 4)
graph.addEdge(0, 5)
self.assertEquals(graph.diameter(), 3)
P = graph.floydWarshall(False)
self.assertEquals(graph.diameter(P=P), 3)
#Now try directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
self.assertEquals(graph.diameter(), 2)
graph.addEdge(4, 3)
self.assertEquals(graph.diameter(), 2)
graph.addEdge(5, 4)
graph.addEdge(6, 5)
self.assertEquals(graph.diameter(), 3)
graph.addEdge(6, 6)
self.assertEquals(graph.diameter(), 3)
P = graph.floydWarshall(False)
self.assertEquals(graph.diameter(P=P), 3)
#Test on graph with no edges
graph = self.GraphType(vList, False)
self.assertEquals(graph.diameter(), 0)
#Now, test graphs with weights
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.5)
graph.addEdge(1, 3, 0.9)
self.assertAlmostEqual(graph.diameter(True), 1.4, places=7)
P = graph.floydWarshall(True)
self.assertAlmostEquals(graph.diameter(True, P=P), 1.4, places=7)
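#The weighted diameter 1.4 above is the longest geodesic, 2 -> 1 -> 3
#(0.5 + 0.9); ignoring weights the same three-edge star has hop diameter 2, as
#checked at the start of this test.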
def testEffectiveDiameter(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(1, 4)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
self.assertEquals(graph.diameter(), 2)
self.assertEquals(graph.effectiveDiameter(1.0), 2)
self.assertEquals(graph.effectiveDiameter(0.5), 2)
P = graph.floydWarshall(False)
self.assertEquals(graph.effectiveDiameter(1.0, P=P), 2)
self.assertEquals(graph.effectiveDiameter(0.5, P=P), 2)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(2, 3)
graph.addEdge(4, 5)
graph.addEdge(5, 6)
graph.addEdge(7, 8)
graph.addEdge(8, 9)
self.assertEquals(graph.effectiveDiameter(1.0), 2)
self.assertEquals(graph.effectiveDiameter(0.75), 1)
self.assertEquals(graph.effectiveDiameter(0.5), 1)
P = graph.floydWarshall(False)
self.assertEquals(graph.effectiveDiameter(1.0, P=P), 2)
self.assertEquals(graph.effectiveDiameter(0.75, P=P), 1)
self.assertEquals(graph.effectiveDiameter(0.5, P=P), 1)
#Test on a disconnected graph
graph = self.GraphType(vList, True)
self.assertEquals(graph.effectiveDiameter(1.0), 0)
self.assertEquals(graph.effectiveDiameter(0.75), 0)
self.assertEquals(graph.effectiveDiameter(0.5), 0)
self.assertEquals(graph.effectiveDiameter(0.1), 0)
P = graph.floydWarshall(False)
self.assertEquals(graph.effectiveDiameter(1.0, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.75, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.5, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.1, P=P), 0)
graph = self.GraphType(vList, False)
self.assertEquals(graph.effectiveDiameter(1.0), 0)
self.assertEquals(graph.effectiveDiameter(0.75), 0)
self.assertEquals(graph.effectiveDiameter(0.5), 0)
self.assertEquals(graph.effectiveDiameter(0.1), 0)
P = graph.floydWarshall(False)
self.assertEquals(graph.effectiveDiameter(1.0, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.75, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.5, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.1, P=P), 0)
#Test on graph with 1 edge
graph = self.GraphType(vList, True)
graph.addEdge(0, 0)
self.assertEquals(graph.effectiveDiameter(1.0), 0)
self.assertEquals(graph.effectiveDiameter(0.75), 0)
self.assertEquals(graph.effectiveDiameter(0.5), 0)
self.assertEquals(graph.effectiveDiameter(0.1), 0)
P = graph.floydWarshall(False)
self.assertEquals(graph.effectiveDiameter(1.0, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.75, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.5, P=P), 0)
self.assertEquals(graph.effectiveDiameter(0.1, P=P), 0)
def testFindComponents(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
graph.addEdge(2, 6)
graph.addEdge(4, 5)
self.assertEquals(graph.findConnectedComponents()[0], [0,1,2,3,6])
self.assertEquals(graph.findConnectedComponents()[1], [4, 5])
graph = self.GraphType(vList, False)
self.assertRaises(ValueError, graph.findConnectedComponents)
#This doesn't seem to be a conclusive test
def testFitPowerLaw(self):
numVertices = 1000
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
ell = 2
m = 2
generator = BarabasiAlbertGenerator(ell, m)
graph = generator.generate(graph)
#logging.debug(graph.degreeDistribution())
alpha, ks, xmin = graph.fitPowerLaw()
self.assertAlmostEquals(alpha, 3.0, places=0)
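#The Floyd-Warshall expectations below follow the standard all-pairs recurrence
#d[i][j] = min(d[i][j], d[i][k] + d[k][j]) over every intermediate vertex k,
#with numpy.inf preserved for unreachable pairs such as the isolated vertex 5.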
def testFloydWarshall(self):
P = self.graph.floydWarshall()
P2 = numpy.zeros((self.numVertices, self.numVertices))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([1, 0, 3, 1, 2, numpy.inf])
P2[2, :] = numpy.array([2, 3, 0, 4, 3, numpy.inf])
P2[3, :] = numpy.array([2, 1, 4, 0, 1, numpy.inf])
P2[4, :] = numpy.array([1, 2, 3, 1, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
#Now test the directed graph
P = self.graph2.floydWarshall()
P2 = numpy.zeros((self.numVertices, self.numVertices))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([numpy.inf, 0, numpy.inf, 1, 2, numpy.inf])
P2[2, :] = numpy.array([numpy.inf, numpy.inf, 0, 5, 6, numpy.inf])
P2[3, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, 0, 1, numpy.inf])
P2[4, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
def testFindAllDistances(self):
P = self.graph.findAllDistances()
P2 = numpy.zeros((self.numVertices, self.numVertices))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([1, 0, 3, 1, 2, numpy.inf])
P2[2, :] = numpy.array([2, 3, 0, 4, 3, numpy.inf])
P2[3, :] = numpy.array([2, 1, 4, 0, 1, numpy.inf])
P2[4, :] = numpy.array([1, 2, 3, 1, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
#Now test the directed graph
P = self.graph2.findAllDistances()
P2 = numpy.zeros((self.numVertices, self.numVertices))
P2[0, :] = numpy.array([0, 1, 2, 2, 1, numpy.inf])
P2[1, :] = numpy.array([numpy.inf, 0, numpy.inf, 1, 2, numpy.inf])
P2[2, :] = numpy.array([numpy.inf, numpy.inf, 0, 5, 6, numpy.inf])
P2[3, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, 0, 1, numpy.inf])
P2[4, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0, numpy.inf])
P2[5, :] = numpy.array([numpy.inf, numpy.inf, numpy.inf, numpy.inf, numpy.inf, 0])
self.assertTrue((P == P2).all())
def testEgoGraph(self):
numVertices = 6
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph.addEdge(2, 3)
graph.addEdge(4, 1)
egoGraph = graph.egoGraph(0)
self.assertTrue(egoGraph.getNumVertices() == 4)
self.assertTrue(egoGraph.getNumEdges() == 5)
self.assertEquals(egoGraph.getEdge(0,1), 1)
self.assertEquals(egoGraph.getEdge(0,2), 1)
self.assertEquals(egoGraph.getEdge(0,3), 1)
self.assertEquals(egoGraph.getEdge(2,1), 1)
self.assertEquals(egoGraph.getEdge(2,3), 1)
egoGraph = graph.egoGraph(4)
self.assertTrue(egoGraph.getNumVertices() == 2)
self.assertTrue(egoGraph.getNumEdges() == 1)
self.assertEquals(egoGraph.getEdge(1,0), 1)
egoGraph = graph.egoGraph(3)
self.assertTrue(egoGraph.getNumVertices() == 3)
self.assertTrue(egoGraph.getNumEdges() == 3)
self.assertEquals(egoGraph.getEdge(0,2), 1)
self.assertEquals(egoGraph.getEdge(0,1), 1)
self.assertEquals(egoGraph.getEdge(2,1), 1)
egoGraph = graph.egoGraph(5)
self.assertTrue(egoGraph.getNumVertices() == 1)
self.assertTrue(egoGraph.getNumEdges() == 0)
def testStr(self):
logging.debug(self.graph)
def testRemoveAllEdges(self):
numVertices = 6
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(2, 1)
graph.addEdge(2, 3)
graph.addEdge(4, 1)
self.assertEquals(graph.getNumEdges(), 6)
graph.removeAllEdges()
self.assertTrue(graph.getEdge(0,1) == None)
self.assertEquals(graph.getNumEdges(), 0)
def testAdjacencyMatrix(self):
numVertices = 3
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.5)
graph.addEdge(2, 1, 0.2)
graph.addEdge(1, 1, 0.1)
A = graph.adjacencyMatrix()
W = graph.getWeightMatrix()
W2 = numpy.zeros((numVertices, numVertices))
A2 = numpy.zeros((numVertices, numVertices))
W2[0,1]= 0.5
W2[2,1]= 0.2
W2[1,1]= 0.1
A2[0,1]= 1
A2[2,1]= 1
A2[1,1]= 1
self.assertTrue((W == W2).all())
self.assertTrue((A == A2).all())
def testComplement(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph3 = graph.complement()
self.assertTrue(graph3.isUndirected())
self.assertEquals(graph3.getNumEdges(), (numVertices**2 + numVertices)/2)
graph.addEdge(0, 1, 0.1)
graph.addEdge(2, 1, 0.2)
graph.addEdge(4, 2, 0.5)
graph.addEdge(6, 7, 0.9)
graph.addEdge(3, 3, 1.1)
graph2 = graph.complement()
self.assertTrue(graph2.isUndirected())
self.assertEquals(graph2.getEdge(0, 1), None)
self.assertEquals(graph2.getEdge(2, 1), None)
self.assertEquals(graph2.getEdge(4, 2), None)
self.assertEquals(graph2.getEdge(6, 7), None)
self.assertEquals(graph2.getEdge(3, 3), None)
self.assertEquals(graph2.getEdge(0,0), 1)
self.assertEquals(graph2.getNumEdges(), 50)
#Now test on directed graphs
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph3 = graph.complement()
self.assertEquals(graph3.getNumEdges(), numVertices**2)
graph.addEdge(0, 1, 0.1)
graph.addEdge(2, 1, 0.2)
graph.addEdge(4, 2, 0.5)
graph.addEdge(6, 7, 0.9)
graph.addEdge(3, 3, 1.1)
graph2 = graph.complement()
self.assertFalse(graph2.isUndirected())
self.assertEquals(graph2.getEdge(0, 1), None)
self.assertEquals(graph2.getEdge(2, 1), None)
self.assertEquals(graph2.getEdge(4, 2), None)
self.assertEquals(graph2.getEdge(6, 7), None)
self.assertEquals(graph2.getEdge(3, 3), None)
self.assertEquals(graph2.getEdge(0,0), 1)
self.assertEquals(graph2.getEdge(1,0), 1)
self.assertEquals(graph2.getNumEdges(), 95)
def testFindTrees(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 1)
graph.addEdge(0, 2, 1)
graph.addEdge(1, 3, 1)
graph.addEdge(4, 5, 1)
graph.addEdge(6, 7, 1)
trees = graph.findTrees()
self.assertEquals(trees[0], [0,1,2,3])
self.assertEquals(trees[1], [6,7])
self.assertEquals(trees[2], [4,5])
self.assertEquals(trees[3], [9])
self.assertEquals(trees[4], [8])
#Make sure the output tree sizes are in order
graph = self.GraphType(vList, False)
graph.addEdge(1, 2, 1)
graph.addEdge(3, 4, 1)
graph.addEdge(3, 5, 1)
graph.addEdge(6, 7, 1)
graph.addEdge(6, 8, 1)
graph.addEdge(8, 9, 1)
trees = graph.findTrees()
self.assertEquals(set(trees[0]), set([6,7,8,9]))
self.assertEquals(trees[1], [3,4,5])
self.assertEquals(trees[2], [1,2])
self.assertEquals(trees[3], [0])
#Test on size 1 graph
numVertices = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
trees = graph.findTrees()
self.assertEquals([len(x) for x in trees], [1])
def testSetWeightMatrix(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
W = numpy.zeros((numVertices, numVertices))
W[1, 1] = 1
W[2, 1] = 1
W[1, 2] = 1
graph.setWeightMatrix(W)
self.assertTrue((graph.getAllEdges() == numpy.array([[1, 1], [2, 1]])).all())
W[1, 3] = 1
self.assertRaises(ValueError, graph.setWeightMatrix, W)
W = numpy.zeros((numVertices, numVertices+1))
self.assertRaises(ValueError, graph.setWeightMatrix, W)
#Now, see if it works for undirected graphs
graph = self.GraphType(vList, False)
W = numpy.zeros((numVertices, numVertices))
W[1, 0] = 1
W[3, 1] = 1
W[1, 3] = 1
graph.setWeightMatrix(W)
self.assertTrue((graph.getAllEdges() == numpy.array([[1, 0], [1,3], [3, 1]])).all())
def testGetNumDirEdges(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.1)
self.assertTrue(graph.getNumDirEdges() == 4)
graph.addEdge(1, 1)
self.assertTrue(graph.getNumDirEdges() == 5)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
self.assertTrue(graph.getNumDirEdges() == 2)
graph.addEdge(1, 1)
self.assertTrue(graph.getNumDirEdges() == 3)
def testOutDegreeSequence(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
graph.addEdge(1, 5)
self.assertTrue((graph.outDegreeSequence() == numpy.array([1, 3, 1, 0,0,1,0,0,0,0])).all() )
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 5)
graph.addEdge(3, 3)
self.assertTrue((graph.outDegreeSequence() == numpy.array([1, 2, 0, 1,0,0,0,0,0,0])).all() )
def testInDegreeSequence(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 5)
self.assertTrue((graph.inDegreeSequence() == numpy.array([1, 3, 1, 0,0,1,0,0,0,0])).all() )
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
graph.addEdge(1, 5)
graph.addEdge(2, 1)
graph.addEdge(3, 3)
self.assertTrue((graph.inDegreeSequence() == numpy.array([0, 2, 1, 1,0,1,0,0,0,0])).all() )
def testInDegreeDistribution(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([])).all())
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 4, 2)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([0, 2, 2, 0, 1])).all())
#Try empty graph
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([5])).all())
#Try a star like graph
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 0, 2)
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(0, 3, 2)
graph.addEdge(0, 4, 2)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([0, 4, 0, 0, 0, 1])).all())
#Ought to try a directed graph
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([])).all())
graph.addEdge(0, 1, 2)
graph.addEdge(0, 2, 2)
graph.addEdge(1, 2, 2)
graph.addEdge(2, 3, 2)
graph.addEdge(2, 4, 2)
self.assertTrue((graph.inDegreeDistribution() == numpy.array([1, 3, 1])).all())
def testGeneralVertexList(self):
#Very brief test to make sure sparse graph works with general vertex lists
numVertices = 10
vList = GeneralVertexList(numVertices)
vList.setVertex(0, "a")
vList.setVertex(1, "b")
vList.setVertex(5, "c")
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 5)
def testFromNetworkXGraph(self):
try:
import networkx
except ImportError as error:
logging.debug(error)
return
nxGraph = networkx.Graph()
nxGraph.graph["VListType"] = GeneralVertexList
#nxGraph.graph["numFeatures"] = 2
#nxGraph.add_node(0)
nxGraph.add_edge(0, 1)
nxGraph.add_edge(1, 2)
nxGraph.add_edge(1, 3)
graph = self.GraphType.fromNetworkXGraph(nxGraph)
self.assertTrue(graph.getNumVertices() == 4)
self.assertTrue(graph.isUndirected() == True)
self.assertTrue(graph.getNumEdges() == 3)
self.assertTrue(graph.getEdge(0, 1) == 1)
self.assertTrue(graph.getEdge(1, 2) == 1)
self.assertTrue(graph.getEdge(1, 3) == 1)
#Try directed graphs
nxGraph = networkx.DiGraph()
nxGraph.graph["VListType"] = GeneralVertexList
#nxGraph.add_node(0)
nxGraph.add_edge(0, 1)
nxGraph.add_edge(1, 2)
nxGraph.add_edge(1, 3)
graph = self.GraphType.fromNetworkXGraph(nxGraph)
self.assertTrue(graph.getNumVertices() == 4)
self.assertTrue(graph.isUndirected() == False)
self.assertTrue(graph.getNumEdges() == 3)
self.assertTrue(graph.getEdge(0, 1) == 1)
self.assertTrue(graph.getEdge(1, 2) == 1)
self.assertTrue(graph.getEdge(1, 3) == 1)
#Using a multigraph should fail
nxGraph = networkx.MultiGraph()
self.assertRaises(ValueError, self.GraphType.fromNetworkXGraph, nxGraph)
#Test node labels
nxGraph = networkx.DiGraph()
nxGraph.graph["VListType"] = GeneralVertexList
nxGraph.add_node("a", label="abc")
nxGraph.add_node("b", label="i")
nxGraph.add_node("c", label="am")
nxGraph.add_node("d", label="here")
nxGraph.add_edge("a", "b")
nxGraph.add_edge("b", "c")
nxGraph.add_edge("b", "d")
graph = self.GraphType.fromNetworkXGraph(nxGraph)
nodeDict = {}
for i in range(len(nxGraph.nodes())):
nodeDict[nxGraph.nodes()[i]] = i
self.assertTrue(graph.getNumVertices() == 4)
self.assertTrue(graph.isUndirected() == False)
self.assertTrue(graph.getNumEdges() == 3)
self.assertTrue(graph.getEdge(nodeDict["a"], nodeDict["b"]) == 1)
self.assertTrue(graph.getEdge(nodeDict["b"], nodeDict["c"]) == 1)
self.assertTrue(graph.getEdge(nodeDict["b"], nodeDict["d"]) == 1)
self.assertTrue(graph.getVertex(0) == "abc")
self.assertTrue(graph.getVertex(1) == "am")
self.assertTrue(graph.getVertex(2) == "i")
self.assertTrue(graph.getVertex(3) == "here")
#Test in conjunction with toNetworkXGraph
numVertices = 10
numFeatures = 2
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 5)
graph.addEdge(2, 5)
graph.addEdge(3, 4)
nxGraph = graph.toNetworkXGraph()
graph2 = self.GraphType.fromNetworkXGraph(nxGraph)
tol = 10**-6
self.assertTrue(numpy.linalg.norm(graph.getVertexList().getVertices(list(range(numVertices))) -graph2.getVertexList().getVertices(list(range(numVertices)))) < tol)
self.assertEquals(graph.getNumEdges(), graph2.getNumEdges())
for i in range(numVertices):
for j in range(numVertices):
self.assertEquals(graph.getEdge(i, j), graph2.getEdge(i, j))
#Use a GeneralVertexList
numVertices = 10
vList = GeneralVertexList(numVertices)
for i in range(numVertices):
vList.setVertex(i, "s" + str(i))
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 5)
graph.addEdge(2, 5)
graph.addEdge(3, 4)
nxGraph = graph.toNetworkXGraph()
graph2 = self.GraphType.fromNetworkXGraph(nxGraph)
for i in range(numVertices):
self.assertEquals(graph.getVertex(i), graph2.getVertex(i))
self.assertEquals(graph.getNumEdges(), graph2.getNumEdges())
for i in range(numVertices):
for j in range(numVertices):
self.assertEquals(graph.getEdge(i, j), graph2.getEdge(i, j))
def testDiameter2(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
self.assertEquals(graph.diameter2(), 2)
graph.addEdge(3, 2)
self.assertEquals(graph.diameter2(), 2)
graph.addEdge(3, 4)
self.assertEquals(graph.diameter2(), 3)
graph.addEdge(4, 5)
self.assertEquals(graph.diameter2(), 4)
graph.addEdge(0, 5)
self.assertEquals(graph.diameter2(), 3)
#Now try directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
self.assertEquals(graph.diameter2(), 2)
graph.addEdge(4, 3)
self.assertEquals(graph.diameter2(), 2)
graph.addEdge(5, 4)
graph.addEdge(6, 5)
self.assertEquals(graph.diameter2(), 3)
graph.addEdge(6, 6)
self.assertEquals(graph.diameter2(), 3)
#Test on graph with no edges
graph = self.GraphType(vList, False)
self.assertEquals(graph.diameter2(), 0)
def testLaplacianMatrix(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(1, 2)
graph.addEdge(1, 3)
L = numpy.zeros((numVertices, numVertices))
A = graph.adjacencyMatrix()
for i in range(numVertices):
for j in range(numVertices):
if i == j:
L[i, j] = numpy.sum(A[i, :])
elif A[i, j] != 0:
L[i, j] = -1
else:
L[i, j] = 0
self.assertTrue((L == graph.laplacianMatrix() ).all())
def testLoad(self):
try:
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
graph.addEdge(1, 3, 0.3)
tempDir = PathDefaults.getTempDir()
tempFile = tempDir + "testGraph"
graph.save(tempFile)
dataDir = PathDefaults.getDataDir()
os.chdir(dataDir)
currentPath = os.getcwd()
graph2 = self.GraphType.load(tempFile)
#Make sure save doesn't change the path
self.assertEquals(os.getcwd(), currentPath)
self.assertEquals(graph.getNumVertices(), graph.getNumVertices())
self.assertEquals(graph.getNumEdges(), graph.getNumEdges())
self.assertTrue(graph2.isUndirected() == True)
self.assertTrue((graph.getVertexList().getVertices(list(range(numVertices))) == graph2.getVertexList().getVertices(list(range(numVertices)))).all())
self.assertTrue((graph.getAllEdges() == graph2.getAllEdges()).all())
self.assertTrue(graph2.getEdge(0, 1) == 0.1)
self.assertTrue(graph2.getEdge(1, 2) == 0.2)
self.assertTrue(graph2.getEdge(1, 3) == 0.3)
#Test if loading of old-style graph files works
testDir = PathDefaults.getDataDir() + "test/"
graphFilename = testDir + "fd"
graph = self.GraphType.load(graphFilename)
self.assertEquals(graph.getEdge(1, 1), 1)
self.assertEquals(graph.getEdge(2, 2), 1)
self.assertEquals(graph.getNumVertices(), 10)
except IOError as e:
logging.warn(e)
pass
except OSError as e:
logging.warn(e)
pass
def testMaxEigenvector(self):
tol = 10**-6
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(2, 0)
v = graph.maxEigenvector()
W = graph.getWeightMatrix()
lmbda, U = numpy.linalg.eig(W)
i = numpy.argmax(lmbda)
self.assertTrue(numpy.linalg.norm(U[:, i] - v) < tol)
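#Note: eigenvectors are only determined up to sign, so this check assumes
#maxEigenvector returns the dominant eigenvector with the same sign convention
#as numpy.linalg.eig.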
def testMaxProductPaths(self):
numVertices = 6
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 3, 0.1)
graph.addEdge(0, 2, 0.2)
graph.addEdge(2, 3, 0.5)
graph.addEdge(0, 4, 0.1)
graph.addEdge(3, 4, 0.1)
P = graph.maxProductPaths()
P2 = numpy.zeros((numVertices, numVertices))
P2[0, :] = numpy.array([0.04, 0.1, 0.2, 0.1, 0.1, 0])
P2[1, :] = numpy.array([0.1, 0.01, 0.05, 0.1, 0.01, 0])
P2[2, :] = numpy.array([0.2, 0.05, 0.25, 0.5, 0.05, 0])
P2[3, :] = numpy.array([0.1, 0.1, 0.5, 0.25, 0.1, 0])
P2[4, :] = numpy.array([0.1, 0.01, 0.05, 0.1, 0.01, 0])
P2[5, :] = numpy.array([0,0,0,0,0,0])
self.assertAlmostEquals(numpy.linalg.norm(P - P2), 0, places=6)
#Now test on a directed graph
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 3, 0.1)
graph.addEdge(0, 2, 0.2)
graph.addEdge(2, 3, 0.5)
graph.addEdge(0, 4, 0.1)
graph.addEdge(3, 4, 0.1)
P = graph.maxProductPaths()
P2 = numpy.zeros((numVertices, numVertices))
P2[0, :] = numpy.array([0, 0.1, 0.2, 0.1, 0.1, 0])
P2[1, :] = numpy.array([0, 0, 0, 0.1, 0.01, 0])
P2[2, :] = numpy.array([0, 0, 0, 0.5, 0.05, 0])
P2[3, :] = numpy.array([0, 0, 0, 0, 0.1, 0])
P2[4, :] = numpy.array([0,0,0,0,0,0])
P2[5, :] = numpy.array([0,0,0,0,0,0])
self.assertAlmostEquals(numpy.linalg.norm(P - P2), 0, places=6)
def testMaybeIsomorphicWith(self):
numVertices = 6
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 3, 0.1)
graph.addEdge(0, 2, 0.2)
graph.addEdge(2, 3, 0.5)
graph.addEdge(0, 4, 0.1)
graph.addEdge(3, 4, 0.1)
graph2 = self.GraphType(vList, True)
graph2.addEdge(0, 1, 0.1)
graph2.addEdge(1, 3, 0.1)
graph2.addEdge(0, 2, 0.2)
graph2.addEdge(2, 3, 0.5)
graph2.addEdge(0, 4, 0.1)
graph2.addEdge(3, 4, 0.1)
graph2.addEdge(4, 5, 0.1)
graph3 = self.GraphType(vList, True)
graph3.addEdge(2, 4, 0.1)
graph3.addEdge(4, 5, 0.1)
graph3.addEdge(2, 1, 0.2)
graph3.addEdge(1, 5, 0.5)
graph3.addEdge(2, 0, 0.1)
graph3.addEdge(5, 0, 0.1)
self.assertTrue(graph.maybeIsomorphicWith(graph))
self.assertFalse(graph.maybeIsomorphicWith(graph2))
self.assertTrue(graph.maybeIsomorphicWith(graph3))
def testSave(self):
try:
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
graph.addEdge(1, 3, 0.3)
dataDir = PathDefaults.getDataDir()
os.chdir(dataDir)
tempDir = PathDefaults.getTempDir()
currentPath = os.getcwd()
graph.save(tempDir + "testGraph")
#Make sure save doesn't change the path
self.assertEquals(os.getcwd(), currentPath)
except IOError as e:
logging.warn(e)
pass
except OSError as e:
logging.warn(e)
pass
def testSetVertices(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList, False)
X = numpy.random.rand(numVertices, numFeatures)
vertexIndices =list(range(0, numVertices))
graph.setVertices(vertexIndices, X)
vertexIndices2 = graph.getAllVertexIds()
vertices2 = graph.getVertices(vertexIndices2)
self.assertEquals(vertexIndices, vertexIndices2)
self.assertTrue((X == vertices2).all())
def testToNetworkXGraph(self):
try:
import networkx
except ImportError as error:
logging.debug(error)
return
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(5, 1, 4)
graph.addEdge(5, 2, 2)
graph.addEdge(2, 7, 4)
graph.addEdge(1, 9, 6)
graph2 = self.GraphType(vList, False)
graph2.addEdge(5, 1, 4)
graph2.addEdge(5, 2, 2)
graph2.addEdge(2, 7, 4)
graph2.addEdge(1, 9, 6)
networkXGraph = graph.toNetworkXGraph()
self.assertEquals(networkXGraph.get_edge_data(5, 1), {'value' : 4.0})
self.assertEquals(networkXGraph.get_edge_data(5, 2), {'value' : 2.0})
self.assertEquals(networkXGraph.get_edge_data(2, 7), {'value' : 4.0})
self.assertEquals(networkXGraph.get_edge_data(1, 9), {'value' : 6.0})
self.assertEquals(networkXGraph.get_edge_data(9, 1), {'value' : 6.0})
vertexIndexList = []
for i in networkXGraph.__iter__():
vertexIndexList.append(i)
vertexIndexList.sort()
self.assertTrue(vertexIndexList == list(range(numVertices)))
self.assertTrue(networkXGraph.edges() == [(1, 9), (1, 5), (2, 5), (2, 7)])
self.assertTrue(type(networkXGraph) == networkx.Graph)
#Now we test the case where we have a directed graph
networkXGraph = graph2.toNetworkXGraph()
self.assertEquals(networkXGraph.get_edge_data(5, 1), {'value' : 4.0})
self.assertEquals(networkXGraph.get_edge_data(5, 2), {'value' : 2.0})
self.assertEquals(networkXGraph.get_edge_data(2, 7), {'value' : 4.0})
self.assertEquals(networkXGraph.get_edge_data(1, 9), {'value' : 6.0})
self.assertFalse(networkXGraph.has_edge(9, 1))
vertexIndexList = []
for i in networkXGraph.__iter__():
vertexIndexList.append(i)
vertexIndexList.sort()
self.assertTrue(vertexIndexList == list(range(numVertices)))
self.assertTrue(networkXGraph.edges() == [(1, 9), (2, 7), (5, 1), (5, 2)])
self.assertTrue(type(networkXGraph) == networkx.DiGraph)
#Test a graph with no edges
numVertices = 10
numFeatures = 3
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList)
networkXGraph = graph.toNetworkXGraph()
self.assertTrue(networkXGraph.order() == numVertices)
self.assertTrue(networkXGraph.size() == 0)
self.assertTrue((networkXGraph.nodes(data=True)[0][1]['label'] == graph.getVertex(0)).all())
def testTriangleSequence(self):
tol = 10**-6
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([0, 0, 0, 0, 0])) < tol)
graph.addEdge(0, 1)
graph.addEdge(0, 2, 0.1)
graph.addEdge(1, 2)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([2, 2, 2, 0, 0])) < tol)
graph.addEdge(2, 3)
graph.addEdge(3, 0, -0.3)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([4, 2, 4, 2, 0])) < tol)
graph.removeAllEdges()
graph.addEdge(0, 0)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([0, 0, 0, 0, 0])) < tol)
#Test on directed graphs
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(2, 0)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([1, 1, 1, 0, 0])) < tol)
graph.addEdge(0, 3)
graph.addEdge(3, 4, 0.1)
graph.addEdge(4, 0)
seq = graph.triangleSequence()
self.assertTrue(numpy.linalg.norm(seq - numpy.array([2, 1, 1, 1, 1])) < tol)
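#Reading off the expectations: in the undirected case each triangle contributes
#2 to each of its vertices (both orientations are counted), while a directed
#3-cycle contributes 1, so vertex 0, which lies on two directed cycles, scores 2.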
def testUnion(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(5, 2, 0.1)
graph.addEdge(6, 0, 0.1)
graph2 = self.GraphType(vList, True)
graph2.addEdge(0, 2, 0.1)
graph2.addEdge(5, 3, 0.1)
graph2.addEdge(5, 2, 0.1)
newGraph = graph.union(graph2)
#Test original graph is the same
self.assertEquals(graph.getEdge(0, 1), 0.1)
self.assertEquals(graph.getEdge(5, 2), 0.1)
self.assertEquals(graph.getEdge(6, 0), 0.1)
self.assertEquals(newGraph.getNumEdges(), 5)
self.assertEquals(newGraph.getEdge(0, 1), 1)
self.assertEquals(newGraph.getEdge(5, 2), 1)
self.assertEquals(newGraph.getEdge(6, 0), 1)
self.assertEquals(newGraph.getEdge(0, 2), 1)
self.assertEquals(newGraph.getEdge(5, 3), 1)
#Test union of graph 2 with itself
newGraph = graph2.union(graph2)
self.assertEquals(newGraph.getNumEdges(), 3)
self.assertEquals(newGraph.getEdge(0, 2), 1)
self.assertEquals(newGraph.getEdge(5, 3), 1)
self.assertEquals(newGraph.getEdge(5, 2), 1)
def testIntersect(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(5, 2, 0.1)
graph.addEdge(6, 0, 0.1)
graph2 = self.GraphType(vList, True)
graph2.addEdge(0, 2, 0.1)
graph2.addEdge(5, 3, 0.1)
graph2.addEdge(5, 2, 0.1)
newGraph = graph.intersect(graph2)
#Test old graph is the same
self.assertEquals(graph.getEdge(0, 1), 0.1)
self.assertEquals(graph.getEdge(5, 2), 0.1)
self.assertEquals(graph.getEdge(6, 0), 0.1)
self.assertEquals(newGraph.getNumEdges(), 1)
self.assertEquals(newGraph.getEdge(5, 2), 1)
#Test intersect of graph 2 with itself
newGraph = graph2.intersect(graph2)
self.assertEquals(newGraph.getNumEdges(), 3)
self.assertEquals(newGraph.getEdge(0, 2), 1)
self.assertEquals(newGraph.getEdge(5, 3), 1)
self.assertEquals(newGraph.getEdge(5, 2), 1)
def testIsTree(self):
numVertices = 3
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
self.assertFalse(graph.isTree())
graph.addEdge(0, 2)
self.assertTrue(graph.isTree())
graph.addEdge(2, 0)
self.assertFalse(graph.isTree())
graph = self.GraphType(vList, True)
self.assertRaises(ValueError, graph.isTree)
#Try a bigger graph
numVertices = 6
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 3)
graph.addEdge(0, 4)
graph.addEdge(0, 5)
self.assertTrue(graph.isTree())
graph.removeEdge(0, 5)
graph.addEdge(1, 5)
self.assertTrue(graph.isTree())
#Try 1 node graph
numVertices = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, False)
self.assertTrue(graph.isTree())
def testBetweenness(self):
tol = 10**-6
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(2, 3, 0.1)
graph.addEdge(0, 3, 0.1)
#logging.debug(graph.betweenness())
def testSetVertexList(self):
numVertices = 5
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
vList.setVertices(numpy.random.rand(numVertices, numFeatures))
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
self.assertTrue((graph.getVertex(0) == vList.getVertex(0)).all())
self.assertTrue((graph.getVertex(1) == vList.getVertex(1)).all())
self.assertTrue((graph.getVertex(2) == vList.getVertex(2)).all())
vList2 = VertexList(numVertices, numFeatures+2)
vList2.setVertices(numpy.random.rand(numVertices, numFeatures+2))
graph.setVertexList(vList2)
self.assertTrue((graph.getVertex(0) == vList2.getVertex(0)).all())
self.assertTrue((graph.getVertex(1) == vList2.getVertex(1)).all())
self.assertTrue((graph.getVertex(2) == vList2.getVertex(2)).all())
vList3 = VertexList(numVertices+1, numFeatures)
self.assertRaises(ValueError, graph.setVertexList, vList3)
def testNormalisedLaplacianSym(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 9)
graph.addEdge(1, 1)
graph.addEdge(1, 5)
L = graph.normalisedLaplacianSym()
W = graph.getWeightMatrix()
L2 = numpy.zeros((numVertices, numVertices))
d = graph.outDegreeSequence()
for i in range(numVertices):
for j in range(numVertices):
if d[i] != 0 and d[j]!= 0:
Wij = W[i, j]/(numpy.sqrt(d[i]*d[j]))
else:
Wij = 0
if i == j:
L2[i, j] = 1 - Wij
else:
L2[i, j] = -Wij
tol = 10**-6
self.assertTrue(numpy.linalg.norm(L2 - L) < tol)
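#i.e. the symmetric normalised Laplacian L_sym = I - D^(-1/2) W D^(-1/2), with
#rows and columns of zero-degree vertices reducing to the identity.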
def testNormalisedLaplacianRw(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(0, 9)
graph.addEdge(1, 1)
graph.addEdge(1, 5)
L = graph.normalisedLaplacianRw()
W = graph.getWeightMatrix()
L2 = numpy.zeros((numVertices, numVertices))
d = graph.outDegreeSequence()
for i in range(numVertices):
for j in range(numVertices):
if d[i] != 0 and d[j]!= 0:
Wij = W[i, j]/(d[i])
else:
Wij = 0
if i == j:
L2[i, j] = 1 - Wij
else:
L2[i, j] = -Wij
tol = 10**-6
self.assertTrue(numpy.linalg.norm(L2 - L) < tol)
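#i.e. the random-walk normalised Laplacian L_rw = I - D^(-1) W, where each row
#of W is normalised by the out-degree of its vertex.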
def testSetDiff(self):
numVertices = 10
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(5, 2, 0.1)
graph.addEdge(6, 0, 0.1)
graph.addEdge(6, 1, 0.1)
graph2 = self.GraphType(vList, True)
graph2.addEdge(0, 1, 0.1)
graph2.addEdge(5, 3, 0.1)
graph2.addEdge(5, 2, 0.1)
newGraph = graph.setDiff(graph2)
#Test old graph is the same
self.assertEquals(graph.getEdge(0, 1), 0.1)
self.assertEquals(graph.getEdge(5, 2), 0.1)
self.assertEquals(graph.getEdge(6, 0), 0.1)
self.assertEquals(graph.getEdge(6, 1), 0.1)
self.assertEquals(newGraph.getNumEdges(), 2)
self.assertEquals(newGraph.getEdge(6, 0), 1)
self.assertEquals(newGraph.getEdge(6, 1), 1)
#Test setdiff of graph 2 with itself
newGraph = graph2.setDiff(graph2)
self.assertEquals(newGraph.getNumEdges(), 0)
def testIncidenceMatrix(self):
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(3, 0, 0.1)
graph.addEdge(4, 1, 0.1)
X = graph.incidenceMatrix().todense()
L = X.dot(X.T)
L2 = graph.laplacianMatrix()
#In the case of undirected graphs we get the laplacian
self.assertTrue((L==L2).all())
#Directed graph - we get something different
graph = self.GraphType(vList, False)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(3, 0, 0.1)
graph.addEdge(4, 1, 0.1)
X = graph.incidenceMatrix().todense()
L = X.dot(X.T)
L2 = graph.laplacianMatrix()
def testDegreeSequence(self):
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.1)
graph.addEdge(3, 0, 0.1)
graph.addEdge(4, 1, 0.1)
self.assertTrue((graph.degreeSequence() == [2, 3, 1, 1, 1]).all())
#Now add a self edge
graph.addEdge(0, 0)
self.assertTrue((graph.degreeSequence() == [4, 3, 1, 1, 1]).all())
graph.addEdge(1, 1)
self.assertTrue((graph.degreeSequence() == [4, 5, 1, 1, 1]).all())
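#As the two checks above show, a self-loop adds 2 to a vertex's degree,
#following the usual handshake convention for undirected graphs.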
def testAdjacencyList(self):
numVertices = 5
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList, True)
graph.addEdge(0, 1, 0.1)
graph.addEdge(1, 2, 0.2)
graph.addEdge(3, 0, 0.3)
graph.addEdge(4, 1, 0.4)
L, W = graph.adjacencyList()
for i in range(numVertices):
self.assertTrue((L[i]==numpy.sort(graph.neighbours(i))).all())
self.assertTrue(W[0][0] == 0.1)
self.assertTrue(W[0][1] == 0.3)
self.assertTrue(W[4][0] == 0.4)
#Now use just adjacencies
L, W = graph.adjacencyList(False)
for i in range(numVertices):
self.assertTrue((L[i]==numpy.sort(graph.neighbours(i))).all())
self.assertTrue(W[0][0] == 1)
self.assertTrue(W[0][1] == 1)
self.assertTrue(W[4][0] == 1)
def testGetItem(self):
numVertices = 5
graph = self.GraphType(GeneralVertexList(numVertices))
graph.addEdge(1, 1, 0.1)
graph.addEdge(1, 3, 0.5)
graph.addEdge(2, 4, 1)
graph.addEdge(2, 3, 2)
graph.setVertex(0, "abc")
self.assertEquals(graph[1,1], 0.1)
self.assertEquals(graph[1,3], 0.5)
def testSetItem(self):
numVertices = 5
graph = self.GraphType(GeneralVertexList(numVertices))
graph.addEdge(1, 1, 0.1)
graph.addEdge(1, 3, 0.5)
self.assertEquals(graph[1,3], 0.5)
graph[1, 3] = 2
self.assertEquals(graph[1,3], 2)
def testToIGraph(self):
try:
import igraph
except ImportError as error:
logging.debug(error)
return
numVertices = 7
graph = self.GraphType(GeneralVertexList(numVertices))
graph.addEdge(1, 1, 0.1)
graph.addEdge(1, 3, 0.5)
graph.addEdge(1, 5, 0.5)
graph.addEdge(3, 5, 0.5)
graph.addEdge(5, 6, 0.1)
graph.setVertex(1, "a")
graph.setVertex(2, "b")
graph.setVertex(3, "c")
igraph = graph.toIGraph()
self.assertEquals(len(igraph.vs), graph.getNumVertices())
self.assertEquals(len(igraph.es), graph.getNumEdges())
self.assertEquals(igraph.vs["label"][1], "a")
self.assertEquals(igraph.vs["label"][2], "b")
self.assertEquals(igraph.vs["label"][3], "c")
edges = igraph.get_edgelist()
i = 0
for e in edges:
self.assertTrue(graph.getEdge(e[0], e[1]) == igraph.es[i]["value"])
i += 1
def testPickle(self):
numVertices = 10
numFeatures = 1
vList = VertexList(numVertices, numFeatures)
graph = self.GraphType(vList)
graph[0, 0] = 1
graph[3, 5] = 0.1
graph.setVertex(0, numpy.array([12.3]))
output = pickle.dumps(graph)
newGraph = pickle.loads(output)
graph[2, 2] = 1
self.assertEquals(newGraph[0, 0], 1)
self.assertEquals(newGraph[3, 5], 0.1)
self.assertEquals(newGraph[2, 2], 0.0)
self.assertEquals(newGraph.getNumEdges(), 2)
self.assertEquals(newGraph.getNumVertices(), numVertices)
self.assertEquals(newGraph.isUndirected(), True)
self.assertEquals(graph[0, 0], 1)
self.assertEquals(graph[3, 5], 0.1)
self.assertEquals(graph[2, 2], 1)
self.assertEquals(graph.getNumEdges(), 3)
self.assertEquals(graph.getNumVertices(), numVertices)
self.assertEquals(graph.isUndirected(), True)
for i in range(numVertices):
nptst.assert_array_equal(graph.getVertex(i), newGraph.getVertex(i))
def testToDictGraph(self):
dictGraph = self.graph.toDictGraph()
edges = self.graph.getAllEdges()
edges = numpy.array(edges, numpy.int)
for i in range(edges.shape[0]):
self.assertEquals(dictGraph[edges[i, 0], edges[i, 1]], self.graph[int(edges[i, 0]), int(edges[i, 1])])
dictGraph2 = self.graph2.toDictGraph()
edges2 = self.graph2.getAllEdges()
for i in range(edges2.shape[0]):
self.assertEquals(dictGraph2[edges2[i, 0], edges2[i, 1]], self.graph[int(edges2[i, 0]), int(edges2[i, 1])])
| [
"[email protected]"
] | |
b586baa8d46a591e777d5a5235059c44e5991d32 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2773/60662/287216.py | fd60f99029185c3b352f686730065a2f640c4b78 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | matrix = []
for i in range(0, 4):
    s = input()
    if 0 < i < 4:
        temp = list(map(int, s.strip(' [],').split(',')))
        matrix.append(temp)
a = len(matrix)
dic = {}
nums_max = 1
if a == 0:
    nums_max = 0
else:
    b = len(matrix[0])
    for i in range(a):
        for j in range(b):
            dic[(i, j)] = matrix[i][j]
    v = dic.keys()
    nums1 = [[1 for i in range(b)] for j in range(a)]
    dic = sorted(dic.items(), key=lambda x: x[1])
    for k in dic:
        i = k[0][0]
        j = k[0][1]
        if (i + 1, j) in v and matrix[i + 1][j] < matrix[i][j] and nums1[i][j] < nums1[i + 1][j] + 1:
            nums1[i][j] = nums1[i + 1][j] + 1
        if (i, j + 1) in v and matrix[i][j + 1] < matrix[i][j] and nums1[i][j] < nums1[i][j + 1] + 1:
            nums1[i][j] = nums1[i][j + 1] + 1
        if (i - 1, j) in v and matrix[i - 1][j] < matrix[i][j] and nums1[i][j] < nums1[i - 1][j] + 1:
            nums1[i][j] = nums1[i - 1][j] + 1
        if (i, j - 1) in v and matrix[i][j - 1] < matrix[i][j] and nums1[i][j] < nums1[i][j - 1] + 1:
            nums1[i][j] = nums1[i][j - 1] + 1
        nums_max = max(nums_max, nums1[i][j])
print(nums_max)
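# Idea: cells are processed in increasing order of value, so every strictly
# smaller neighbour already holds its final result and nums1[i][j] becomes the
# length of the longest strictly increasing path ending at (i, j).  For the
# classic example [[9,9,4],[6,6,8],[2,1,1]] this prints 4 (1 -> 2 -> 6 -> 9).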
| [
"[email protected]"
] | |
d149f7fc4838a57eb5d387bf9dd33399983d202b | e40a882c3717b3982db0fbc7ae42430746636ff0 | /dvalib/yolo/test_yolo.py | e2274640d2ca3e97769741d4a88bb08caeb74ff6 | [] | no_license | longchuan1985/DeepVideoAnalytics | 7dbe4bb9aab3ce15bc5bbcffcd3dbcea7157bea4 | 4264239ad6f9b23e450f90671c0120511c971678 | refs/heads/master | 2021-01-23T04:14:12.516312 | 2017-05-31T07:48:01 | 2017-05-31T07:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,046 | py | #! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image
from yad2k.models.keras_yolo import yolo_eval, yolo_head
def _main():
args = {
'anchors_path': 'model_data/yolo_anchors.txt',
'classes_path': 'model_data/coco_classes.txt',
'test_path': 'images',
'output_path': 'images/out',
'score_threshold': 0.3,
'iou': 0.5,
}
model_path = os.path.expanduser(args['model_path'])
anchors_path = os.path.expanduser(args['anchors_path'])
classes_path = os.path.expanduser(args['classes_path'])
test_path = os.path.expanduser(args['test_path'])
output_path = os.path.expanduser(args['output_path'])
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
sess = K.get_session()
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
yolo_model = load_model(model_path)
num_classes = len(class_names)
num_anchors = len(anchors)
# TODO: Assumes dim ordering is channel last
model_output_channels = yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes. ' \
'Specify matching anchors and classes with --anchors_path and ' \
'--classes_path flags.'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Check if model is fully convolutional, assuming channel last order.
model_image_size = yolo_model.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)
hsv_tuples = [(x / len(class_names), 1., 1.)for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(yolo_outputs,input_image_shape,score_threshold=args['score_threshold'],iou_threshold=args['iou_threshold'])
for image_file in os.listdir(test_path):
try:
image_type = imghdr.what(os.path.join(test_path, image_file))
if not image_type:
continue
except:
continue
image = Image.open(os.path.join(test_path, image_file))
if is_fixed_size: # TODO: When resizing we can use minibatch input.
resized_image = image.resize(
tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
# Due to skip connection + max pooling in YOLO_v2, inputs must have
# width and height as multiples of 32.
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
resized_image = image.resize(new_image_size, Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
yolo_model.input: image_data,
input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        thickness = (image.size[0] + image.size[1]) // 300  # kept from the original box-drawing code; the drawing itself is absent in this trimmed copy
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
image.save(os.path.join(output_path, image_file), quality=90)
sess.close()
if __name__ == '__main__':
    _main()  # the trimmed script defines no argparse parser, and _main() takes no arguments
| [
"[email protected]"
] | |
880f93512f34e2e80dc747776e6ed3b406dd4715 | 0d14a4be28107b9487c16fde5865f661c34f3595 | /examples/common_features/species_2.py | f1c19320ad83c6dcd28d3f54c607a253cde10f4a | [
"MIT"
] | permissive | lamyj/sycomore | 729780544e5ac3940e47493c205797556c7f81b8 | d0335f1b8b26facb2a0581de6c19e6e999517599 | refs/heads/master | 2023-09-01T18:02:56.062085 | 2023-08-06T16:06:53 | 2023-08-06T16:06:53 | 199,385,133 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import sycomore
from sycomore.units import *
species = sycomore.Species(1000*ms, 100*ms)
# Assign the diffusion coefficient as a scalar
species.D = 3*um**2/s
# The diffusion coefficient is stored on the diagonal of the tensor
print(species.D[0,0])
# Assign the diffusion coefficient as a tensor
species.D = [
[3*um**2/s, 0*um**2/s, 0*um**2/s],
[0*um**2/s, 2*um**2/s, 0*um**2/s],
[0*um**2/s, 0*um**2/s, 1*um**2/s]]
print(species.D)
| [
"[email protected]"
] | |
be591fb5e2d1805a2ef27f18908ad61e4fb28266 | 6dfba71133c5b93cef5b944dcfb50d6eebceca26 | /src/acsf_feat.py | 604fa9e5091700a7964e38ab3b336f034d10358e | [] | no_license | matsuken92/molecular | 67b223be7be604cdf907dcd66b9948faf9119433 | 759a697070efaac681aff89f645ff2a6a79f0b78 | refs/heads/master | 2022-02-18T01:41:01.674199 | 2019-08-29T04:01:25 | 2019-08-29T04:01:25 | 190,421,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | # 基本ライブラリ
import pandas as pd
import pandas.io.sql as psql
import numpy as np
import numpy.random as rd
import gc
import multiprocessing as mp
import os
import sys
import pickle
from collections import defaultdict
from glob import glob
import math
from datetime import datetime as dt
from pathlib import Path
import scipy.stats as st
import re
import shutil
from tqdm import tqdm_notebook as tqdm
import datetime
from dscribe.descriptors import ACSF
from dscribe.core.system import System
sys.path.append('..')
from lib.line_notif import send_message
from lib.utils import matrics_rotate
from lib.utils import reduce_mem_usage, current_time, unpickle, to_pickle
SYMBOL=['H', 'C', 'N', 'O', 'F']
ACSF_GENERATOR = ACSF(
species = SYMBOL,
rcut = 6.0,
g2_params=[[1, 1], [1, 2], [1, 3]],
g4_params=[[1, 1, 1], [1, 2, 1], [1, 1, -1], [1, 2, -1]],
)
def get_scsf(data):
ret_list = []
for molecule_name in data["mol_names"]:
df = gb_structure.get_group(molecule_name)
df = df.sort_values(['atom_index'], ascending=True)
a = df.atom.values.tolist()
xyz = df[['x','y','z']].values
atom = System(symbols=a, positions=xyz)
acsf = ACSF_GENERATOR.create(atom)
acsf_df = pd.DataFrame(acsf)
acsf_df.columns = [f"acsf_{c}" for c in range(acsf_df.shape[1])]
acsf_df = pd.concat([df[["molecule_name", "atom_index"]].reset_index(drop=True),
acsf_df.reset_index(drop=True)], axis=1)
ret_list.append(acsf_df)
return pd.concat(ret_list, axis=0)
print("loading structures")
structures = pd.read_csv("../input/structures.csv")
molecule_names = np.sort(structures.molecule_name.unique())
gb_structure = structures.groupby("molecule_name")
n_split = mp.cpu_count()
unit = np.ceil(len(molecule_names) / n_split).astype(int)
indexer = [[unit * (i), unit * (i + 1)] for i in range(n_split)]
split_mol_names = []
for idx in indexer:
split_mol_names.append(molecule_names[idx[0]:idx[1]])
mp_data = [{"mol_names": m} for m in split_mol_names]
print("start multiprocessing")
num_workers = mp.cpu_count()
with mp.Pool(num_workers) as executor:
features_chunk = executor.map(get_scsf, mp_data)
df = pd.concat(features_chunk)
to_pickle("../processed/v003/acsf_feat.pkl", df)
#df.to_csv("../processed/v003/acsf_feat.csv")
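# --- Hedged usage sketch (not part of the original pipeline) ---------------
# Quick single-process sanity check of the descriptor defined above, using
# System and ACSF_GENERATOR exactly as defined in this file; the water-like
# geometry is illustrative only.
water = System(symbols=["O", "H", "H"],
               positions=[[0.0, 0.0, 0.0],
                          [0.76, 0.59, 0.0],
                          [-0.76, 0.59, 0.0]])
print("ACSF shape for one molecule:", ACSF_GENERATOR.create(water).shape)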
print("finished.") | [
"[email protected]"
] | |
198c683ce8f8d6109e25e666ec663c387887bcf4 | 7c66bba92b484e5fa6ee282ef39f2c26875ca775 | /auto_login/weibo_auto_login.py | adbac7ea6274fa21e0b7a49a1bb7cc6022b031ae | [] | no_license | KqSMea8/PythonTools | a5ac17182b2689a706180dc349d59c2484d3984c | 7279570b82fecbf59b71aa6b58ef975e90c660df | refs/heads/master | 2020-04-13T04:19:19.209243 | 2018-12-24T05:13:12 | 2018-12-24T05:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,876 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: ‘yuxuecheng‘
@contact: [email protected]
@software: PyCharm Community Edition
@file: weibo_auto_login.py
@time: 26/10/2017 09:49
"""
import sys
import urllib
import urllib2
import cookielib
import base64
import re
import json
import rsa
import binascii
import logging
import time
import os
import traceback
# import requests
# from bs4 import BeautifulSoup
# Simulated login for Sina Weibo
class WeiboLogin(object):
def __init__(self):
        # Get an object that stores cookies
# self.cj = cookielib.CookieJar()
self.cj = cookielib.LWPCookieJar()
def enable_cookies(self):
        # Bind the cookie container to an HTTP cookie handler
        cookie_support = urllib2.HTTPCookieProcessor(self.cj)
        # Create an opener with a handler for opening HTTP URLs
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        # Install the opener; every later urlopen() call will use it
        urllib2.install_opener(opener)
@staticmethod
def get_server_data():
"""
        Pre-login request that obtains servertime, nonce, pubkey and rsakv
:return:
"""
# url = 'http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=ZW5nbGFuZHNldSU0MDE2My5jb20%3D&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.18)&_=1442991685270'
prelogin_url_format = "https://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.19)&_=%d"
cur_time = int((time.time() * 1000))
prelogin_url = prelogin_url_format % cur_time
data = urllib2.urlopen(prelogin_url).read()
try:
json_data = re.search(r'(\(.*\))', data).group(0)
data = json.loads(json_data[1:-1])
server_time = str(data['servertime'])
nonce = data['nonce']
pubkey = data['pubkey']
rsakv = data['rsakv']
return server_time, nonce, pubkey, rsakv
except:
logging.error('Get severtime error!')
return None
@staticmethod
def get_password(password, servertime, nonce, pubkey):
"""
        Get the RSA-encrypted password
:param password:
:param servertime:
:param nonce:
:param pubkey:
:return:
"""
rsa_publickey = int(pubkey, 16)
        key = rsa.PublicKey(rsa_publickey, 65537)  # build the public key
        message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)  # plaintext layout taken from the JS encryption file
        password = rsa.encrypt(message, key)  # encrypt
        password = binascii.b2a_hex(password)  # convert the ciphertext to hex
return password
@staticmethod
def get_username(user_name):
"""
        Get the base64-encoded username
:param user_name:
:return:
"""
user_name = urllib.quote(user_name)
user_name = base64.encodestring(user_name)[:-1]
return user_name
@staticmethod
def get_form_data( user_name, password, servertime, nonce, pubkey, rsakv ):
"""
        Build the form data to submit
:param user_name:
:param password:
:param servertime:
:param nonce:
:param pubkey:
:param rsakv:
:return:
"""
user_name = WeiboLogin.get_username(user_name)
psw = WeiboLogin.get_password(password, servertime, nonce, pubkey)
form_data = {
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'useticket': '1',
'pagerefer': 'http://weibo.com/p/1005052679342531/home?from=page_100505&mod=TAB&pids=plc_main',
'vsnf': '1',
'su': user_name,
'service': 'miniblog',
'servertime': servertime,
'nonce': nonce,
'pwencode': 'rsa2',
'rsakv': rsakv,
'sp': psw,
'sr': '1366*768',
'encoding': 'UTF-8',
'prelt': '115',
'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
'returntype': 'META'
}
form_data = urllib.urlencode(form_data)
return form_data
    # Login function
def login(self, username, password):
self.enable_cookies()
url = 'https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.19)'
servertime, nonce, pubkey, rsakv = WeiboLogin.get_server_data()
formData = WeiboLogin.get_form_data(username, password, servertime, nonce, pubkey, rsakv)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0'}
req = urllib2.Request(
url=url,
data=formData,
headers=headers
)
result = urllib2.urlopen(req)
text = result.read()
logging.info("login data: %s" % text.decode("gb2312"))
        # Not done yet! The response embeds a redirect URL inside a script; the real login only completes after requesting it
try:
url_data = re.search(r'(\(.*\))', text).group(0)
login_url = url_data[2:-2]
logging.info("login_url: %s" % login_url)
login_req = urllib2.Request(
url=login_url,
headers=headers
)
            # Thanks to the handler installed earlier, cookies are stored automatically
urllib2.urlopen(login_req)
logging.info("Login success!")
except urllib2.URLError as urle:
traceback.print_exc(urle)
logging.error('Login error! Error message: %s' % urle.message)
return -1
except Exception as e:
logging.error(e)
return -1
        # Visit the home page and write it to a file
# url = 'http://weibo.com/u/2679342531/home?topnav=1&wvr=6'
url = 'http://www.weibo.com/linusyuno1/home?wvr=5&lf=reg'
request = urllib2.Request(url)
response = urllib2.urlopen(request)
logging.info(response.headers.dict)
text = response.read()
filename = os.getcwd() + os.path.sep + "weibo.html"
fp_raw = open(filename, "w+")
fp_raw.write(text)
fp_raw.close()
logging.info(text.decode("gbk"))
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)  # init_logging() was never defined; plain basicConfig is a safe stand-in
    logging.info(u'Sina Weibo simulated login:')
    # username = raw_input(u'Username: ')
    # password = raw_input(u'Password: ')
username = "[email protected]"
password = "yuxc870704"
weibologin = WeiboLogin()
weibologin.login(username, password)
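    # Hedged side demo (dummy value, not a real account): get_username is just
    # URL-quote + base64, so it is safe to show in isolation.
    print(WeiboLogin.get_username('[email protected]'))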
filename = os.getcwd() + os.path.sep + 'cookie.txt'
weibologin.cj.save(filename) | [
"[email protected]"
] | |
847889e16e3bd3550569db29c5361a86553d6bf7 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/ident/contextelement.py | b380cc640393755a40695dbc4560e4f854377a06 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,121 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ContextElement(Mo):
"""
The identity context element.
"""
meta = ClassMeta("cobra.model.ident.ContextElement")
meta.moClassName = "identContextElement"
meta.rnFormat = "id-[%(eDn)s]"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.ident.Context")
meta.rnPrefixes = [
('id-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "eDn", "eDn", 347, PropCategory.REGULAR)
prop.label = "Element DN"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("eDn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "sDn", "sDn", 348, PropCategory.REGULAR)
prop.label = "Segment DN"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("sDn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "eDn"))
getattr(meta.props, "eDn").needDelimiter = True
def __init__(self, parentMoOrDn, eDn, markDirty=True, **creationProps):
namingVals = [eDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
9a0e63c5b5b8525ef929e64c55a91bb636cdfab2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2938/.mooctest/answer.py | 8566383381c158a21b1b6cefb037db2930fa8950 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #include<bits/stdc++.h>//头文件
using namespace std;
string a[100];//the array of strings to be sorted
stringstream ss;//string <-> number conversion helper
int main(){
    for(int i=1;i<=100;i++){//store the numbers 1..100 as strings
        ss<<i;
        ss>>a[i-1];
        ss.str("");//clear the buffer
        ss.clear();//reset the stream state
    }
    sort(a,a+100);//sort (lexicographically, since these are strings)
    for(int i=0;i<100;i++)
        cout<<a[i]<<endl;//output
    return 0;//done
} | [
"[email protected]"
] | |
8885b77cdd0914bc461b0303e7c24a2db6ac1e80 | 2be8a9f06d4003d12c0a727fb83d284c31a53050 | /HoudiniHotBox17.0/lib/Cd_Material.py | 78316d6bcb22180bb4bd80f79268dcbe13118016 | [] | no_license | LiuLiangFx/SmileHotBOX | 7551d9578b2defe612950cb8e3bffdb85024cede | 8bd8eac69b3c2a9824b9aa4488ca77789bea8d85 | refs/heads/master | 2021-01-01T10:22:26.959731 | 2020-02-09T03:16:32 | 2020-02-09T03:16:32 | 239,236,801 | 0 | 0 | null | 2020-02-09T02:47:18 | 2020-02-09T02:47:18 | null | UTF-8 | Python | false | false | 1,494 | py | import hou
class Cd_Material:
def __init__(self):
self.pane=hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
self.node= hou.selectedNodes()[0]
fl=open('material.txt', 'w')
fl.write(self.node.path())
fl.close()
def run(self):
if self.node.type().name() == "material" and self.node.parm("shop_materialpath1").eval() == "":
self.pane.cd("/shop")
elif self.node.type().name() == "material" and self.node.parm("shop_materialpath1").eval() != "":
try:
mNode = hou.node(self.node.parm("shop_materialpath1").eval())
mNode.allowEditingOfContents()
self.pane.cd(mNode.path())
except:
self.pane.cd("/shop")
if self.node.type().name() == "geo" and self.node.parm("shop_materialpath").eval() == "":
self.pane.cd("/shop")
elif self.node.type().name() == "geo" and self.node.parm("shop_materialpath").eval() != "":
try:
mNode = hou.node(self.node.parm("shop_materialpath").eval())
mNode.allowEditingOfContents()
self.pane.cd(mNode.path())
except:
self.pane.cd("/shop")
a= Cd_Material()
a.run() | [
"[email protected]"
] | |
82d781cf2b96438286ea7fd29e2c1490e21df986 | 6191bad7750404bc0bcaec43a8dea51b52980f04 | /Seção_07/Collections/deque.py | b4b481fd5e7d3175e21b72433dcb6f37509d11ff | [] | no_license | Lehcs-py/guppe | abfbab21c1b158b39251fa6234a4a98ce5f31c2a | 2ff007bce88e065e6d3020971efd397ec7f7084b | refs/heads/main | 2023-02-26T18:43:06.052699 | 2021-02-07T18:22:53 | 2021-02-07T18:22:53 | 330,180,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from collections import deque
deq = deque('LEHCS')
print(deq)
deq.append('A')
print(deq)
deq.appendleft('D')
print(deq)
print(deq.pop())
print(deq)
print(deq.popleft())
print(deq)
| [
"[email protected]"
] | |
438033b0dd31378c6fc09ace40f3c3bee1d9bafe | e5b4ed93d6666e195e96a265d3e7cfe4243a7300 | /hunter/hunter.py | 0914e6258d70bb3bcec304a85ffabec2451f20d3 | [] | no_license | Spider251/python | 934f5b8b923c2b61186a6df8445957290e5c4c74 | 8b1931f862e1d5c29fed9af624bcac94c1d25755 | refs/heads/master | 2020-04-05T11:58:04.558098 | 2018-11-09T12:06:06 | 2018-11-09T12:06:06 | 156,852,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | '''
Implements the hunter role.
Required parameter: 1. a dict with every player's survival status
'''
# person: attributes shared by all roles
# survival: survival status of every player, e.g. {1: 0, 2: 1, ...}
#           the key is the player number; value 0 = dead, 1 = alive
class hunter:
    def __init__(self, survival):
        self.survival = survival
    def fun(self):
        for i in self.survival:
            if i == 2:
                if self.survival[i] == 1:
                    pass
                elif self.survival[i] == 0:
                    print("The hunter is dead")
                    self.say()
    def say(self):
        while True:
            a = input("Shoot Y / pass N: ")
            if a == 'N':
                print("Done")
                break  # without this break the prompt loops forever after choosing 'N'
            elif a == 'Y':
                print("Choose the player to take down: ", end="")
                for i in self.survival:
                    if i != 2:
                        print(i, end=" ")
                print()
                a = input("Kill: ")
                print(a, "is dead")
                break
if __name__ == '__main__':
a = {1:0,2:0,3:1}
hunter = hunter(a)
hunter.fun() | [
"[email protected]"
] | |
1fc8cb941330ce78d411f3edb862796a171a89fc | e77b92df446f0afed18a923846944b5fd3596bf9 | /Inflearn_algo/section7_dfs_bfs/pro1_maxScore_re.py | 7feac790f2615b319b2bc54fde9f4010284f4fdd | [] | no_license | sds1vrk/Algo_Study | e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e | fbbc21bb06bb5dc08927b899ddc20e6cde9f0319 | refs/heads/main | 2023-06-27T05:49:15.351644 | 2021-08-01T12:43:06 | 2021-08-01T12:43:06 | 356,512,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # 최대 점수 구하기 (DFS)
# Treat each problem as a solve / skip decision
import sys
sys.stdin=open("input.txt","r")
n,m=map(int,input().split())
ss=[]
tt=[]
for i in range(n):
a,b=map(int,input().split())
ss.append(a)
tt.append(b)
max_score=-1
def dfs(l,s,t):
global max_score
    # Pruning: once t exceeds m this branch cannot become valid
if t>m:
return
if l==n:
if s>max_score:
max_score=s
else :
        # solve problem l
dfs(l+1,s+ss[l],t+tt[l])
        # skip problem l
dfs(l+1,s,t)
dfs(0,0,0)
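# --- Hedged cross-check (not in the original solution) ---------------------
# For these small inputs, enumerate every solve/skip combination with
# itertools.product and confirm it matches the DFS answer.
from itertools import product
brute = max(sum(s for s, c in zip(ss, bits) if c)
            for bits in product((0, 1), repeat=n)
            if sum(t for t, c in zip(tt, bits) if c) <= m)
assert brute == max_score, (brute, max_score)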
print(max_score) | [
"[email protected]"
] | |
3674de65b0e09eba8a92b497cf4a7530fb460826 | d53bc632503254ca0d5099fe457c02c07212a131 | /cookieproject1/cookieproject1/wsgi.py | 0e0d958b3a4961808057c49586b4e5768c75d831 | [] | no_license | srikar1993/django | ba8428f6e1162cc40f2d034126e7baf29eb62edc | 2199d5d94accc7bce5b3fac4a4b7b1444e39b35f | refs/heads/master | 2023-07-14T21:10:52.654992 | 2021-08-26T06:37:04 | 2021-08-26T06:37:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for cookieproject1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cookieproject1.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
eb6f175b0c5788c950623020ee524b875e28fc23 | bafd37fdbaf76d5d7dabd9c07985969b3924f9c8 | /example_client/example.py | 9671da3b0660af7e0bb9fe806d6331467f1918ae | [
"Apache-2.0"
] | permissive | k24dizzle/nagios_registration | 2c1c95c7c871ee8ed31de46d555c812f2c0f41c8 | be18dbadd2c08def81e795e4afe2fe2cf41775cf | refs/heads/master | 2020-03-08T11:54:30.569982 | 2015-07-16T18:01:07 | 2015-07-16T18:01:07 | 128,111,583 | 1 | 0 | null | 2018-04-04T19:32:53 | 2018-04-04T19:32:52 | null | UTF-8 | Python | false | false | 3,263 | py | import oauth2
import json
###
#
# This script will create 2 hosts, and add them to a host group.
# It will then create a service, and assign that service to both hosts.
# It will then deploy a new nagios configuration file.
#
###
consumer_key = "OAUTH_KEY"
consumer_secret = "OAUTH_SECRET"
registration_server = "http://localhost:8000"
###
#
# You can create a consumer key and secret on the nagios_registration
# server with a django management command:
#
# python manage.py create_consumer
#
###
consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)
client = oauth2.Client(consumer)
# Variables used by the actual requests below
hostname1 = "example app host"
address1 = "127.0.0.1"
hostname2 = "second app host"
address2 = "127.0.0.2"
groupname = "example_app_servers"
alias = "Example App Servers"
base_service = "24x7-active-service"
service_description = "Disk Usage"
check_command = "check_remote!disk_check.py!98!99"
# End of settings, now just making requests to the server
# Create the 2 hosts
client.request("%s/api/v1/host" % (registration_server),
method='POST',
body=json.dumps({"name": hostname1, "address": address1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/host" % (registration_server),
method='POST',
body=json.dumps({"name": hostname2, "address": address2}),
headers={"Content-Type": "application/json"})
# Create the hostgroup
client.request("%s/api/v1/hostgroup" % (registration_server),
method='POST',
body=json.dumps({"name": groupname, "alias": alias}),
headers={"Content-Type": "application/json"})
# Add the hosts to the hostgroup
client.request("%s/api/v1/hostgroup" % (registration_server),
method='PATCH',
body=json.dumps({"group": groupname, "host": hostname1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/hostgroup" % (registration_server),
method='PATCH',
body=json.dumps({"group": groupname, "host": hostname2}),
headers={"Content-Type": "application/json"})
# Create a service
client.request("%s/api/v1/service" % (registration_server),
method='POST',
body=json.dumps({"base_service": base_service,
"description": service_description,
"check_command": check_command}),
headers={"Content-Type": "application/json"})
# Add the service to the 2 hosts
client.request("%s/api/v1/service" % (registration_server),
method='PATCH',
body=json.dumps({"service": service_description,
"host": hostname1}),
headers={"Content-Type": "application/json"})
client.request("%s/api/v1/service" % (registration_server),
method='PATCH',
body=json.dumps({"service": service_description,
"host": hostname2}),
headers={"Content-Type": "application/json"})
# Deploy the changes
client.request("%s/api/v1/deploy" % (registration_server), method="POST")
print "Done!"
| [
"[email protected]"
] | |
05546c27ea40660996b98f84d8a1a0f04a42c288 | 85bf9a13bf62c1f074894d134c23dd992ae8688c | /problems/p317/Solution.py | 6d55e5a066806320f5503f718d38b8fa74f2166f | [] | no_license | pololee/oj-leetcode | 4cca3d309b2c9931d15d3cec4b07b5d9d22733ef | 78a8b27ee108ba93aa7b659665976112f48fc2c2 | refs/heads/master | 2020-06-21T02:15:26.882273 | 2020-02-06T04:56:21 | 2020-02-06T04:56:21 | 197,320,113 | 0 | 0 | null | 2020-02-06T04:56:23 | 2019-07-17T05:20:02 | Python | UTF-8 | Python | false | false | 2,336 | py | import collections
import sys
class Solution:
DIRECTIONS = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def shortestDistance(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
row_size = len(grid)
col_size = len(grid[0])
distance = [[0 for _ in range(col_size)]
for _ in range(row_size)]
reaches = [[0 for _ in range(col_size)]
for _ in range(row_size)]
num_of_buildings = 0
for i in range(row_size):
for j in range(col_size):
if grid[i][j] == 1:
num_of_buildings += 1
self.bfs(grid, distance, reaches, i, j)
shortest = sys.maxsize
for i in range(row_size):
for j in range(col_size):
if grid[i][j] == 0 and reaches[i][j] == num_of_buildings:
shortest = min(shortest, distance[i][j])
if shortest == sys.maxsize:
return -1
return shortest
def bfs(self, grid, distance, reaches, istart, jstart):
row_size = len(grid)
col_size = len(grid[0])
visited = [[False for _ in range(col_size)]
for _ in range(row_size)]
queue = collections.deque()
queue.append((istart, jstart))
visited[istart][jstart] = True
level = 0
while queue:
size = len(queue)
for _ in range(size):
row, col = queue.popleft()
if grid[row][col] == 0:
distance[row][col] += level
reaches[row][col] += 1
for drow, dcol in self.DIRECTIONS:
new_row = row + drow
new_col = col + dcol
if new_row >= 0 and new_row < row_size and new_col >= 0 and new_col < col_size and grid[new_row][new_col] == 0 and not visited[new_row][new_col]:
visited[new_row][new_col] = True
queue.append((new_row, new_col))
level += 1
def main():
test = [[1, 0, 2, 0, 1], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0]]
sol = Solution()
print(sol.shortestDistance(test))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
acbbec5e9fde66dc1ec45b08c53724a5018010e7 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/tenderli.py | 28ba6f38f41106cd5c55dcedcbdf538f5a5cc99c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 835 | py | ii = [('GodwWSL2.py', 4), ('FerrSDO3.py', 3), ('WilbRLW.py', 1), ('WilbRLW4.py', 2), ('AubePRP2.py', 1), ('CookGHP.py', 1), ('KembFJ1.py', 2), ('WilbRLW5.py', 2), ('TennAP.py', 1), ('BailJD2.py', 3), ('WilbRLW2.py', 1), ('LyttELD.py', 3), ('CoopJBT2.py', 1), ('GrimSLE.py', 1), ('AinsWRR3.py', 2), ('RoscTTI2.py', 2), ('ClarGE.py', 8), ('LandWPA.py', 2), ('GilmCRS.py', 3), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('LandWPA2.py', 2), ('FerrSDO2.py', 7), ('TalfTIT.py', 1), ('CoopJBT.py', 3), ('SoutRD2.py', 1), ('WheeJPT.py', 3), ('HowiWRL2.py', 1), ('BailJD3.py', 3), ('MereHHB.py', 1), ('HogaGMM.py', 2), ('MartHRW.py', 1), ('DequTKM.py', 1), ('KembFJ2.py', 2), ('AinsWRR2.py', 1), ('ClarGE3.py', 2), ('RogeSIP.py', 2), ('DibdTRL.py', 2), ('HogaGMM2.py', 1), ('MartHSI.py', 1), ('BowrJMM3.py', 1), ('ClarGE4.py', 2), ('AdamJOA.py', 1)] | [
"[email protected]"
] | |
4120d60565a39b46cd5b6d64ed972b8c46931722 | 5a298ece5b17e6e993d50a855027f265e115e2bd | /utilities/filter_data.py | 99687a7e234137d21978c275dd56b29a9d74c2f1 | [] | no_license | hvk3/IR_project | 86b8a1176f6a8ed541f179f1c541eb139dde0295 | ae6deea2276f0a76bfa23482fd1b7a4c1f039264 | refs/heads/master | 2021-10-16T17:33:11.258479 | 2019-02-12T08:45:51 | 2019-02-12T08:45:51 | 118,168,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from pymongo import MongoClient
from langdetect import detect
from tqdm import tqdm
client = MongoClient()
db = client.youtube8m
ds_1 = db.iteration3
ds_2 = db.iteration4
ds_2.remove()
print("Before:", ds_1.find().count())
for record in tqdm(ds_1.find()):
title = record['metadata']['title']
description = record['metadata']['description']
# if len(description) > 0 and len(title) > 0:
# ds_2.insert_one(record)
try:
if detect(description) == 'en': #3: title, #4: description
ds_2.insert_one(record)
except:
continue
print("After:", ds_2.find().count())
| [
"[email protected]"
] | |
87466cd291f6c19586b503ef7109c6a64acf8ca6 | 39157a854806af4db51b986adf5096bd342bacdb | /fuzznumpy/main.py | 68c4295fe915c1b180f9319956c9abc08d8c52e3 | [] | no_license | xcainiao/fuzzing | b6b43550f7a5c05595a180d111d9ec03e4710293 | 5cadbe3e1bcc9090a68b1006cb5b6b76db990ae1 | refs/heads/master | 2020-03-30T01:51:59.811511 | 2018-09-27T14:25:05 | 2018-09-27T14:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import fuzz
import numpy as np
from ctypes import CDLL
test = CDLL("c/test.so")
test.init()
fuzz.init()
while 1:
func = fuzz.generate()
# func = """import numpy\nnumpy.half(-1).choose(numpy.void(1), numpy.broadcast_arrays((1,)))"""
test.copybuff(func)
try:
exec(func, {"np":np})
except Exception as e:
# print e
continue
print func
fuzz.register(func)
| [
"[email protected]"
] | |
1df190b393e91b1201a3c30b120bc9a49a40a1b8 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/__init__.py | e65b58e4df63d726184cc67034cd38bbeac27625 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 9,972 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import tlvs
class segment_routing_sid_label_range(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The Segment Identifier (SID) or label ranges that are supported by
the local system for Segment Routing
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__tlvs',)
_yang_name = 'segment-routing-sid-label-range'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas', u'area', u'lsdb', u'lsa-types', u'lsa-type', u'lsas', u'lsa', u'opaque-lsa', u'router-information', u'tlvs', u'tlv', u'segment-routing-sid-label-range']
def _get_tlvs(self):
"""
Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
return self.__tlvs
def _set_tlvs(self, v, load=False):
"""
Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlvs() directly.
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tlvs must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
})
self.__tlvs = t
if hasattr(self, '_set'):
self._set()
def _unset_tlvs(self):
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
tlvs = __builtin__.property(_get_tlvs)
_pyangbind_elements = {'tlvs': tlvs, }
import tlvs
class segment_routing_sid_label_range(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The Segment Identifier (SID) or label ranges that are supported by
the local system for Segment Routing
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__tlvs',)
_yang_name = 'segment-routing-sid-label-range'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas', u'area', u'lsdb', u'lsa-types', u'lsa-type', u'lsas', u'lsa', u'opaque-lsa', u'router-information', u'tlvs', u'tlv', u'segment-routing-sid-label-range']
def _get_tlvs(self):
"""
Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
return self.__tlvs
def _set_tlvs(self, v, load=False):
"""
Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlvs() directly.
YANG Description: Sub-TLVs of the SID/Label range TLV of the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tlvs must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
})
self.__tlvs = t
if hasattr(self, '_set'):
self._set()
def _unset_tlvs(self):
self.__tlvs = YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)
tlvs = __builtin__.property(_get_tlvs)
_pyangbind_elements = {'tlvs': tlvs, }
| [
"[email protected]"
] | |
d33d903e7de59d03eac8b1c9b2af624e056b3328 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/website/doctype/social_link_settings/social_link_settings.py | 35954b6ce718f192fa921627f23f3e2a83b1b277 | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, VMRaid Technologies and contributors
# License: MIT. See LICENSE
# import vmraid
from vmraid.model.document import Document
class SocialLinkSettings(Document):
pass
| [
"[email protected]"
] | |
f819a142bd8930f08e51e57ed6af15a211801e81 | 4bcae7ca3aed842d647d9112547522cffa805d51 | /0674.最长连续递增序列.py | 43854333b238384701a6a84adb3ed71f0d9e3655 | [] | no_license | SLKyrim/vscode-leetcode | fd5a163f801661db0dfae1d4fdfa07b79fdb82b6 | 65a271c05258f447d3e56755726f02179780eb8a | refs/heads/master | 2021-07-03T03:15:28.883786 | 2021-02-23T06:19:18 | 2021-02-23T06:19:18 | 226,062,540 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #
# @lc app=leetcode.cn id=674 lang=python3
#
# [674] Longest Continuous Increasing Subsequence
#
# https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence/description/
#
# algorithms
# Easy (45.18%)
# Likes: 89
# Dislikes: 0
# Total Accepted: 30.7K
# Total Submissions: 68K
# Testcase Example: '[1,3,5,4,7]'
#
# Given an unsorted array of integers, find the longest continuous increasing
# subsequence and return its length.
#
# Example 1:
#
# Input: [1,3,5,4,7]
# Output: 3
# Explanation: The longest continuous increasing subsequence is [1,3,5], of length 3.
# Although [1,3,5,7] is also an ascending subsequence, it is not continuous,
# because 5 and 7 are separated by 4 in the original array.
#
# Example 2:
#
# Input: [2,2,2,2,2]
# Output: 1
# Explanation: The longest continuous increasing subsequence is [2], of length 1.
#
# Note: the array length will not exceed 10000.
#
#
# @lc code=start
from typing import List  # needed for the List[int] annotation below
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
res = 0
n = len(nums)
if n == 0:
return 0
if n == 1:
return 1
cnt = 1
for i in range(1, n):
if nums[i] > nums[i-1]:
cnt += 1
else:
res = max(res, cnt)
cnt = 1
return max(res, cnt)
# @lc code=end
| [
"[email protected]"
] | |
b26dfc2cc4ffb4aa822cac635d3e83c1522e9304 | 04b3a30ca30c3a9cc459b06fe1842a500dd5ab51 | /addresss/views.py | 06c24539f8cc82d589e80f97797e2431e41d5162 | [] | no_license | rahulsayon/Final-Ecommerce | 17b7830e44ab86b7513f48d80fc1bb7f12c36516 | ca0c860653ec1b80f0a3f012e338ecc2189019ac | refs/heads/master | 2022-12-11T01:12:03.500783 | 2020-09-13T20:09:40 | 2020-09-13T20:09:40 | 295,228,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | from django.shortcuts import render
from .forms import AddressForm
from django.shortcuts import redirect
from billing.models import BillingProfile
from django.utils.http import is_safe_url
from addresss.models import Address
# Create your views here.
def checkout_address_create_view(request):
form = AddressForm(request.POST or None)
context = { "form" : form }
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if form.is_valid():
print(form.cleaned_data)
instance = form.save(commit=False)
billing_profile , billing_profile_created = BillingProfile.objects.new_or_get(request)
if billing_profile is not None:
address_type = request.POST.get('address_type' , 'shipping')
print("billinf profile" , billing_profile)
instance.billing_profile = billing_profile
instance.address_type = request.POST.get('address_type' , 'shipping')
instance.save()
request.session[address_type + "_address_id"] = instance.id
print(address_type +"_address_id")
else:
print("error")
return redirect("cart:checkout")
if is_safe_url(redirect_path , request.get_host()):
return redirect(redirect_path)
else:
return redirect("cart:checkout")
return redirect("cart:checkout")
def checkout_address_reuse_view(request):
if request.user.is_authenticated:
context = {}
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if request.method == "POST":
print(request.POST)
shipping_address = request.POST.get('shipping_address', None)
address_type = request.POST.get('address_type', 'shipping')
billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
if shipping_address is not None:
qs = Address.objects.filter(billing_profile=billing_profile, id=shipping_address)
if qs.exists():
request.session[address_type + "_address_id"] = shipping_address
if is_safe_url(redirect_path, request.get_host()):
return redirect(redirect_path)
return redirect("cart:checkout")
| [
"[email protected]"
] | |
a75d04852aca116b804d4a5aa819b764cddff608 | 5d9636dcae2471d700da5583cfc0359644c7322d | /pugsley/auth/routes.py | 78e16175c4ac581d44b4ba571f9a66b393c72966 | [
"MIT"
] | permissive | kfields/pugsley-lite | 93a4c7c334fd9b4f3ab68acc565b1f29a4a31b99 | 9fdd4868895b38fb81855952f19bdf9cca1635b3 | refs/heads/master | 2023-01-24T18:29:15.338112 | 2019-08-11T20:33:30 | 2019-08-11T20:33:30 | 179,791,236 | 1 | 0 | MIT | 2023-01-09T22:22:33 | 2019-04-06T05:09:26 | CSS | UTF-8 | Python | false | false | 4,521 | py | from flask import render_template, redirect, url_for, flash, request, jsonify
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from pugsley import db
from pugsley.jwt import encode_auth_token
from pugsley.auth import bp
from pugsley.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, ResetPasswordForm
from pugsley.models.users import User
from pugsley.auth.emails import send_password_reset_email
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
email = form.email.data
if '@' in email:
user = User.query.filter_by(email=form.email.data).first()
else:
user = User.query.filter_by(username=form.email.data).first()
if user is None or not user.check_password(form.password.data):
flash(_('Invalid email or password'))
return redirect(url_for('auth.login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('main.index')
return redirect(next_page)
# return render_template('login.html', title=_('Log In'), form=form)
return render_template('layouts/auth-default.html',
content=render_template( 'pages/login.html', form=form ) )
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = RegistrationForm()
if form.validate_on_submit():
# user = User(first_name=form.first_name.data, last_name=form.last_name.data, username=form.username.data, email=form.email.data)
user = User(username=form.email.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash(_('Congratulations, you are now a registered user!'))
return redirect(url_for('auth.login'))
# return render_template('register.html', title=_('Register'), form=form)
return render_template('layouts/auth-default.html',
content=render_template( 'pages/register.html', form=form ) )
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash(
_('Check your email for the instructions to reset your password'))
return redirect(url_for('auth.login'))
return render_template('reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash(_('Your password has been reset.'))
return redirect(url_for('auth.login'))
return render_template('reset_password.html', form=form)
@bp.route('/token', methods=['POST'])
def token():
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username:
return jsonify({"msg": "Missing username parameter"}), 400
if not password:
return jsonify({"msg": "Missing password parameter"}), 400
user = User.query.filter_by(username=username).first()
if user is None or not user.check_password(password):
return jsonify({"msg": "Bad username or password"}), 401
# Identity can be any data that is json serializable
access_token = encode_auth_token(sub=username, id=user.id)
print(access_token)
return jsonify({"token": access_token.decode('utf-8')}), 200 | [
"[email protected]"
] | |
d8982a501517e741145cac724e03b59326021d7d | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/dxtbx/command_line/print_header.py | 0927cef0df1adb502a68d0f8709b4377dcad155a | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,633 | py | from __future__ import absolute_import, division, print_function
import sys
from scitbx.array_family import flex
from dxtbx.format.FormatMultiImage import FormatMultiImage
from dxtbx.format.Registry import Registry
def print_header():
# this will do the lookup for every frame - this is strictly not needed
# if all frames are from the same instrument
for arg in sys.argv[1:]:
print("=== %s ===" % arg)
format_class = Registry.find(arg)
print("Using header reader: %s" % format_class.__name__)
i = format_class(arg)
beam = i.get_beam()
goniometer = i.get_goniometer()
detector = i.get_detector()
scan = i.get_scan()
if beam is None:
print("No beam model found")
else:
print(beam)
if detector is None:
print("No detector model found")
else:
print(detector)
if goniometer is None:
print("No goniometer model found")
else:
print(goniometer)
if scan is None:
print("No scan model found")
else:
print(scan)
if not issubclass(format_class, FormatMultiImage):
try:
raw_data = i.get_raw_data()
if not isinstance(raw_data, tuple):
raw_data = (raw_data,)
d = [p.as_1d() for p in raw_data]
print("Total Counts: %d" % sum([flex.sum(p.select(p >= 0)) for p in d]))
except AttributeError:
print("Could not read image data")
if __name__ == "__main__":
print_header()
| [
"[email protected]"
] | |
1c930c629d264c1b02af2492b5b962be70f570d9 | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /nlp-automl-20191111/setup.py | 7541638ca97742a773686465bb0c04174993e7bc | [
"Apache-2.0"
] | permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_nlp-automl20191111.
Created on 30/12/2020
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_nlp_automl20191111"
NAME = "alibabacloud_nlp-automl20191111" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud nlp-automl (20191111) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.1, <1.0.0",
"alibabacloud_tea_openapi>=0.1.0, <1.0.0",
"alibabacloud_openapi_util>=0.0.3, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","nlp","automl20191111"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"[email protected]"
] | |
48692f6bb82436458dcda51926e85f92d86ed1ad | 589b5eedb71d83c15d44fedf60c8075542324370 | /project/stock_project/alpha_model/alpha_factor/ARAPIncomeTTM.py | 605a216d31fef56f1a88b39a3a9a2b23dfa799dd | [] | no_license | rlcjj/quant | 4c2be8a8686679ceb675660cb37fad554230e0d4 | c07e8f0f6e1580ae29c78c1998a53774a15a67e1 | refs/heads/master | 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | import pandas as pd
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.stock.stock_factor_operate import StockFactorOperate
def ARAPIncomeTTM(beg_date, end_date):
"""
    Factor definition: (advance receipts + accounts payable) / total operating revenue, TTM
    Based on the most recent financial report, updated continuously
    If either component is negative, the result is negative
"""
# param
#################################################################################
factor_name = 'ARAPIncomeTTM'
ipo_num = 90
# read data
#################################################################################
income = Stock().get_factor_h5("OperatingIncome", None, "primary_mfc")
advance = Stock().get_factor_h5("AdvanceReceipts", None, "primary_mfc")
payable = Stock().get_factor_h5("AccountsPayable", None, "primary_mfc")
# data precessing
#################################################################################
[advance, payable, income] = Stock().make_same_index_columns([advance, payable, income])
add = advance.add(payable)
ratio = add.div(income)
ratio = StockFactorOperate().change_quarter_to_daily_with_report_date(ratio, beg_date, end_date)
res = ratio.T.dropna(how='all').T
# save data
#############################################################################
Stock().write_factor_h5(res, factor_name, "alpha_dfc")
return res
#############################################################################
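# Hedged mini-illustration (not from the original project): the core of the
# factor is just element-wise (advance + payable) / income on aligned frames.
def _ratio_demo():
    cols = ['20201231', '20210331']  # illustrative report dates
    adv = pd.DataFrame([[1.0, 2.0]], index=['000001.SZ'], columns=cols)
    pay = pd.DataFrame([[3.0, 4.0]], index=['000001.SZ'], columns=cols)
    inc = pd.DataFrame([[8.0, 12.0]], index=['000001.SZ'], columns=cols)
    return adv.add(pay).div(inc)  # -> 0.5 and 0.5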
if __name__ == '__main__':
from datetime import datetime
beg_date = '2004-01-01'
end_date = datetime.today()
data = ARAPIncomeTTM(beg_date, end_date)
print(data)
| [
"[email protected]"
] | |
ba5fe81af0632687c14d963ae372ba1b8ee5503f | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/EC_46_2_C_1.py | 8ee3122998960e839a22312f1db953f98a96581f | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | n = int(input())
a = []
for i in range(n):
l, r = [int(x) for x in input().split()]
a.append([l, 1])
a.append([r+1, -1])
a = sorted(a)  # sweep the +1/-1 events in coordinate order
ans = [0] * (n + 1)  # ans[k] = total length covered by exactly k segments
idx = 0  # number of segments currently open
for i in range(len(a) - 1):
    idx += a[i][1]
    ans[idx] += a[i+1][0] - a[i][0]
for i in range(1, n+1):
print(ans[i], end = " ") | [
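
# --- Added sketch (not part of the original file) ----------------------------
# The same sweep packaged as a pure function so the logic can be tested
# without stdin. coverage_lengths(segs)[k] is the number of integer points
# covered by exactly k of the given segments.
def coverage_lengths(segs):
    events = []
    for l, r in segs:
        events.append((l, 1))       # coverage rises at l
        events.append((r + 1, -1))  # ...and falls just past r
    events.sort()
    ans = [0] * (len(segs) + 1)
    depth = 0
    for i in range(len(events) - 1):
        depth += events[i][1]
        ans[depth] += events[i + 1][0] - events[i][0]
    return ans

# e.g. segments [0,3], [1,3], [3,8]: 6 points covered once, 2 twice, 1 thrice
assert coverage_lengths([(0, 3), (1, 3), (3, 8)]) == [0, 6, 2, 1]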
"[email protected]"
] | |
# ---- file: /filters/stopwords_filter.py | repo: Rigel772/python-keyword-density | Python | no license ----
#-*- coding: utf-8 -*-
import os.path
from .base_filter import BaseFilter
class StopwordsFilter(BaseFilter):
def __init__(self, country):
super(StopwordsFilter, self).__init__()
self.country = country
stopword_fname = '%s.txt' % self.country
folder_name = os.path.dirname(__file__)
self.fname = os.path.join(folder_name, 'stopwords', stopword_fname)
with open(self.fname, 'rb') as f:
self.stopwords = {l.strip().decode('utf8') for l in f if l}
def predicate(self, tok):
"""Returns True if tok not in stopwords else False"""
return tok not in self.stopwords
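
# --- Added sketch (not part of the original file) ----------------------------
# Hypothetical usage, assuming BaseFilter needs no constructor arguments and
# that filters/stopwords/english.txt exists. predicate() returns True for
# tokens to keep, so it composes directly with a comprehension, e.g.
#   f = StopwordsFilter('english')
#   kept = [tok for tok in tokens if f.predicate(tok)]
# The same set-membership idea, self-contained and runnable:
stopwords = {'the', 'a', 'an'}
tokens = ['the', 'quick', 'brown', 'fox']
print([tok for tok in tokens if tok not in stopwords])  # ['quick', 'brown', 'fox']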
# ---- file: /m01_basics/l_07_nested_data.py | repo: be1iever/python-52-weeks | Python | MIT license ----
from pprint import pprint
from random import choice
import copy
from util.create_utils import create_network
device = {
"name": "r3-L-n7",
"vendor": "cisco",
"model": "catalyst 2960",
"os": "ios",
"interfaces": [
]
}
print("\n\n----- device with no interfaces --------------------")
for key, value in device.items():
print(f"{key:>16s} : {value}")
interfaces = list()
for index in range(0, 8):
interface = {
"name": "g/0/0/" + str(index),
"speed": choice(["10", "100", "1000"])
}
interfaces.append(interface)
device["interfaces"] = interfaces
print("\n\n----- device with interfaces --------------------")
for key, value in device.items():
if key != "interfaces":
print(f"{key:>16s} : {value}")
else:
print(f"{key:>16s} :")
for interface in device["interfaces"]:
print(f"\t\t\t\t\t{interface}")
print()
print("\n\n----- device with interfaces using pprint--------------------")
pprint(device)
print("\n\n----- network with devices and interfaces --------------------")
network = create_network(num_devices=4, num_subnets=4)
pprint(network)
print("\n----- information about network --------------------")
print(f"-- number of subnets: {len(network['subnets'])}")
print(f"-- list of subnets: {network['subnets'].keys()}")
print(f"-- list of subnets w/o extraneous: {', '.join(network['subnets'])}")
print("\n----- network and devices nicely formatted --------------------")
for subnet_address, subnet in network["subnets"].items():
print(f"\n-- subnet: {subnet_address}")
for device in subnet["devices"]:
print(f" |-- device: {device['name']:8} {device['ip']:10} {device['vendor']:>10} : {device['os']}")
print("\n\n----- remember assignment vs shallow copy vs deep copy --------------------")
print(" modify 'network' only, and see if assign/copy/deepcopy versions reflect that change")
network_assign = network
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "different name assigned"
print(f" --- network == network_assign : {network==network_assign}")
network_copy = copy.copy(network)
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "another different name, copy this time"
print(f" --- network == network_copy : {network==network_copy}")
network_deepcopy = copy.deepcopy(network)
network["subnets"]["10.0.1.0"]["devices"][0]["name"] = "this time with deep copy"
print(f" --- network == network_deepcopy : {network==network_deepcopy}")
# ---- file: /hacker/FirefoxSQLite.py | repo: chenshuo666/mypython | Python | no license ----
#!/usr/bin/python
# coding=utf-8
import re
import optparse
import os
import sqlite3
# Parse downloads.sqlite and print the browser's download records
def printDownloads(downloadDB):
conn = sqlite3.connect(downloadDB)
c = conn.cursor()
c.execute('SELECT name, source, datetime(endTime/1000000, \'unixepoch\') FROM moz_downloads;')
print('\n[*] --- Files Downloaded --- ')
for row in c:
print('[+] File: ' + str(row[0]) + ' from source: ' + str(row[1]) + ' at: ' + str(row[2]))
# Parse cookies.sqlite and print the stored cookies
def printCookies(cookiesDB):
try:
conn = sqlite3.connect(cookiesDB)
c = conn.cursor()
c.execute('SELECT host, name, value FROM moz_cookies')
print('\n[*] -- Found Cookies --')
for row in c:
host = str(row[0])
name = str(row[1])
value = str(row[2])
print('[+] Host: ' + host + ', Cookie: ' + name + ', Value: ' + value)
except Exception as e:
if 'encrypted' in str(e):
print('\n[*] Error reading your cookies database.')
print('[*] Upgrade your Python-Sqlite3 Library')
# Parse places.sqlite and print the browsing history
def printHistory(placesDB):
try:
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute("SELECT url, datetime(visit_date/1000000, 'unixepoch') FROM moz_places, moz_historyvisits WHERE visit_count > 0 AND moz_places.id==moz_historyvisits.place_id;")
print('\n[*] -- Found History --')
for row in c:
url = str(row[0])
date = str(row[1])
print('[+] ' + date + ' - Visited: ' + url)
except Exception as e:
if 'encrypted' in str(e):
print('\n[*] Error reading your places database.')
print('[*] Upgrade your Python-Sqlite3 Library')
exit(0)
# Parse places.sqlite and extract Baidu search queries from the history
def printBaidu(placesDB):
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute( "SELECT url, datetime(visit_date/1000000, 'unixepoch') FROM moz_places, moz_historyvisits WHERE visit_count > 0 AND moz_places.id==moz_historyvisits.place_id;")
print('\n[*] -- Found Baidu --')
for row in c:
url = str(row[0])
date = str(row[1])
if 'baidu' in url.lower():
r = re.findall(r'wd=.*?\&', url)
if r:
search = r[0].split('&')[0]
search = search.replace('wd=', '').replace('+', ' ')
print('[+] ' + date + ' - Searched For: ' + search)
def main():
    parser = optparse.OptionParser("[*]Usage: firefoxParse.py -p <firefox profile path>")
    # e.g. C:\Users\<username>\AppData\Roaming\Mozilla\Firefox\Profiles\e28nsous.default - the directory holding the SQLite files
    parser.add_option('-p', dest='pathName', type='string', help='specify firefox profile path')
(options, args) = parser.parse_args()
pathName = options.pathName
    if pathName is None:
print(parser.usage)
exit(0)
elif os.path.isdir(pathName) == False:
print('[!] Path Does Not Exist: ' + pathName)
exit(0)
else:
downloadDB = os.path.join(pathName, 'downloads.sqlite')
if os.path.isfile(downloadDB):
printDownloads(downloadDB)
else:
print('[!] Downloads Db does not exist: ' + downloadDB)
cookiesDB = os.path.join(pathName, 'cookies.sqlite')
        if os.path.isfile(cookiesDB):
            printCookies(cookiesDB)
else:
print('[!] Cookies Db does not exist:' + cookiesDB)
placesDB = os.path.join(pathName, 'places.sqlite')
if os.path.isfile(placesDB):
printHistory(placesDB)
printBaidu(placesDB)
else:
print('[!] PlacesDb does not exist: ' + placesDB)
if __name__ == '__main__':
    main()
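
# --- Added sketch (not part of the original file) ----------------------------
# Firefox holds locks on these databases while the browser is running, so a
# common trick is to open them read-only through an SQLite URI (supported by
# sqlite3.connect since Python 3.4). The profile path below is an example.
def openReadOnly(dbPath):
    # uri=True makes sqlite3 parse the file: URI; mode=ro forbids any writes
    return sqlite3.connect('file:' + dbPath.replace('\\', '/') + '?mode=ro', uri=True)

# conn = openReadOnly(r'C:/Users/<username>/AppData/Roaming/Mozilla/Firefox/Profiles/e28nsous.default/places.sqlite')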
"[email protected]"
] | |
# ---- file: /.cache/Microsoft/Python Language Server/stubs.v4/PW5N1gWcYNUaFmNEjFpBbn4_TkxeV53eiQaZBrpg6xw=/python3.pyi | repo: stepin-s/st | auto-generated Python builtin stubs | no license ----
class NotImplementedType(object):
__class__ = NotImplementedType
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class object:
'The base class of the class hierarchy.\n\nWhen called, it accepts no arguments and returns a new featureless\ninstance that has no instance attributes and cannot be given any.\n'
__class__ = object
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
def __dir__(self):
'Default dir() implementation.'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
def __format__(self, format_spec):
'Default object formatter.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self):
'The base class of the class hierarchy.\n\nWhen called, it accepts no arguments and returns a new featureless\ninstance that has no instance attributes and cannot be given any.\n'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Helper for pickle.'
return ''; return ()
def __reduce_ex__(self, protocol):
'Helper for pickle.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __sizeof__(self):
'Size of object in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Object__ = object
class type(object):
"type(object_or_name, bases, dict)\ntype(object) -> the object's type\ntype(name, bases, dict) -> a new type"
__base__ = object
__bases__ = ()
__basicsize__ = 880
def __call__(self, *args, **kwargs):
'Call self as a function.'
return cls()
__class__ = type
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
__dictoffset__ = 264
def __dir__(self):
'Specialized __dir__ implementation for types.'
return ['']
__flags__ = 2148291584
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, object_or_name, bases, dict):
"type(object_or_name, bases, dict)\ntype(object) -> the object's type\ntype(name, bases, dict) -> a new type"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __instancecheck__(self, instance):
'Check if an object is an instance.'
return False
__itemsize__ = 40
__mro__ = ()
__name__ = 'type'
@classmethod
def __prepare__(cls, name, bases, **kwds):
'__prepare__() -> dict\nused to create the namespace for the class statement'
return None
__qualname__ = 'type'
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __sizeof__(self):
'Return memory consumption of the type object.'
return 0
def __subclasscheck__(self, subclass):
'Check if a class is a subclass.'
return False
def __subclasses__(self):
'Return a list of immediate subclasses.'
return (cls,)
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__weakrefoffset__ = 368
def mro(self):
"Return a type's method resolution order."
return [__Type__()]
__Type__ = type
class int(object):
"int([x]) -> integer\nint(x, base=10) -> integer\n\nConvert a number or string to an integer, or return 0 if no arguments\nare given. If x is a number, return x.__int__(). For floating point\nnumbers, this truncates towards zero.\n\nIf x is not a number or if base is given, then x must be a string,\nbytes, or bytearray instance representing an integer literal in the\ngiven base. The literal can be preceded by '+' or '-' and be surrounded\nby whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\nBase 0 means to interpret the base from the string as an integer literal.\n>>> int('0b100', base=0)\n4"
def __abs__(self):
'abs(self)'
return int()
def __add__(self, value):
'Return self+value.'
return int()
def __and__(self, value):
'Return self&value.'
return int()
def __bool__(self):
'self != 0'
return False
def __ceil__(self):
'Ceiling of an Integral returns itself.'
return int()
__class__ = int
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floor__(self):
'Flooring an Integral returns itself.'
return int()
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __index__(self):
'Return self converted to an integer, if self is suitable for use as an index into a list.'
return 0
def __init__(self, x, base=10):
"int([x]) -> integer\nint(x, base=10) -> integer\n\nConvert a number or string to an integer, or return 0 if no arguments\nare given. If x is a number, return x.__int__(). For floating point\nnumbers, this truncates towards zero.\n\nIf x is not a number or if base is given, then x must be a string,\nbytes, or bytearray instance representing an integer literal in the\ngiven base. The literal can be preceded by '+' or '-' and be surrounded\nby whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\nBase 0 means to interpret the base from the string as an integer literal.\n>>> int('0b100', base=0)\n4"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __invert__(self):
'~self'
return int()
def __le__(self, value):
'Return self<=value.'
return False
def __lshift__(self, value):
'Return self<<value.'
return int()
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return int()
def __mul__(self, value):
'Return self*value.'
return int()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return int()
def __or__(self, value):
'Return self|value.'
return int()
def __pos__(self):
'+self'
return int()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return int()
def __radd__(self, value):
'Return value+self.'
return int()
def __rand__(self, value):
'Return value&self.'
return int()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return int()
def __rlshift__(self, value):
'Return value<<self.'
return int()
def __rmod__(self, value):
'Return value%self.'
return int()
def __rmul__(self, value):
'Return value*self.'
return int()
def __ror__(self, value):
'Return value|self.'
return int()
def __round__(self, ndigits=0):
'Rounding an Integral returns itself.\nRounding with an ndigits argument also returns an integer.'
return int()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return int()
def __rrshift__(self, value):
'Return value>>self.'
return int()
def __rshift__(self, value):
'Return self>>value.'
return int()
def __rsub__(self, value):
'Return value-self.'
return int()
def __rtruediv__(self, value):
'Return value/self.'
return int()
def __rxor__(self, value):
'Return value^self.'
return int()
def __sizeof__(self):
'Returns size in memory, in bytes.'
return 0
def __sub__(self, value):
'Return self-value.'
return int()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def __trunc__(self):
'Truncating an Integral returns itself.'
return int()
def __xor__(self, value):
'Return self^value.'
return int()
def as_integer_ratio(self):
'Return integer ratio.\n\nReturn a pair of integers, whose ratio is exactly equal to the original int\nand with a positive denominator.\n\n>>> (10).as_integer_ratio()\n(10, 1)\n>>> (-10).as_integer_ratio()\n(-10, 1)\n>>> (0).as_integer_ratio()\n(0, 1)'
pass
def bit_length(self):
"Number of bits necessary to represent self in binary.\n\n>>> bin(37)\n'0b100101'\n>>> (37).bit_length()\n6"
return 0
def conjugate(self):
'Returns self, the complex conjugate of any int.'
return __Complex__()
@property
def denominator(self):
'the denominator of a rational number in lowest terms'
pass
@classmethod
def from_bytes(cls, type, bytes, byteorder):
"Return the integer represented by the given array of bytes.\n\n bytes\n Holds the array of bytes to convert. The argument must either\n support the buffer protocol or be an iterable object producing bytes.\n Bytes and bytearray are examples of built-in objects that support the\n buffer protocol.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Indicates whether two's complement is used to represent the integer."
return 0
@property
def imag(self):
'the imaginary part of a complex number'
pass
@property
def numerator(self):
'the numerator of a rational number in lowest terms'
pass
@property
def real(self):
'the real part of a complex number'
pass
def to_bytes(self, length, byteorder):
"Return an array of bytes representing an integer.\n\n length\n Length of bytes object to use. An OverflowError is raised if the\n integer is not representable with the given number of bytes.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Determines whether two's complement is used to represent the integer.\n If signed is False and a negative integer is given, an OverflowError\n is raised."
return b''
__Int__ = int
class bool(int):
'bool(x) -> bool\n\nReturns True when the argument x is true, False otherwise.\nThe builtins True and False are the only two instances of the class bool.\nThe class bool is a subclass of the class int, and cannot be subclassed.'
def __and__(self, value):
'Return self&value.'
return bool()
__class__ = bool
def __init__(self, x):
'bool(x) -> bool\n\nReturns True when the argument x is true, False otherwise.\nThe builtins True and False are the only two instances of the class bool.\nThe class bool is a subclass of the class int, and cannot be subclassed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __or__(self, value):
'Return self|value.'
return bool()
def __rand__(self, value):
'Return value&self.'
return bool()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return bool()
def __rxor__(self, value):
'Return value^self.'
return bool()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return bool()
@classmethod
def from_bytes(cls, type, bytes, byteorder):
"Return the integer represented by the given array of bytes.\n\n bytes\n Holds the array of bytes to convert. The argument must either\n support the buffer protocol or be an iterable object producing bytes.\n Bytes and bytearray are examples of built-in objects that support the\n buffer protocol.\n byteorder\n The byte order used to represent the integer. If byteorder is 'big',\n the most significant byte is at the beginning of the byte array. If\n byteorder is 'little', the most significant byte is at the end of the\n byte array. To request the native byte order of the host system, use\n `sys.byteorder' as the byte order value.\n signed\n Indicates whether two's complement is used to represent the integer."
return False
__Bool__ = bool
__Long__ = __Int__
class float(object):
'Convert a string or number to a floating point number, if possible.'
def __abs__(self):
'abs(self)'
return float()
def __add__(self, value):
'Return self+value.'
return float()
def __bool__(self):
'self != 0'
return False
__class__ = float
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
'Formats the float according to format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
@classmethod
def __getformat__(cls, type, typestr):
"You probably don't want to use this function.\n\n typestr\n Must be 'double' or 'float'.\n\nIt exists mainly to be used in Python's test suite.\n\nThis function returns whichever of 'unknown', 'IEEE, big-endian' or 'IEEE,\nlittle-endian' best describes the format of floating point numbers used by the\nC type named by typestr."
return ''
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Convert a string or number to a floating point number, if possible.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return float()
def __mul__(self, value):
'Return self*value.'
return float()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return float()
def __pos__(self):
'+self'
return float()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return float()
def __radd__(self, value):
'Return value+self.'
return float()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return float()
def __rmod__(self, value):
'Return value%self.'
return float()
def __rmul__(self, value):
'Return value*self.'
return float()
def __round__(self, ndigits):
'Return the Integral closest to x, rounding half toward even.\n\nWhen an argument is passed, work like built-in round(x, ndigits).'
return float()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return float()
def __rsub__(self, value):
'Return value-self.'
return float()
def __rtruediv__(self, value):
'Return value/self.'
return float()
@classmethod
def __set_format__(cls, type, typestr, fmt):
"You probably don't want to use this function.\n\n typestr\n Must be 'double' or 'float'.\n fmt\n Must be one of 'unknown', 'IEEE, big-endian' or 'IEEE, little-endian',\n and in addition can only be one of the latter two if it appears to\n match the underlying C reality.\n\nIt exists mainly to be used in Python's test suite.\n\nOverride the automatic determination of C-level floating point type.\nThis affects how floats are converted to and from binary strings."
pass
def __sub__(self, value):
'Return self-value.'
return float()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def __trunc__(self):
'Return the Integral closest to x between 0 and x.'
return float()
def as_integer_ratio(self):
'Return integer ratio.\n\nReturn a pair of integers, whose ratio is exactly equal to the original float\nand with a positive denominator.\n\nRaise OverflowError on infinities and a ValueError on NaNs.\n\n>>> (10.0).as_integer_ratio()\n(10, 1)\n>>> (0.0).as_integer_ratio()\n(0, 1)\n>>> (-.25).as_integer_ratio()\n(-1, 4)'
return (0, 0)
def conjugate(self):
'Return self, the complex conjugate of any float.'
return __Complex__()
@classmethod
def fromhex(cls, type, string):
"Create a floating-point number from a hexadecimal string.\n\n>>> float.fromhex('0x1.ffffp10')\n2047.984375\n>>> float.fromhex('-0x1p-1074')\n-5e-324"
return 0.0
def hex(self):
"Return a hexadecimal representation of a floating-point number.\n\n>>> (-0.1).hex()\n'-0x1.999999999999ap-4'\n>>> 3.14159.hex()\n'0x1.921f9f01b866ep+1'"
return ''
@property
def imag(self):
'the imaginary part of a complex number'
pass
def is_integer(self):
'Return True if the float is an integer.'
return False
@property
def real(self):
'the real part of a complex number'
pass
__Float__ = float
class complex(object):
'Create a complex number from a real part and an optional imaginary part.\n\nThis is equivalent to (real + imag*1j) where imag defaults to 0.'
def __abs__(self):
'abs(self)'
return complex()
def __add__(self, value):
'Return self+value.'
return complex()
def __bool__(self):
'self != 0'
return False
__class__ = complex
def __divmod__(self, value):
'Return divmod(self, value).'
return (0, 0)
def __eq__(self, value):
'Return self==value.'
return False
def __float__(self):
'float(self)'
return 0.0
def __floordiv__(self, value):
'Return self//value.'
return 0
def __format__(self, format_spec):
'complex.__format__() -> str\n\nConvert to a string according to format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Create a complex number from a real part and an optional imaginary part.\n\nThis is equivalent to (real + imag*1j) where imag defaults to 0.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __int__(self):
'int(self)'
return 0
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return complex()
def __mul__(self, value):
'Return self*value.'
return complex()
def __ne__(self, value):
'Return self!=value.'
return False
def __neg__(self):
'-self'
return complex()
def __pos__(self):
'+self'
return complex()
def __pow__(self, value, mod):
'Return pow(self, value, mod).'
return complex()
def __radd__(self, value):
'Return value+self.'
return complex()
def __rdivmod__(self, value):
'Return divmod(value, self).'
return (0, 0)
def __repr__(self):
'Return repr(self).'
return ''
def __rfloordiv__(self, value):
'Return value//self.'
return complex()
def __rmod__(self, value):
'Return value%self.'
return complex()
def __rmul__(self, value):
'Return value*self.'
return complex()
def __rpow__(self, value, mod):
'Return pow(value, self, mod).'
return complex()
def __rsub__(self, value):
'Return value-self.'
return complex()
def __rtruediv__(self, value):
'Return value/self.'
return complex()
def __sub__(self, value):
'Return self-value.'
return complex()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __truediv__(self, value):
'Return self/value.'
return __Float__()
def conjugate(self):
'complex.conjugate() -> complex\n\nReturn the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.'
return __Complex__()
@property
def imag(self):
'the imaginary part of a complex number'
pass
@property
def real(self):
'the real part of a complex number'
pass
__Complex__ = complex
class tuple(object):
"Built-in immutable sequence.\n\nIf no argument is given, the constructor returns an empty tuple.\nIf iterable is specified the tuple is initialized from iterable's items.\n\nIf the argument is a tuple, the return value is the same object."
def __add__(self, value):
'Return self+value.'
return tuple()
__class__ = tuple
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
"Built-in immutable sequence.\n\nIf no argument is given, the constructor returns an empty tuple.\nIf iterable is specified the tuple is initialized from iterable's items.\n\nIf the argument is a tuple, the return value is the same object."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __TupleIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return tuple()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return tuple()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def count(self, value):
'Return number of occurrences of value.'
return 0
def index(self, value, start, stop):
'Return first index of value.\n\nRaises ValueError if the value is not present.'
return 0
__Tuple__ = tuple
class list(object):
'Built-in mutable sequence.\n\nIf no argument is given, the constructor creates a new empty list.\nThe argument must be an iterable if specified.'
def __add__(self, value):
'Return self+value.'
return list()
__class__ = list
def __contains__(self, key):
'Return key in self.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, index):
'x.__getitem__(y) <==> x[y]'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iadd__(self, value):
'Implement self+=value.'
return None
def __imul__(self, value):
'Implement self*=value.'
return None
def __init__(self, *args, **kwargs):
'Built-in mutable sequence.\n\nIf no argument is given, the constructor creates a new empty list.\nThe argument must be an iterable if specified.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __ListIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return list()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the list.'
return __ListIterator__()
def __rmul__(self, value):
'Return value*self.'
return list()
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'Return the size of the list in memory, in bytes.'
return 0
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def append(self, object):
'Append object to the end of the list.'
return None
def clear(self):
'Remove all items from list.'
return None
def copy(self):
'Return a shallow copy of the list.'
return list()
def count(self, value):
'Return number of occurrences of value.'
return 0
def extend(self, iterable):
'Extend list by appending elements from the iterable.'
return None
def index(self, value, start, stop):
'Return first index of value.\n\nRaises ValueError if the value is not present.'
return 0
def insert(self, index, object):
'Insert object before index.'
return None
def pop(self, index):
'Remove and return item at index (default last).\n\nRaises IndexError if list is empty or index is out of range.'
return self[0]
def remove(self, value):
'Remove first occurrence of value.\n\nRaises ValueError if the value is not present.'
return None
def reverse(self):
'Reverse *IN PLACE*.'
return None
def sort(self):
'Sort the list in ascending order and return None.\n\nThe sort is in-place (i.e. the list itself is modified) and stable (i.e. the\norder of two equal elements is maintained).\n\nIf a key function is given, apply it once to each list item and sort them,\nascending or descending, according to their function values.\n\nThe reverse flag can be set to sort in descending order.'
return None
__List__ = list
class dict(object):
"dict() -> new empty dictionary\ndict(mapping) -> new dictionary initialized from a mapping object's\n (key, value) pairs\ndict(iterable) -> new dictionary initialized as if via:\n d = {}\n for k, v in iterable:\n d[k] = v\ndict(**kwargs) -> new dictionary initialized with the name=value pairs\n in the keyword argument list. For example: dict(one=1, two=2)"
__class__ = dict
def __contains__(self, key):
'True if the dictionary has the specified key, else False.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'x.__getitem__(y) <==> x[y]'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, iterable):
"dict() -> new empty dictionary\ndict(mapping) -> new dictionary initialized from a mapping object's\n (key, value) pairs\ndict(iterable) -> new dictionary initialized as if via:\n d = {}\n for k, v in iterable:\n d[k] = v\ndict(**kwargs) -> new dictionary initialized with the name=value pairs\n in the keyword argument list. For example: dict(one=1, two=2)"
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __DictKeys__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict keys.'
pass
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'D.__sizeof__() -> size of D in memory, in bytes'
return 0
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def clear(self):
'D.clear() -> None. Remove all items from D.'
return None
def copy(self):
'D.copy() -> a shallow copy of D'
return dict()
@classmethod
def fromkeys(cls, type, iterable, value):
'Create a new dictionary with keys from iterable and values set to value.'
return {}
def get(self, key, default):
'Return the value for key if key is in the dictionary, else default.'
return self[0]
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return __DictItems__()
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return __DictKeys__()
def pop(self, k, d=None):
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\nIf key is not found, d is returned if given, otherwise KeyError is raised'
return self.keys()[0]
def popitem(self):
'Remove and return a (key, value) pair as a 2-tuple.\n\nPairs are returned in LIFO (last-in, first-out) order.\nRaises KeyError if the dict is empty.'
return self.items()[0]
def setdefault(self, key, default):
'Insert key with a value of default if key is not in the dictionary.\n\nReturn the value for key if key is in the dictionary, else default.'
return self[0]
def update(self, d):
'D.update([E, ]**F) -> None. Update D from dict/iterable E and F.\nIf E is present and has a .keys() method, then does: for k in E: D[k] = E[k]\nIf E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v\nIn either case, this is followed by: for k in F: D[k] = F[k]'
return None
def values(self):
"D.values() -> an object providing a view on D's values"
return __DictValues__()
__Dict__ = dict
class set(object):
'set() -> new empty set object\nset(iterable) -> new set object\n\nBuild an unordered collection of unique elements.'
def __and__(self, value):
'Return self&value.'
return set()
__class__ = set
def __contains__(self, value):
'x.__contains__(y) <==> y in x.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iand__(self, value):
'Return self&=value.'
return None
def __init__(self, iterable):
'set() -> new empty set object\nset(iterable) -> new set object\n\nBuild an unordered collection of unique elements.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __ior__(self, value):
'Return self|=value.'
return None
def __isub__(self, value):
'Return self-=value.'
return None
def __iter__(self):
'Implement iter(self).'
return __SetIterator__()
def __ixor__(self, value):
'Return self^=value.'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return set()
def __rand__(self, value):
'Return value&self.'
return set()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return set()
def __rsub__(self, value):
'Return value-self.'
return set()
def __rxor__(self, value):
'Return value^self.'
return set()
def __sizeof__(self):
'S.__sizeof__() -> size of S in memory, in bytes'
return 0
def __sub__(self, value):
'Return self-value.'
return set()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return set()
def add(self, value):
'Add an element to a set.\n\nThis has no effect if the element is already present.'
return None
def clear(self):
'Remove all elements from this set.'
return None
def copy(self):
'Return a shallow copy of a set.'
return set()
def difference(self, other):
'Return the difference of two or more sets as a new set.\n\n(i.e. all elements that are in this set but not the others.)'
return set()
def difference_update(self, *others):
'Remove all elements of another set from this set.'
return None
def discard(self, elem):
'Remove an element from a set if it is a member.\n\nIf the element is not a member, do nothing.'
return None
def intersection(self, other):
'Return the intersection of two sets as a new set.\n\n(i.e. all elements that are in both sets.)'
return set()
def intersection_update(self, *others):
'Update a set with the intersection of itself and another.'
return None
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
return False
def issubset(self, other):
'Report whether another set contains this set.'
return False
def issuperset(self, other):
'Report whether this set contains another set.'
return False
def pop(self):
'Remove and return an arbitrary set element.\nRaises KeyError if the set is empty.'
pass
def remove(self, elem):
'Remove an element from a set; it must be a member.\n\nIf the element is not a member, raise a KeyError.'
return None
def symmetric_difference(self, other):
'Return the symmetric difference of two sets as a new set.\n\n(i.e. all elements that are in exactly one of the sets.)'
return set()
def symmetric_difference_update(self, *others):
'Update a set with the symmetric difference of itself and another.'
return None
def union(self, *others):
'Return the union of sets as a new set.\n\n(i.e. all elements that are in either set.)'
return set()
def update(self, *others):
'Update a set with the union of itself and others.'
return None
__Set__ = set
class frozenset(object):
'frozenset() -> empty frozenset object\nfrozenset(iterable) -> frozenset object\n\nBuild an immutable unordered collection of unique elements.'
def __and__(self, value):
'Return self&value.'
return frozenset()
__class__ = frozenset
def __contains__(self, value):
'x.__contains__(y) <==> y in x.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, iterable):
'frozenset() -> empty frozenset object\nfrozenset(iterable) -> frozenset object\n\nBuild an immutable unordered collection of unique elements.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __SetIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return frozenset()
def __rand__(self, value):
'Return value&self.'
return frozenset()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __ror__(self, value):
'Return value|self.'
return frozenset()
def __rsub__(self, value):
'Return value-self.'
return frozenset()
def __rxor__(self, value):
'Return value^self.'
return frozenset()
def __sizeof__(self):
'S.__sizeof__() -> size of S in memory, in bytes'
return 0
def __sub__(self, value):
'Return self-value.'
return frozenset()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return frozenset()
def copy(self):
'Return a shallow copy of a set.'
return frozenset()
def difference(self, other):
'Return the difference of two or more sets as a new set.\n\n(i.e. all elements that are in this set but not the others.)'
return frozenset()
def intersection(self, other):
'Return the intersection of two sets as a new set.\n\n(i.e. all elements that are in both sets.)'
return frozenset()
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
return False
def issubset(self, other):
'Report whether another set contains this set.'
return False
def issuperset(self, other):
'Report whether this set contains another set.'
return False
def symmetric_difference(self, other):
'Return the symmetric difference of two sets as a new set.\n\n(i.e. all elements that are in exactly one of the sets.)'
return frozenset()
def union(self, *others):
'Return the union of sets as a new set.\n\n(i.e. all elements that are in either set.)'
return frozenset()
__FrozenSet__ = frozenset
class bytes(object):
'bytes(iterable_of_ints) -> bytes\nbytes(string, encoding[, errors]) -> bytes\nbytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer\nbytes(int) -> bytes object of size given by the parameter initialized with null bytes\nbytes() -> empty bytes object\n\nConstruct an immutable array of bytes from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - any object implementing the buffer API.\n - an integer'
def __add__(self, value):
'Return self+value.'
return bytes()
__class__ = bytes
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
return bytes()
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, string, encoding, errors=None):
'bytes(iterable_of_ints) -> bytes\nbytes(string, encoding[, errors]) -> bytes\nbytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer\nbytes(int) -> bytes object of size given by the parameter initialized with null bytes\nbytes() -> empty bytes object\n\nConstruct an immutable array of bytes from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - any object implementing the buffer API.\n - an integer'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __BytesIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return bytes()
def __mul__(self, value):
'Return self*value.'
return bytes()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return bytes()
def __rmul__(self, value):
'Return value*self.'
return bytes()
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def capitalize(self):
'B.capitalize() -> copy of B\n\nReturn a copy of B with only its first character capitalized (ASCII)\nand the rest lower-cased.'
return bytes()
def center(self, width, fillchar):
'Return a centered string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def count(self, sub, start=0, end=-1):
'B.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of subsection sub in\nbytes B[start:end]. Optional arguments start and end are interpreted\nas in slice notation.'
return 0
def decode(self, encoding, errors):
"Decode the bytes using the codec registered for encoding.\n\n encoding\n The encoding with which to decode the bytes.\n errors\n The error handling scheme to use for the handling of decoding errors.\n The default is 'strict' meaning that decoding errors raise a\n UnicodeDecodeError. Other possible values are 'ignore' and 'replace'\n as well as any other name registered with codecs.register_error that\n can handle UnicodeDecodeErrors."
return ''
def endswith(self, suffix, start=0, end=-1):
'B.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if B ends with the specified suffix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nsuffix can also be a tuple of bytes to try.'
return False
def expandtabs(self, tabsize):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return bytes()
def find(self, sub, start=0, end=-1):
'B.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
@classmethod
def fromhex(cls, type, string):
"Create a bytes object from a string of hexadecimal numbers.\n\nSpaces between two numbers are accepted.\nExample: bytes.fromhex('B9 01EF') -> b'\\\\xb9\\\\x01\\\\xef'."
return b''
def hex(self):
"Create a str of hexadecimal numbers from a bytes object.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = b'\\xb9\\x01\\xef'\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
def index(self, sub, start=0, end=-1):
'B.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the subsection is not found.'
return 0
def isalnum(self):
'B.isalnum() -> bool\n\nReturn True if all characters in B are alphanumeric\nand there is at least one character in B, False otherwise.'
return False
def isalpha(self):
'B.isalpha() -> bool\n\nReturn True if all characters in B are alphabetic\nand there is at least one character in B, False otherwise.'
return False
def isascii(self):
'B.isascii() -> bool\n\nReturn True if B is empty or all characters in B are ASCII,\nFalse otherwise.'
return True
def isdigit(self):
'B.isdigit() -> bool\n\nReturn True if all characters in B are digits\nand there is at least one character in B, False otherwise.'
return False
def islower(self):
'B.islower() -> bool\n\nReturn True if all cased characters in B are lowercase and there is\nat least one cased character in B, False otherwise.'
return False
def isspace(self):
'B.isspace() -> bool\n\nReturn True if all characters in B are whitespace\nand there is at least one character in B, False otherwise.'
return False
def istitle(self):
'B.istitle() -> bool\n\nReturn True if B is a titlecased string and there is at least one\ncharacter in B, i.e. uppercase characters may only follow uncased\ncharacters and lowercase characters only cased ones. Return False\notherwise.'
return False
def isupper(self):
'B.isupper() -> bool\n\nReturn True if all cased characters in B are uppercase and there is\nat least one cased character in B, False otherwise.'
return False
def join(self, iterable_of_bytes):
"Concatenate any number of bytes objects.\n\nThe bytes whose method is called is inserted in between each pair.\n\nThe result is returned as a new bytes object.\n\nExample: b'.'.join([b'ab', b'pq', b'rs']) -> b'ab.pq.rs'."
return b''
def ljust(self, width, fillchar):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def lower(self):
'B.lower() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to lowercase.'
return bytes()
def lstrip(self, bytes):
'Strip leading bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading ASCII whitespace.'
return bytes()
@classmethod
def maketrans(cls, frm, to):
'Return a translation table useable for the bytes or bytearray translate method.\n\nThe returned table will be one where each byte in frm is mapped to the byte at\nthe same position in to.\n\nThe bytes objects frm and to must be of the same length.'
return b''
def partition(self, sep):
'Partition the bytes into three parts using the given separator.\n\nThis will search for the separator sep in the bytes. If the separator is found,\nreturns a 3-tuple containing the part before the separator, the separator\nitself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing the original bytes\nobject and two empty bytes objects.'
return (bytes(), bytes(), bytes())
def replace(self, old, new, count):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return bytes()
def rfind(self, sub, start=0, end=-1):
'B.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'B.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaise ValueError when the subsection is not found.'
return 0
def rjust(self, width, fillchar):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytes()
def rpartition(self, sep):
'Partition the bytes into three parts using the given separator.\n\nThis will search for the separator sep in the bytes, starting at the end. If\nthe separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing two empty bytes\nobjects and the original bytes object.'
return (bytes(), bytes(), bytes())
def rsplit(self, sep, maxsplit):
'Return a list of the sections in the bytes, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytes.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplitting is done starting at the end of the bytes and working to the front.'
return [bytes()]
def rstrip(self, bytes):
'Strip trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip trailing ASCII whitespace.'
return bytes()
def split(self, sep, maxsplit):
'Return a list of the sections in the bytes, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytes.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [bytes()]
    def splitlines(self, keepends=False):
        'Return a list of the lines in the bytes, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
        return [bytes()]
def startswith(self, prefix, start=0, end=-1):
'B.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if B starts with the specified prefix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nprefix can also be a tuple of bytes to try.'
return False
def strip(self, bytes):
'Strip leading and trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading and trailing ASCII whitespace.'
return bytes()
def swapcase(self):
'B.swapcase() -> copy of B\n\nReturn a copy of B with uppercase ASCII characters converted\nto lowercase ASCII and vice versa.'
return bytes()
def title(self):
'B.title() -> copy of B\n\nReturn a titlecased version of B, i.e. ASCII words start with uppercase\ncharacters, all remaining cased characters have lowercase.'
return bytes()
def translate(self, table, delete):
'Return a copy with each character mapped by the given translation table.\n\n table\n Translation table, which must be a bytes object of length 256.\n\nAll characters occurring in the optional argument delete are removed.\nThe remaining characters are mapped through the given translation table.'
return bytes()
def upper(self):
'B.upper() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to uppercase.'
return bytes()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe original string is never truncated.'
return bytes()
__Bytes__ = bytes
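# Editor's sketch (not generator output): exercising a few of the bytes
# methods stubbed above against the real built-in type. The module-level
# `bytes` name here is the stub class, so the demo reaches the genuine type
# through the builtins module; `_py` and `_demo_bytes` are illustrative names.
# Assumes CPython 3.8+ for hex()'s sep argument described in the docstring.
def _demo_bytes():
    import builtins as _py
    data = _py.bytes.fromhex('b9 01 ef')        # b'\xb9\x01\xef'
    assert data.hex() == 'b901ef'
    assert data.hex(':') == 'b9:01:ef'          # separator between every byte
    assert data.partition(b'\x01') == (b'\xb9', b'\x01', b'\xef')
    table = _py.bytes.maketrans(b'\xb9', b'\x00')
    assert data.translate(table) == b'\x00\x01\xef'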
class bytes_iterator(object):
__class__ = bytes_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return bytes_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
return 0
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__BytesIterator__ = bytes_iterator
class str(object):
"str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'."
def __add__(self, value):
'Return self+value.'
return str()
__class__ = str
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __format__(self, format_spec):
'Return a formatted version of the string as described by format_spec.'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
return str()
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, bytes_or_buffer, encoding=None, errors=None):
"str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __UnicodeIterator__()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return str()
def __mul__(self, value):
'Return self*value.'
return str()
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return str()
def __rmul__(self, value):
'Return value*self.'
return str()
def __sizeof__(self):
'Return the size of the string in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def capitalize(self):
'Return a capitalized version of the string.\n\nMore specifically, make the first character have upper case and the rest lower\ncase.'
return str()
def casefold(self):
'Return a version of the string suitable for caseless comparisons.'
return str()
def center(self, width, fillchar):
'Return a centered string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def count(self, sub, start=0, end=-1):
'S.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of substring sub in\nstring S[start:end]. Optional arguments start and end are\ninterpreted as in slice notation.'
return 0
def encode(self, encoding, errors):
"Encode the string using the codec registered for encoding.\n\n encoding\n The encoding in which to encode the string.\n errors\n The error handling scheme to use for encoding errors.\n The default is 'strict' meaning that encoding errors raise a\n UnicodeEncodeError. Other possible values are 'ignore', 'replace' and\n 'xmlcharrefreplace' as well as any other name registered with\n codecs.register_error that can handle UnicodeEncodeErrors."
return b''
def endswith(self, suffix, start=0, end=-1):
'S.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if S ends with the specified suffix, False otherwise.\nWith optional start, test S beginning at that position.\nWith optional end, stop comparing S at that position.\nsuffix can also be a tuple of strings to try.'
return False
def expandtabs(self, tabsize):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return str()
def find(self, sub, start=0, end=-1):
'S.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def format(self, *args, **kwargs):
"S.format(*args, **kwargs) -> str\n\nReturn a formatted version of S, using substitutions from args and kwargs.\nThe substitutions are identified by braces ('{' and '}')."
return str()
def format_map(self, mapping):
"S.format_map(mapping) -> str\n\nReturn a formatted version of S, using substitutions from mapping.\nThe substitutions are identified by braces ('{' and '}')."
return str()
def index(self, sub, start=0, end=-1):
'S.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the substring is not found.'
return 0
def isalnum(self):
'Return True if the string is an alpha-numeric string, False otherwise.\n\nA string is alpha-numeric if all characters in the string are alpha-numeric and\nthere is at least one character in the string.'
return False
def isalpha(self):
'Return True if the string is an alphabetic string, False otherwise.\n\nA string is alphabetic if all characters in the string are alphabetic and there\nis at least one character in the string.'
return False
def isascii(self):
'Return True if all characters in the string are ASCII, False otherwise.\n\nASCII characters have code points in the range U+0000-U+007F.\nEmpty string is ASCII too.'
        return False
def isdecimal(self):
'Return True if the string is a decimal string, False otherwise.\n\nA string is a decimal string if all characters in the string are decimal and\nthere is at least one character in the string.'
return False
def isdigit(self):
'Return True if the string is a digit string, False otherwise.\n\nA string is a digit string if all characters in the string are digits and there\nis at least one character in the string.'
return False
def isidentifier(self):
'Return True if the string is a valid Python identifier, False otherwise.\n\nCall keyword.iskeyword(s) to test whether string s is a reserved identifier,\nsuch as "def" or "class".'
return False
def islower(self):
'Return True if the string is a lowercase string, False otherwise.\n\nA string is lowercase if all cased characters in the string are lowercase and\nthere is at least one cased character in the string.'
return False
def isnumeric(self):
'Return True if the string is a numeric string, False otherwise.\n\nA string is numeric if all characters in the string are numeric and there is at\nleast one character in the string.'
return False
def isprintable(self):
'Return True if the string is printable, False otherwise.\n\nA string is printable if all of its characters are considered printable in\nrepr() or if it is empty.'
return False
def isspace(self):
'Return True if the string is a whitespace string, False otherwise.\n\nA string is whitespace if all characters in the string are whitespace and there\nis at least one character in the string.'
return False
def istitle(self):
'Return True if the string is a title-cased string, False otherwise.\n\nIn a title-cased string, upper- and title-case characters may only\nfollow uncased characters and lowercase characters only cased ones.'
return False
def isupper(self):
'Return True if the string is an uppercase string, False otherwise.\n\nA string is uppercase if all cased characters in the string are uppercase and\nthere is at least one cased character in the string.'
return False
def join(self, iterable):
"Concatenate any number of strings.\n\nThe string whose method is called is inserted in between each given string.\nThe result is returned as a new string.\n\nExample: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'"
return ''
def ljust(self, width, fillchar):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def lower(self):
'Return a copy of the string converted to lowercase.'
return str()
def lstrip(self, chars):
'Return a copy of the string with leading whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
    @staticmethod
    def maketrans(x, y=None, z=None):
'Return a translation table usable for str.translate().\n\nIf there is only one argument, it must be a dictionary mapping Unicode\nordinals (integers) or characters to Unicode ordinals, strings or None.\nCharacter keys will be then converted to ordinals.\nIf there are two arguments, they must be strings of equal length, and\nin the resulting dictionary, each character in x will be mapped to the\ncharacter at the same position in y. If there is a third argument, it\nmust be a string, whose characters will be mapped to None in the result.'
return {}
def partition(self, sep):
'Partition the string into three parts using the given separator.\n\nThis will search for the separator in the string. If the separator is found,\nreturns a 3-tuple containing the part before the separator, the separator\nitself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing the original string\nand two empty strings.'
return (str(), str(), str())
def replace(self, old, new, count):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return str()
def rfind(self, sub, start=0, end=-1):
'S.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'S.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in S where substring sub is found,\nsuch that sub is contained within S[start:end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the substring is not found.'
return 0
def rjust(self, width, fillchar):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character (default is a space).'
return str()
def rpartition(self, sep):
'Partition the string into three parts using the given separator.\n\nThis will search for the separator in the string, starting at the end. If\nthe separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it.\n\nIf the separator is not found, returns a 3-tuple containing two empty strings\nand the original string.'
return (str(), str(), str())
def rsplit(self, sep, maxsplit):
'Return a list of the words in the string, using sep as the delimiter string.\n\n sep\n The delimiter according which to split the string.\n None (the default value) means split according to any whitespace,\n and discard empty strings from the result.\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplits are done starting at the end of the string and working to the front.'
return [str()]
def rstrip(self, chars):
'Return a copy of the string with trailing whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
def split(self, sep, maxsplit):
'Return a list of the words in the string, using sep as the delimiter string.\n\n sep\n The delimiter according which to split the string.\n None (the default value) means split according to any whitespace,\n and discard empty strings from the result.\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [str()]
    def splitlines(self, keepends=False):
        'Return a list of the lines in the string, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
        return [str()]
def startswith(self, prefix, start=0, end=-1):
'S.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if S starts with the specified prefix, False otherwise.\nWith optional start, test S beginning at that position.\nWith optional end, stop comparing S at that position.\nprefix can also be a tuple of strings to try.'
return False
def strip(self, chars):
'Return a copy of the string with leading and trailing whitespace removed.\n\nIf chars is given and not None, remove characters in chars instead.'
return str()
def swapcase(self):
'Convert uppercase characters to lowercase and lowercase characters to uppercase.'
return str()
def title(self):
'Return a version of the string where each word is titlecased.\n\nMore specifically, words start with uppercased characters and all remaining\ncased characters have lower case.'
return str()
def translate(self, table):
'Replace each character in the string using the given translation table.\n\n table\n Translation table, which must be a mapping of Unicode ordinals to\n Unicode ordinals, strings, or None.\n\nThe table must implement lookup/indexing via __getitem__, for instance a\ndictionary or list. If this operation raises LookupError, the character is\nleft untouched. Characters mapped to None are deleted.'
return str()
def upper(self):
'Return a copy of the string converted to uppercase.'
return str()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe string is never truncated.'
return str()
__Unicode__ = str
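# Editor's sketch (not generator output): a few of the str methods stubbed
# above, run against the real built-in type. String literals already use the
# genuine str, while static constructors are reached via builtins because the
# module-level `str` name is the stub class. `_py`/`_demo_str` are illustrative.
def _demo_str():
    import builtins as _py
    assert 'spam, eggs'.partition(', ') == ('spam', ', ', 'eggs')
    table = _py.str.maketrans('ae', 'AE')       # the real maketrans is static
    assert 'cheese'.translate(table) == 'chEEsE'
    assert 'ß'.casefold() == 'ss'               # caseless-comparison form
    assert '{0}:{1}'.format('a', 'b') == 'a:b'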
class str_iterator(object):
__class__ = str_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return str_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
return __Unicode__()
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__UnicodeIterator__ = str_iterator
__Str__ = __Unicode__
__StrIterator__ = __UnicodeIterator__
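# Editor's sketch (not generator output): the iterator protocol shared by the
# *_iterator stubs in this file (str_iterator, bytes_iterator, tuple_iterator,
# list_iterator). Helper names are illustrative; real builtins come from _py.
def _demo_str_iterator():
    import builtins as _py
    it = _py.iter('ab')
    assert it.__length_hint__() == 2            # estimate of remaining items
    assert _py.next(it) == 'a' and _py.next(it) == 'b'
    try:
        _py.next(it)
    except _py.StopIteration:
        pass                                    # exhausted iterators raise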
class module(object):
'Create a module object.\n\nThe name must be a string; the optional doc argument can have any type.'
__class__ = module
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
def __dir__(self):
'__dir__() -> list\nspecialized dir() implementation'
return ['']
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Create a module object.\n\nThe name must be a string; the optional doc argument can have any type.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Module__ = module
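# Editor's sketch (not generator output): the `module` type stubbed above is
# exposed at runtime as types.ModuleType; attributes live in the module dict.
# `_demo_module` is an illustrative name.
def _demo_module():
    import types
    m = types.ModuleType('demo', 'a throwaway module')   # name, optional doc
    m.answer = 42
    assert m.__dict__['answer'] == 42
    assert m.__doc__ == 'a throwaway module'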
class function(object):
'Create a function object.\n\n code\n a code object\n globals\n the globals dictionary\n name\n a string that overrides the name from the code object\n argdefs\n a tuple that specifies the default argument values\n closure\n a tuple that supplies the bindings for free variables'
@property
def __annotations__(self):
return {}
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = function
@property
def __closure__(self):
pass
@property
def __code__(self):
return object()
@property
def __defaults__(self):
pass
__dict__ = {}
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return function()
@property
def __globals__(self):
return {}
def __init__(self, *args, **kwargs):
'Create a function object.\n\n code\n a code object\n globals\n the globals dictionary\n name\n a string that overrides the name from the code object\n argdefs\n a tuple that specifies the default argument values\n closure\n a tuple that supplies the bindings for free variables'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __kwdefaults__(self):
pass
__name__ = 'function'
__qualname__ = 'function'
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Function__ = function
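# Editor's sketch (not generator output): the introspection attributes stubbed
# on `function` above, read off an ordinary def-created function.
def _demo_function():
    def add(a, b=1):
        return a + b
    assert add.__code__.co_varnames[:2] == ('a', 'b')
    assert add.__defaults__ == (1,)     # default argument values, as a tuple
    assert add.__closure__ is None      # no free variables captured here
    add.tag = 'extra'                   # plain functions accept new attributes
    assert add.__dict__['tag'] == 'extra'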
class wrapper_descriptor(object):
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = wrapper_descriptor
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return wrapper_descriptor()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
__name__ = 'wrapper_descriptor'
@property
def __objclass__(self):
pass
__qualname__ = 'wrapper_descriptor'
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__BuiltinMethodDescriptor__ = wrapper_descriptor
class builtin_function_or_method(object):
def __call__(self, *args, **kwargs):
'Call self as a function.'
pass
__class__ = builtin_function_or_method
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
__name__ = 'builtin_function_or_method'
def __ne__(self, value):
'Return self!=value.'
return False
__qualname__ = 'builtin_function_or_method'
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@property
def __self__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__text_signature__ = None
__BuiltinFunction__ = builtin_function_or_method
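# Editor's sketch (not generator output) covering both C-level callable stubs
# above: slot wrappers such as str.__len__ are wrapper_descriptor objects, and
# bound C functions such as list.append on an instance are
# builtin_function_or_method objects carrying __self__. CPython-specific.
def _demo_c_callables():
    import builtins as _py
    assert _py.type(_py.str.__len__).__name__ == 'wrapper_descriptor'
    bound = [].append                   # binding happens via __get__
    assert _py.type(bound).__name__ == 'builtin_function_or_method'
    assert bound.__self__ == []         # the instance the method is bound to
    assert _py.len.__self__ is _py      # builtin functions carry their module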
class generator(object):
__class__ = generator
def __del__(self):
return None
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return generator()
__name__ = 'generator'
def __next__(self):
'Implement next(self).'
pass
__qualname__ = 'generator'
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def close(self):
'close() -> raise GeneratorExit inside generator.'
return None
@property
def gi_code(self):
pass
@property
def gi_frame(self):
pass
@property
def gi_running(self):
pass
@property
def gi_yieldfrom(self):
'object being iterated by yield from, or None'
pass
def send(self, value):
"send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration."
return self.__next__()
def throw(self, type, value=None, traceback=None):
'throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.'
return None
__Generator__ = generator
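# Editor's sketch (not generator output) of the generator protocol stubbed
# above: next() runs to the first yield, send() resumes with a value, and
# close() raises GeneratorExit inside the paused generator.
def _demo_generator():
    import builtins as _py
    def echo():
        received = yield 'ready'
        yield received
    g = echo()
    assert _py.next(g) == 'ready'       # run to the first yield
    assert g.send('hi') == 'hi'         # resume, delivering a value
    g.close()                           # GeneratorExit inside the generator
    assert g.gi_running is False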
class property(object):
'Property attribute.\n\n fget\n function to be used for getting an attribute value\n fset\n function to be used for setting an attribute value\n fdel\n function to be used for del\'ing an attribute\n doc\n docstring\n\nTypical use is to define a managed attribute x:\n\nclass C(object):\n def getx(self): return self._x\n def setx(self, value): self._x = value\n def delx(self): del self._x\n x = property(getx, setx, delx, "I\'m the \'x\' property.")\n\nDecorators make defining new properties or modifying existing ones easy:\n\nclass C(object):\n @property\n def x(self):\n "I am the \'x\' property."\n return self._x\n @x.setter\n def x(self, value):\n self._x = value\n @x.deleter\n def x(self):\n del self._x'
__class__ = property
def __delete__(self, instance):
'Delete an attribute of instance.'
return None
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return property()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Property attribute.\n\n fget\n function to be used for getting an attribute value\n fset\n function to be used for setting an attribute value\n fdel\n function to be used for del\'ing an attribute\n doc\n docstring\n\nTypical use is to define a managed attribute x:\n\nclass C(object):\n def getx(self): return self._x\n def setx(self, value): self._x = value\n def delx(self): del self._x\n x = property(getx, setx, delx, "I\'m the \'x\' property.")\n\nDecorators make defining new properties or modifying existing ones easy:\n\nclass C(object):\n @property\n def x(self):\n "I am the \'x\' property."\n return self._x\n @x.setter\n def x(self, value):\n self._x = value\n @x.deleter\n def x(self):\n del self._x'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
def __set__(self, instance, value):
'Set an attribute of instance to value.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def deleter(self, func):
'Descriptor to change the deleter on a property.'
return func
@property
def fdel(self):
pass
@property
def fget(self):
pass
@property
def fset(self):
pass
def getter(self, func):
'Descriptor to change the getter on a property.'
return func
def setter(self, func):
'Descriptor to change the setter on a property.'
return func
__Property__ = property
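# Editor's sketch (not generator output) of the managed-attribute idiom from
# the property docstring above, including the setter/deleter re-decoration
# hooks. The real property is taken from builtins since the module-level name
# is the stub class.
def _demo_property():
    import builtins as _py

    class C:
        @_py.property
        def x(self):
            return self._x

        @x.setter
        def x(self, value):
            self._x = value

        @x.deleter
        def x(self):
            del self._x

    c = C()
    c.x = 3
    assert c.x == 3
    del c.x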
class classmethod(object):
'classmethod(function) -> method\n\nConvert a function to be a class method.\n\nA class method receives the class as implicit first argument,\njust like an instance method receives the instance.\nTo declare a class method, use this idiom:\n\n class C:\n @classmethod\n def f(cls, arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). The instance is ignored except for its class.\nIf a class method is called for a derived class, the derived class\nobject is passed as the implied first argument.\n\nClass methods are different than C++ or Java static methods.\nIf you want those, see the staticmethod builtin.'
__class__ = classmethod
__dict__ = {}
@property
def __func__(self):
pass
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return classmethod()
def __init__(self, function):
'classmethod(function) -> method\n\nConvert a function to be a class method.\n\nA class method receives the class as implicit first argument,\njust like an instance method receives the instance.\nTo declare a class method, use this idiom:\n\n class C:\n @classmethod\n def f(cls, arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). The instance is ignored except for its class.\nIf a class method is called for a derived class, the derived class\nobject is passed as the implied first argument.\n\nClass methods are different than C++ or Java static methods.\nIf you want those, see the staticmethod builtin.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__ClassMethod__ = classmethod
class staticmethod(object):
'staticmethod(function) -> method\n\nConvert a function to be a static method.\n\nA static method does not receive an implicit first argument.\nTo declare a static method, use this idiom:\n\n class C:\n @staticmethod\n def f(arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). Both the class and the instance are ignored, and\nneither is passed implicitly as the first argument to the method.\n\nStatic methods in Python are similar to those found in Java or C++.\nFor a more advanced concept, see the classmethod builtin.'
__class__ = staticmethod
__dict__ = {}
@property
def __func__(self):
pass
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return staticmethod()
def __init__(self, function):
'staticmethod(function) -> method\n\nConvert a function to be a static method.\n\nA static method does not receive an implicit first argument.\nTo declare a static method, use this idiom:\n\n class C:\n @staticmethod\n def f(arg1, arg2, ...):\n ...\n\nIt can be called either on the class (e.g. C.f()) or on an instance\n(e.g. C().f()). Both the class and the instance are ignored, and\nneither is passed implicitly as the first argument to the method.\n\nStatic methods in Python are similar to those found in Java or C++.\nFor a more advanced concept, see the classmethod builtin.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@property
def __isabstractmethod__(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__StaticMethod__ = staticmethod
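# Editor's sketch (not generator output) contrasting the two decorators
# stubbed above: a classmethod receives the class as its implicit first
# argument, a staticmethod receives nothing implicitly.
def _demo_class_and_static_methods():
    import builtins as _py

    class C:
        @_py.classmethod
        def which(cls):
            return cls.__name__

        @_py.staticmethod
        def plain(value):
            return value

    assert C.which() == 'C'             # callable on the class...
    assert C().which() == 'C'           # ...and on an instance
    assert C.plain(7) == 7              # no implicit argument at all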
class ellipsis(object):
__class__ = ellipsis
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__Ellipsis__ = ellipsis
class tuple_iterator(object):
__class__ = tuple_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return tuple_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__TupleIterator__ = tuple_iterator
class list_iterator(object):
__class__ = list_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return list_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__ListIterator__ = list_iterator
class dict_keys(object):
def __and__(self, value):
'Return self&value.'
return dict_keys()
__class__ = dict_keys
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_keys()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return dict_keys()
def __rand__(self, value):
'Return value&self.'
return dict_keys()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict keys.'
pass
def __ror__(self, value):
'Return value|self.'
return dict_keys()
def __rsub__(self, value):
'Return value-self.'
return dict_keys()
def __rxor__(self, value):
'Return value^self.'
return dict_keys()
def __sub__(self, value):
'Return self-value.'
return dict_keys()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return dict_keys()
def isdisjoint(self, other):
'Return True if the view and the given iterable have a null intersection.'
return False
__DictKeys__ = dict_keys
class dict_values(object):
__class__ = dict_values
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_values()
def __len__(self):
'Return len(self).'
return 0
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict values.'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__DictValues__ = dict_values
class dict_items(object):
def __and__(self, value):
'Return self&value.'
return dict_items()
__class__ = dict_items
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return dict_items()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __or__(self, value):
'Return self|value.'
return dict_items()
def __rand__(self, value):
'Return value&self.'
return dict_items()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator over the dict items.'
pass
def __ror__(self, value):
'Return value|self.'
return dict_items()
def __rsub__(self, value):
'Return value-self.'
return dict_items()
def __rxor__(self, value):
'Return value^self.'
return dict_items()
def __sub__(self, value):
'Return self-value.'
return dict_items()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __xor__(self, value):
'Return self^value.'
return dict_items()
def isdisjoint(self, other):
'Return True if the view and the given iterable have a null intersection.'
return False
__DictItems__ = dict_items
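# Editor's sketch (not generator output) of the three dict views stubbed
# above: keys and items support set algebra (&, |, -, ^ and isdisjoint);
# values support only iteration and len.
def _demo_dict_views():
    import builtins as _py
    d = _py.dict(a=1, b=2)
    e = _py.dict(b=2, c=3)
    assert d.keys() & e.keys() == {'b'}
    assert d.keys() | e.keys() == {'a', 'b', 'c'}
    assert d.items() - e.items() == {('a', 1)}
    assert d.keys().isdisjoint(_py.dict(z=0).keys())
    assert _py.list(d.values()) == [1, 2]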
class set_iterator(object):
__class__ = set_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return set_iterator()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__SetIterator__ = set_iterator
class callable_iterator(object):
__class__ = callable_iterator
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return callable_iterator()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
__CallableIterator__ = callable_iterator
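# Editor's sketch (not generator output): callable_iterator is what the
# two-argument form of iter() returns -- it keeps calling the callable until
# the sentinel value appears.
def _demo_callable_iterator():
    import builtins as _py
    chunks = ['ab', 'cd', '']
    def read():
        return chunks.pop(0)            # stand-in for e.g. file.read(n)
    it = _py.iter(read, '')             # stop once read() returns ''
    assert _py.type(it).__name__ == 'callable_iterator'
    assert _py.list(it) == ['ab', 'cd']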
__builtin_module_names__ = "_abc,_ast,_bisect,_blake2,_codecs,_collections,_csv,_datetime,_elementtree,_functools,_heapq,_imp,_io,_locale,_md5,_operator,_pickle,_posixsubprocess,_random,_sha1,_sha256,_sha3,_sha512,_signal,_socket,_sre,_stat,_statistics,_string,_struct,_symtable,_thread,_tracemalloc,_warnings,_weakref,array,atexit,binascii,builtins,cmath,errno,faulthandler,fcntl,gc,grp,itertools,marshal,math,posix,pwd,pyexpat,select,spwd,sys,syslog,time,unicodedata,xxsubtype,zlib"
class ArithmeticError(Exception):
'Base class for arithmetic errors.'
__class__ = ArithmeticError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for arithmetic errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class AssertionError(Exception):
'Assertion failed.'
__class__ = AssertionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Assertion failed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class AttributeError(Exception):
'Attribute not found.'
__class__ = AttributeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Attribute not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BaseException(object):
'Common base class for all exceptions'
@property
def __cause__(self):
'exception cause'
pass
__class__ = BaseException
@property
def __context__(self):
'exception context'
pass
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
__dict__ = {}
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Common base class for all exceptions'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
def __setstate__(self, state):
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __suppress_context__(self):
pass
@property
def __traceback__(self):
pass
@property
def args(self):
pass
def with_traceback(self):
'Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.'
pass
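# Editor's sketch (not generator output) of the exception plumbing stubbed
# above: args, explicit chaining via __cause__ ("raise ... from ..."), and
# with_traceback() returning the exception itself.
def _demo_base_exception():
    import builtins as _py
    try:
        try:
            raise _py.ValueError('inner', 42)
        except _py.ValueError as inner:
            assert inner.args == ('inner', 42)
            raise _py.RuntimeError('outer') from inner
    except _py.RuntimeError as outer:
        assert _py.isinstance(outer.__cause__, _py.ValueError)
        assert outer.with_traceback(outer.__traceback__) is outer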
class BlockingIOError(OSError):
'I/O operation would block.'
__class__ = BlockingIOError
__dict__ = {}
def __init__(self, *args, **kwargs):
'I/O operation would block.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BrokenPipeError(ConnectionError):
'Broken pipe.'
__class__ = BrokenPipeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Broken pipe.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BufferError(Exception):
'Buffer error.'
__class__ = BufferError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Buffer error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class BytesWarning(Warning):
'Base class for warnings about bytes and buffer related problems, mostly\nrelated to conversion from str or comparing to str.'
__class__ = BytesWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about bytes and buffer related problems, mostly\nrelated to conversion from str or comparing to str.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ChildProcessError(OSError):
'Child process error.'
__class__ = ChildProcessError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Child process error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionAbortedError(ConnectionError):
'Connection aborted.'
__class__ = ConnectionAbortedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection aborted.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionError(OSError):
'Connection error.'
__class__ = ConnectionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionRefusedError(ConnectionError):
'Connection refused.'
__class__ = ConnectionRefusedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection refused.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ConnectionResetError(ConnectionError):
'Connection reset.'
__class__ = ConnectionResetError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Connection reset.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class DeprecationWarning(Warning):
'Base class for warnings about deprecated features.'
__class__ = DeprecationWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about deprecated features.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class EOFError(Exception):
'Read beyond end of file.'
__class__ = EOFError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Read beyond end of file.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
Ellipsis = ellipsis()
EnvironmentError = OSError
class Exception(BaseException):
'Common base class for all non-exit exceptions.'
__class__ = Exception
__dict__ = {}
def __init__(self, *args, **kwargs):
'Common base class for all non-exit exceptions.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FileExistsError(OSError):
'File already exists.'
__class__ = FileExistsError
__dict__ = {}
def __init__(self, *args, **kwargs):
'File already exists.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FileNotFoundError(OSError):
'File not found.'
__class__ = FileNotFoundError
__dict__ = {}
def __init__(self, *args, **kwargs):
'File not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FloatingPointError(ArithmeticError):
'Floating point operation failed.'
__class__ = FloatingPointError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Floating point operation failed.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class FutureWarning(Warning):
'Base class for warnings about constructs that will change semantically\nin the future.'
__class__ = FutureWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about constructs that will change semantically\nin the future.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class GeneratorExit(BaseException):
'Request that a generator exit.'
__class__ = GeneratorExit
__dict__ = {}
def __init__(self, *args, **kwargs):
'Request that a generator exit.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
IOError = OSError
class ImportError(Exception):
"Import can't find module, or can't find name in module."
__class__ = ImportError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Import can't find module, or can't find name in module."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
        return ''
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def msg(self):
'exception message'
pass
@property
def name(self):
'module name'
pass
@property
def path(self):
'module path'
pass
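# Illustrative sketch (not part of the generated stub): at runtime the real
# ImportError populates the msg/name/path properties stubbed above, e.g.
#   >>> try:
#   ...     import no_such_module  # hypothetical missing module
#   ... except ImportError as exc:
#   ...     exc.name
#   'no_such_module'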
class ImportWarning(Warning):
'Base class for warnings about probable mistakes in module imports'
__class__ = ImportWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about probable mistakes in module imports'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IndentationError(SyntaxError):
'Improper indentation.'
__class__ = IndentationError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Improper indentation.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IndexError(LookupError):
'Sequence index out of range.'
__class__ = IndexError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Sequence index out of range.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class InterruptedError(OSError):
'Interrupted by signal.'
__class__ = InterruptedError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Interrupted by signal.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class IsADirectoryError(OSError):
"Operation doesn't work on directories."
__class__ = IsADirectoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Operation doesn't work on directories."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class KeyError(LookupError):
'Mapping key not found.'
__class__ = KeyError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Mapping key not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
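# Illustrative sketch (not part of the generated stub): KeyError's __str__
# (stubbed above) applies repr() to the missing key, which is why its
# message carries extra quotes, e.g.
#   >>> try:
#   ...     {}['spam']
#   ... except KeyError as exc:
#   ...     str(exc)
#   "'spam'"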
class KeyboardInterrupt(BaseException):
'Program interrupted by user.'
__class__ = KeyboardInterrupt
__dict__ = {}
def __init__(self, *args, **kwargs):
'Program interrupted by user.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class LookupError(Exception):
'Base class for lookup errors.'
__class__ = LookupError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for lookup errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class MemoryError(Exception):
'Out of memory.'
__class__ = MemoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Out of memory.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ModuleNotFoundError(ImportError):
'Module not found.'
__class__ = ModuleNotFoundError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Module not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class NameError(Exception):
'Name not found globally.'
__class__ = NameError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Name not found globally.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class NotADirectoryError(OSError):
'Operation only works on directories.'
__class__ = NotADirectoryError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Operation only works on directories.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
NotImplemented = NotImplementedType()
class NotImplementedError(RuntimeError):
"Method or function hasn't been implemented yet."
__class__ = NotImplementedError
__dict__ = {}
def __init__(self, *args, **kwargs):
"Method or function hasn't been implemented yet."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class OSError(Exception):
'Base class for I/O related errors.'
__class__ = OSError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for I/O related errors.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __reduce__(self):
        return ''
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def characters_written(self):
pass
@property
def errno(self):
'POSIX exception code'
pass
@property
def filename(self):
'exception filename'
pass
@property
def filename2(self):
'second exception filename'
pass
@property
def strerror(self):
'exception strerror'
pass
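# Illustrative sketch (not part of the generated stub): since PEP 3151,
# constructing OSError with a recognized errno yields the matching subclass,
# filling the errno/strerror/filename properties stubbed above, e.g.
#   >>> import errno
#   >>> exc = OSError(errno.ENOENT, 'No such file or directory', 'x.txt')
#   >>> type(exc).__name__, exc.errno, exc.filename
#   ('FileNotFoundError', 2, 'x.txt')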
class OverflowError(ArithmeticError):
'Result too large to be represented.'
__class__ = OverflowError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Result too large to be represented.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class PendingDeprecationWarning(Warning):
'Base class for warnings about features which will be deprecated\nin the future.'
__class__ = PendingDeprecationWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about features which will be deprecated\nin the future.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class PermissionError(OSError):
'Not enough permissions.'
__class__ = PermissionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Not enough permissions.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ProcessLookupError(OSError):
'Process not found.'
__class__ = ProcessLookupError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Process not found.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RecursionError(RuntimeError):
'Recursion limit exceeded.'
__class__ = RecursionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Recursion limit exceeded.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ReferenceError(Exception):
'Weak ref proxy used after referent went away.'
__class__ = ReferenceError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Weak ref proxy used after referent went away.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ResourceWarning(Warning):
'Base class for warnings about resource usage.'
__class__ = ResourceWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about resource usage.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RuntimeError(Exception):
'Unspecified run-time error.'
__class__ = RuntimeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unspecified run-time error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class RuntimeWarning(Warning):
'Base class for warnings about dubious runtime behavior.'
__class__ = RuntimeWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about dubious runtime behavior.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class StopAsyncIteration(Exception):
'Signal the end from iterator.__anext__().'
__class__ = StopAsyncIteration
__dict__ = {}
    def __init__(self, *args, **kwargs):
'Signal the end from iterator.__anext__().'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class StopIteration(Exception):
'Signal the end from iterator.__next__().'
__class__ = StopIteration
__dict__ = {}
    def __init__(self, *args, **kwargs):
'Signal the end from iterator.__next__().'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def value(self):
'generator return value'
pass
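# Illustrative sketch (not part of the generated stub): the value property
# stubbed above carries a generator's return value, e.g.
#   >>> def gen():
#   ...     yield 1
#   ...     return 'done'
#   >>> g = gen(); next(g)
#   1
#   >>> try:
#   ...     next(g)
#   ... except StopIteration as exc:
#   ...     exc.value
#   'done'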
class SyntaxError(Exception):
'Invalid syntax.'
__class__ = SyntaxError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Invalid syntax.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def filename(self):
'exception filename'
pass
@property
def lineno(self):
'exception lineno'
pass
@property
def msg(self):
'exception msg'
pass
@property
def offset(self):
'exception offset'
pass
@property
def print_file_and_line(self):
'exception print_file_and_line'
pass
@property
def text(self):
'exception text'
pass
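# Illustrative sketch (not part of the generated stub): the filename, lineno,
# offset and text properties stubbed above locate the parse failure, e.g.
#   >>> try:
#   ...     compile('1 +', '<demo>', 'eval')
#   ... except SyntaxError as exc:
#   ...     (exc.filename, exc.lineno)
#   ('<demo>', 1)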
class SyntaxWarning(Warning):
'Base class for warnings about dubious syntax.'
__class__ = SyntaxWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about dubious syntax.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class SystemError(Exception):
'Internal error in the Python interpreter.\n\nPlease report this to the Python maintainer, along with the traceback,\nthe Python version, and the hardware/OS platform and version.'
__class__ = SystemError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Internal error in the Python interpreter.\n\nPlease report this to the Python maintainer, along with the traceback,\nthe Python version, and the hardware/OS platform and version.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class SystemExit(BaseException):
'Request to exit from the interpreter.'
__class__ = SystemExit
__dict__ = {}
def __init__(self, *args, **kwargs):
'Request to exit from the interpreter.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def code(self):
'exception code'
pass
class TabError(IndentationError):
'Improper mixture of spaces and tabs.'
__class__ = TabError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Improper mixture of spaces and tabs.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class TimeoutError(OSError):
'Timeout expired.'
__class__ = TimeoutError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Timeout expired.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class TypeError(Exception):
'Inappropriate argument type.'
__class__ = TypeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Inappropriate argument type.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnboundLocalError(NameError):
'Local name referenced but not bound to a value.'
__class__ = UnboundLocalError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Local name referenced but not bound to a value.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnicodeDecodeError(UnicodeError):
'Unicode decoding error.'
__class__ = UnicodeDecodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode decoding error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
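# Illustrative sketch (not part of the generated stub): the encoding, object,
# start, end and reason properties stubbed above pinpoint the undecodable
# bytes, e.g.
#   >>> try:
#   ...     b'\xff'.decode('utf-8')
#   ... except UnicodeDecodeError as exc:
#   ...     (exc.encoding, exc.start, exc.end, exc.reason)
#   ('utf-8', 0, 1, 'invalid start byte')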
class UnicodeEncodeError(UnicodeError):
'Unicode encoding error.'
__class__ = UnicodeEncodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode encoding error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
class UnicodeError(ValueError):
'Unicode related error.'
__class__ = UnicodeError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode related error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UnicodeTranslateError(UnicodeError):
'Unicode translation error.'
__class__ = UnicodeTranslateError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Unicode translation error.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def encoding(self):
'exception encoding'
pass
@property
def end(self):
'exception end'
pass
@property
def object(self):
'exception object'
pass
@property
def reason(self):
'exception reason'
pass
@property
def start(self):
'exception start'
pass
class UnicodeWarning(Warning):
'Base class for warnings about Unicode related problems, mostly\nrelated to conversion problems.'
__class__ = UnicodeWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings about Unicode related problems, mostly\nrelated to conversion problems.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class UserWarning(Warning):
'Base class for warnings generated by user code.'
__class__ = UserWarning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warnings generated by user code.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ValueError(Exception):
'Inappropriate argument value (of correct type).'
__class__ = ValueError
__dict__ = {}
    def __init__(self, *args, **kwargs):
'Inappropriate argument value (of correct type).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class Warning(Exception):
'Base class for warning categories.'
__class__ = Warning
__dict__ = {}
def __init__(self, *args, **kwargs):
'Base class for warning categories.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
class ZeroDivisionError(ArithmeticError):
'Second argument to a division or modulo operation was zero.'
__class__ = ZeroDivisionError
__dict__ = {}
def __init__(self, *args, **kwargs):
'Second argument to a division or modulo operation was zero.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def __build_class__(func, name, *bases, metaclass=None, **kwds):
'__build_class__(func, name, /, *bases, [metaclass], **kwds) -> class\n\nInternal helper function used by the class statement.'
pass
__doc__ = "Built-in functions, exceptions, and other objects.\n\nNoteworthy: None is the `nil' object; Ellipsis represents `...' in slices."
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"__import__(name, globals=None, locals=None, fromlist=(), level=0) -> module\n\nImport a module. Because this function is meant for use by the Python\ninterpreter and not for general use, it is better to use\nimportlib.import_module() to programmatically import a module.\n\nThe globals argument is only used to determine the context;\nthey are not modified. The locals argument is unused. The fromlist\nshould be a list of names to emulate ``from name import ...'', or an\nempty list to emulate ``import name''.\nWhen importing a module from a package, note that __import__('A.B', ...)\nreturns package A when fromlist is empty, but its submodule B when\nfromlist is not empty. The level argument is used to determine whether to\nperform absolute or relative imports: 0 is absolute, while a positive number\nis the number of parent directories to search relative to the current module."
pass
__name__ = 'builtins'
__package__ = ''
def abs(x):
'Return the absolute value of the argument.'
pass
def all(iterable):
'Return True if bool(x) is True for all values x in the iterable.\n\nIf the iterable is empty, return True.'
return False
def any(iterable):
'Return True if bool(x) is True for any x in the iterable.\n\nIf the iterable is empty, return False.'
return False
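# Illustrative sketch (not part of the generated stub): any() and all() as
# documented above reduce an iterable by truthiness and short-circuit, e.g.
#   >>> any([0, '', None]), all([1, 'x'])
#   (False, True)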
def ascii(obj):
'Return an ASCII-only representation of an object.\n\nAs repr(), return a string containing a printable representation of an\nobject, but escape the non-ASCII characters in the string returned by\nrepr() using \\\\x, \\\\u or \\\\U escapes. This generates a string similar\nto that returned by repr() in Python 2.'
return ''
def bin(number):
"Return the binary representation of an integer.\n\n >>> bin(2796202)\n '0b1010101010101010101010'"
return ''
def breakpoint(*args, **kws):
'breakpoint(*args, **kws)\n\nCall sys.breakpointhook(*args, **kws). sys.breakpointhook() must accept\nwhatever arguments are passed.\n\nBy default, this drops you into the pdb debugger.'
pass
class bytearray(object):
'bytearray(iterable_of_ints) -> bytearray\nbytearray(string, encoding[, errors]) -> bytearray\nbytearray(bytes_or_buffer) -> mutable copy of bytes_or_buffer\nbytearray(int) -> bytes array of size given by the parameter initialized with null bytes\nbytearray() -> empty bytes array\n\nConstruct a mutable bytearray object from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - a bytes or a buffer object\n - any object implementing the buffer API.\n - an integer'
def __add__(self, value):
'Return self+value.'
return bytearray()
def __alloc__(self):
'B.__alloc__() -> int\n\nReturn the number of bytes actually allocated.'
return 1
__class__ = bytearray
def __contains__(self, key):
'Return key in self.'
return False
def __delitem__(self, key):
'Delete self[key].'
return None
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __iadd__(self, value):
'Implement self+=value.'
return None
def __imul__(self, value):
'Implement self*=value.'
return None
    def __init__(self, *args, **kwargs):
'bytearray(iterable_of_ints) -> bytearray\nbytearray(string, encoding[, errors]) -> bytearray\nbytearray(bytes_or_buffer) -> mutable copy of bytes_or_buffer\nbytearray(int) -> bytes array of size given by the parameter initialized with null bytes\nbytearray() -> empty bytes array\n\nConstruct a mutable bytearray object from:\n - an iterable yielding integers in range(256)\n - a text string encoded using the specified encoding\n - a bytes or a buffer object\n - any object implementing the buffer API.\n - an integer'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return bytearray()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mod__(self, value):
'Return self%value.'
return bytearray()
def __mul__(self, value):
'Return self*value.'
return bytearray()
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Return state information for pickling.'
        return ''
def __reduce_ex__(self, proto):
'Return state information for pickling.'
        return ''
def __repr__(self):
'Return repr(self).'
return ''
def __rmod__(self, value):
'Return value%self.'
return bytearray()
def __rmul__(self, value):
'Return value*self.'
return bytearray()
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
def __sizeof__(self):
'Returns the size of the bytearray object in memory, in bytes.'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def append(self, item):
'Append a single item to the end of the bytearray.\n\n item\n The item to be appended.'
pass
def capitalize(self):
'B.capitalize() -> copy of B\n\nReturn a copy of B with only its first character capitalized (ASCII)\nand the rest lower-cased.'
return bytearray()
    def center(self, width, fillchar=b' '):
'Return a centered string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def clear(self):
'Remove all items from the bytearray.'
return None
def copy(self):
'Return a copy of B.'
return bytearray()
    def count(self, sub, start=0, end=-1):
'B.count(sub[, start[, end]]) -> int\n\nReturn the number of non-overlapping occurrences of subsection sub in\nbytes B[start:end]. Optional arguments start and end are interpreted\nas in slice notation.'
return 0
    def decode(self, encoding='utf-8', errors='strict'):
"Decode the bytearray using the codec registered for encoding.\n\n encoding\n The encoding with which to decode the bytearray.\n errors\n The error handling scheme to use for the handling of decoding errors.\n The default is 'strict' meaning that decoding errors raise a\n UnicodeDecodeError. Other possible values are 'ignore' and 'replace'\n as well as any other name registered with codecs.register_error that\n can handle UnicodeDecodeErrors."
pass
def endswith(self, suffix, start=0, end=-1):
'B.endswith(suffix[, start[, end]]) -> bool\n\nReturn True if B ends with the specified suffix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nsuffix can also be a tuple of bytes to try.'
return False
    def expandtabs(self, tabsize=8):
'Return a copy where all tab characters are expanded using spaces.\n\nIf tabsize is not given, a tab size of 8 characters is assumed.'
return bytearray()
def extend(self, iterable_of_ints):
'Append all the items from the iterator or sequence to the end of the bytearray.\n\n iterable_of_ints\n The iterable of items to append.'
pass
def find(self, sub, start=0, end=-1):
'B.find(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
@classmethod
    def fromhex(cls, string):
"Create a bytearray object from a string of hexadecimal numbers.\n\nSpaces between two numbers are accepted.\nExample: bytearray.fromhex('B9 01EF') -> bytearray(b'\\\\xb9\\\\x01\\\\xef')"
pass
def hex(self):
"Create a str of hexadecimal numbers from a bytearray object.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = bytearray([0xb9, 0x01, 0xef])\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
    def index(self, sub, start=0, end=-1):
'B.index(sub[, start[, end]]) -> int\n\nReturn the lowest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaises ValueError when the subsection is not found.'
return 0
def insert(self, index, item):
'Insert a single item into the bytearray before the given index.\n\n index\n The index where the value is to be inserted.\n item\n The item to be inserted.'
pass
def isalnum(self):
'B.isalnum() -> bool\n\nReturn True if all characters in B are alphanumeric\nand there is at least one character in B, False otherwise.'
return False
def isalpha(self):
'B.isalpha() -> bool\n\nReturn True if all characters in B are alphabetic\nand there is at least one character in B, False otherwise.'
return False
def isascii(self):
'B.isascii() -> bool\n\nReturn True if B is empty or all characters in B are ASCII,\nFalse otherwise.'
return True
def isdigit(self):
'B.isdigit() -> bool\n\nReturn True if all characters in B are digits\nand there is at least one character in B, False otherwise.'
return False
def islower(self):
'B.islower() -> bool\n\nReturn True if all cased characters in B are lowercase and there is\nat least one cased character in B, False otherwise.'
return False
def isspace(self):
'B.isspace() -> bool\n\nReturn True if all characters in B are whitespace\nand there is at least one character in B, False otherwise.'
return False
def istitle(self):
'B.istitle() -> bool\n\nReturn True if B is a titlecased string and there is at least one\ncharacter in B, i.e. uppercase characters may only follow uncased\ncharacters and lowercase characters only cased ones. Return False\notherwise.'
return False
def isupper(self):
'B.isupper() -> bool\n\nReturn True if all cased characters in B are uppercase and there is\nat least one cased character in B, False otherwise.'
return False
def join(self, iterable_of_bytes):
'Concatenate any number of bytes/bytearray objects.\n\nThe bytearray whose method is called is inserted in between each pair.\n\nThe result is returned as a new bytearray object.'
pass
    def ljust(self, width, fillchar=b' '):
'Return a left-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def lower(self):
'B.lower() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to lowercase.'
return bytearray()
    def lstrip(self, bytes=None):
'Strip leading bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading ASCII whitespace.'
return bytearray()
@classmethod
def maketrans(cls, frm, to):
'Return a translation table useable for the bytes or bytearray translate method.\n\nThe returned table will be one where each byte in frm is mapped to the byte at\nthe same position in to.\n\nThe bytes objects frm and to must be of the same length.'
pass
def partition(self, sep):
'Partition the bytearray into three parts using the given separator.\n\nThis will search for the separator sep in the bytearray. If the separator is\nfound, returns a 3-tuple containing the part before the separator, the\nseparator itself, and the part after it as new bytearray objects.\n\nIf the separator is not found, returns a 3-tuple containing the copy of the\noriginal bytearray object and two empty bytearray objects.'
return (bytearray(), bytearray(), bytearray())
    def pop(self, index=-1):
'Remove and return a single item from B.\n\n index\n The index from where to remove the item.\n -1 (the default value) means remove the last item.\n\nIf no index argument is given, will pop the last item.'
pass
def remove(self, value):
'Remove the first occurrence of a value in the bytearray.\n\n value\n The value to remove.'
return None
    def replace(self, old, new, count=-1):
'Return a copy with all occurrences of substring old replaced by new.\n\n count\n Maximum number of occurrences to replace.\n -1 (the default value) means replace all occurrences.\n\nIf the optional argument count is given, only the first count occurrences are\nreplaced.'
return bytearray()
def reverse(self):
'Reverse the order of the values in B in place.'
pass
def rfind(self, sub, start=0, end=-1):
'B.rfind(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nReturn -1 on failure.'
return 0
def rindex(self, sub, start=0, end=-1):
'B.rindex(sub[, start[, end]]) -> int\n\nReturn the highest index in B where subsection sub is found,\nsuch that sub is contained within B[start,end]. Optional\narguments start and end are interpreted as in slice notation.\n\nRaise ValueError when the subsection is not found.'
return 0
    def rjust(self, width, fillchar=b' '):
'Return a right-justified string of length width.\n\nPadding is done using the specified fill character.'
return bytearray()
def rpartition(self, sep):
'Partition the bytearray into three parts using the given separator.\n\nThis will search for the separator sep in the bytearray, starting at the end.\nIf the separator is found, returns a 3-tuple containing the part before the\nseparator, the separator itself, and the part after it as new bytearray\nobjects.\n\nIf the separator is not found, returns a 3-tuple containing two empty bytearray\nobjects and the copy of the original bytearray object.'
return (bytearray(), bytearray(), bytearray())
    def rsplit(self, sep=None, maxsplit=-1):
'Return a list of the sections in the bytearray, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytearray.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.\n\nSplitting is done starting at the end of the bytearray and working to the front.'
return [bytearray()]
def rstrip(self, bytes):
'Strip trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip trailing ASCII whitespace.'
return bytearray()
def split(self, sep, maxsplit):
'Return a list of the sections in the bytearray, using sep as the delimiter.\n\n sep\n The delimiter according which to split the bytearray.\n None (the default value) means split on ASCII whitespace characters\n (space, tab, return, newline, formfeed, vertical tab).\n maxsplit\n Maximum number of splits to do.\n -1 (the default value) means no limit.'
return [bytearray()]
def splitlines(self, keepends):
'Return a list of the lines in the bytearray, breaking at line boundaries.\n\nLine breaks are not included in the resulting list unless keepends is given and\ntrue.'
        return [bytearray()]
def startswith(self, prefix, start=0, end=-1):
'B.startswith(prefix[, start[, end]]) -> bool\n\nReturn True if B starts with the specified prefix, False otherwise.\nWith optional start, test B beginning at that position.\nWith optional end, stop comparing B at that position.\nprefix can also be a tuple of bytes to try.'
return False
def strip(self, bytes):
'Strip leading and trailing bytes contained in the argument.\n\nIf the argument is omitted or None, strip leading and trailing ASCII whitespace.'
return bytearray()
def swapcase(self):
'B.swapcase() -> copy of B\n\nReturn a copy of B with uppercase ASCII characters converted\nto lowercase ASCII and vice versa.'
return bytearray()
def title(self):
'B.title() -> copy of B\n\nReturn a titlecased version of B, i.e. ASCII words start with uppercase\ncharacters, all remaining cased characters have lowercase.'
return bytearray()
def translate(self, table, delete):
'Return a copy with each character mapped by the given translation table.\n\n table\n Translation table, which must be a bytes object of length 256.\n\nAll characters occurring in the optional argument delete are removed.\nThe remaining characters are mapped through the given translation table.'
pass
def upper(self):
'B.upper() -> copy of B\n\nReturn a copy of B with all ASCII characters converted to uppercase.'
return bytearray()
def zfill(self, width):
'Pad a numeric string with zeros on the left, to fill a field of the given width.\n\nThe original string is never truncated.'
return bytearray()
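# Illustrative usage of the bytearray API above (examples added for clarity;
# they are not emitted by the stub generator):
#   >>> bytearray(b'a-b-c').partition(b'-')
#   (bytearray(b'a'), bytearray(b'-'), bytearray(b'b-c'))
#   >>> bytearray(b'  spam  ').strip()
#   bytearray(b'spam')
#   >>> bytearray(b'42').zfill(5)
#   bytearray(b'00042')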
def callable(obj):
'Return whether the object is callable (i.e., some kind of function).\n\nNote that classes are callable, as are instances of classes with a\n__call__() method.'
return False
def chr(i):
'Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.'
return ''
def compile(source, filename, mode, flags, dont_inherit, optimize):
"Compile source into a code object that can be executed by exec() or eval().\n\nThe source code may represent a Python module, statement or expression.\nThe filename will be used for run-time error messages.\nThe mode must be 'exec' to compile a module, 'single' to compile a\nsingle (interactive) statement, or 'eval' to compile an expression.\nThe flags argument, if present, controls which future statements influence\nthe compilation of the code.\nThe dont_inherit argument, if true, stops the compilation inheriting\nthe effects of any future statements in effect in the code calling\ncompile; if absent or false these statements do influence the compilation,\nin addition to any features explicitly specified."
pass
def copyright(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def credits(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def delattr(obj, name):
"Deletes the named attribute from the given object.\n\ndelattr(x, 'y') is equivalent to ``del x.y''"
pass
def dir(object=None):
"dir([object]) -> list of strings\n\nIf called without an argument, return the names in the current scope.\nElse, return an alphabetized list of names comprising (some of) the attributes\nof the given object, and of attributes reachable from it.\nIf the object supplies a method named __dir__, it will be used; otherwise\nthe default dir() logic is used and returns:\n for a module object: the module's attributes.\n for a class object: its attributes, and recursively the attributes\n of its bases.\n for any other object: its attributes, its class's attributes, and\n recursively the attributes of its class's base classes."
return list()
def divmod(x, y):
'Return the tuple (x//y, x%y). Invariant: div*y + mod == x.'
return (0, 0)
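# Example (illustrative): divmod(7, 3) == (2, 1), and because // floors,
# divmod(-7, 3) == (-3, 2); the invariant div*y + mod == x holds in both cases.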
class enumerate(object):
'Return an enumerate object.\n\n iterable\n an object supporting iteration\n\nThe enumerate object yields pairs containing a count (from start, which\ndefaults to zero) and a value yielded by the iterable argument.\n\nenumerate is useful for obtaining an indexed list:\n (0, seq[0]), (1, seq[1]), (2, seq[2]), ...'
__class__ = enumerate
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Return an enumerate object.\n\n iterable\n an object supporting iteration\n\nThe enumerate object yields pairs containing a count (from start, which\ndefaults to zero) and a value yielded by the iterable argument.\n\nenumerate is useful for obtaining an indexed list:\n (0, seq[0]), (1, seq[1]), (2, seq[2]), ...'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return enumerate()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
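# Example (illustrative): list(enumerate(['a', 'b'], 1)) == [(1, 'a'), (2, 'b')]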
def eval(source, globals, locals):
'Evaluate the given source in the context of globals and locals.\n\nThe source may be a string representing a Python expression\nor a code object as returned by compile().\nThe globals must be a dictionary and locals can be any mapping,\ndefaulting to the current globals and locals.\nIf only globals is given, locals defaults to it.'
pass
def exec(source, globals, locals):
'Execute the given source in the context of globals and locals.\n\nThe source may be a string representing one or more Python statements\nor a code object as returned by compile().\nThe globals must be a dictionary and locals can be any mapping,\ndefaulting to the current globals and locals.\nIf only globals is given, locals defaults to it.'
pass
def exit(self, code):
pass
class filter(object):
'filter(function or None, iterable) --> filter object\n\nReturn an iterator yielding those items of iterable for which function(item)\nis true. If function is None, return the items that are true.'
__class__ = filter
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, functionorNone, iterable):
'filter(function or None, iterable) --> filter object\n\nReturn an iterator yielding those items of iterable for which function(item)\nis true. If function is None, return the items that are true.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return filter()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
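# Example (illustrative): list(filter(None, [0, 1, '', 'x'])) == [1, 'x']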
def format(value, format_spec):
"Return value.__format__(format_spec)\n\nformat_spec defaults to the empty string.\nSee the Format Specification Mini-Language section of help('FORMATTING') for\ndetails."
return ''
def getattr(object, name, default=None):
"getattr(object, name[, default]) -> value\n\nGet a named attribute from an object; getattr(x, 'y') is equivalent to x.y.\nWhen a default argument is given, it is returned when the attribute doesn't\nexist; without it, an exception is raised in that case."
pass
def globals():
"Return the dictionary containing the current scope's global variables.\n\nNOTE: Updates to this dictionary *will* affect name lookups in the current\nglobal scope and vice-versa."
return __Dict__()
def hasattr(obj, name):
'Return whether the object has an attribute with the given name.\n\nThis is done by calling getattr(obj, name) and catching AttributeError.'
return False
def hash(obj):
'Return the hash value for the given object.\n\nTwo objects that compare equal must also have the same hash value, but the\nreverse is not necessarily true.'
return 0
def help(self, *args, **kwds):
"Define the builtin 'help'.\n\n This is a wrapper around pydoc.help that provides a helpful message\n when 'help' is typed at the Python interactive prompt.\n\n Calling help() at the Python prompt starts an interactive help session.\n Calling help(thing) prints help for the python object 'thing'.\n "
pass
def hex(number):
"Return the hexadecimal representation of an integer.\n\n >>> hex(12648430)\n '0xc0ffee'"
return ''
def id(obj):
"Return the identity of an object.\n\nThis is guaranteed to be unique among simultaneously existing objects.\n(CPython uses the object's memory address.)"
return 0
def input(prompt):
'Read a string from standard input. The trailing newline is stripped.\n\nThe prompt string, if given, is printed to standard output without a\ntrailing newline before reading input.\n\nIf the user hits EOF (*nix: Ctrl-D, Windows: Ctrl-Z+Return), raise EOFError.\nOn *nix systems, readline is used if available.'
return ''
def isinstance(obj, class_or_tuple):
'Return whether an object is an instance of a class or of a subclass thereof.\n\nA tuple, as in ``isinstance(x, (A, B, ...))``, may be given as the target to\ncheck against. This is equivalent to ``isinstance(x, A) or isinstance(x, B)\nor ...`` etc.'
pass
def issubclass(cls, class_or_tuple):
"Return whether 'cls' is a derived from another class or is the same class.\n\nA tuple, as in ``issubclass(x, (A, B, ...))``, may be given as the target to\ncheck against. This is equivalent to ``issubclass(x, A) or issubclass(x, B)\nor ...`` etc."
pass
def iter(callable, sentinel):
'iter(iterable) -> iterator\niter(callable, sentinel) -> iterator\n\nGet an iterator from an object. In the first form, the argument must\nsupply its own iterator, or be a sequence.\nIn the second form, the callable is called until it returns the sentinel.'
pass
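# Example of the two-argument (callable, sentinel) form; illustrative only,
# where `f` is a hypothetical file object opened in binary mode:
#   for chunk in iter(lambda: f.read(4096), b''):
#       ...  # process each chunk until read() returns the sentinel b''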
def len(obj):
'Return the number of items in a container.'
return 0
def license(self):
'interactive prompt objects for printing the license text, a list of\n contributors and the copyright notice.'
pass
def locals():
"Return a dictionary containing the current scope's local variables.\n\nNOTE: Whether or not updates to this dictionary will affect name lookups in\nthe local scope and vice-versa is *implementation dependent* and not\ncovered by any backwards compatibility guarantees."
return __Dict__()
class map(object):
'map(func, *iterables) --> map object\n\nMake an iterator that computes the function using arguments from\neach of the iterables. Stops when the shortest iterable is exhausted.'
__class__ = map
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, func, *iterables):
'map(func, *iterables) --> map object\n\nMake an iterator that computes the function using arguments from\neach of the iterables. Stops when the shortest iterable is exhausted.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return map()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
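# Example (illustrative): map stops at the shortest iterable, so
# list(map(pow, [2, 3, 4], [5, 6])) == [32, 729]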
def max(iterable, *, default=None, key=None):
'max(iterable, *[, default=obj, key=func]) -> value\nmax(arg1, arg2, *args, *[, key=func]) -> value\n\nWith a single iterable argument, return its biggest item. The\ndefault keyword-only argument specifies an object to return if\nthe provided iterable is empty.\nWith two or more arguments, return the largest argument.'
pass
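# Example (illustrative): max([], default=0) == 0 and
# max(['apple', 'fig'], key=len) == 'apple'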
class memoryview(object):
'Create a new memoryview object which references the given object.'
__class__ = memoryview
def __delitem__(self, key):
'Delete self[key].'
return None
def __enter__(self):
return self
def __eq__(self, value):
'Return self==value.'
return False
    def __exit__(self, *args):
pass
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'Create a new memoryview object which references the given object.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __repr__(self):
'Return repr(self).'
return ''
def __setitem__(self, key, value):
'Set self[key] to value.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def c_contiguous(self):
'A bool indicating whether the memory is C contiguous.'
pass
def cast(self, format):
'Cast a memoryview to a new format or shape.'
pass
@property
def contiguous(self):
'A bool indicating whether the memory is contiguous.'
pass
@property
def f_contiguous(self):
'A bool indicating whether the memory is Fortran contiguous.'
pass
@property
def format(self):
'A string containing the format (in struct module style)\n for each element in the view.'
return ''
    def hex(self, sep=None, bytes_per_sep=1):
"Return the data in the buffer as a str of hexadecimal numbers.\n\n sep\n An optional single character or byte to separate hex bytes.\n bytes_per_sep\n How many bytes between separators. Positive values count from the\n right, negative values count from the left.\n\nExample:\n>>> value = memoryview(b'\\xb9\\x01\\xef')\n>>> value.hex()\n'b901ef'\n>>> value.hex(':')\n'b9:01:ef'\n>>> value.hex(':', 2)\n'b9:01ef'\n>>> value.hex(':', -2)\n'b901:ef'"
return ''
@property
def itemsize(self):
'The size in bytes of each element of the memoryview.'
pass
@property
def nbytes(self):
'The amount of space in bytes that the array would use in\n a contiguous representation.'
pass
@property
def ndim(self):
'An integer indicating how many dimensions of a multi-dimensional\n array the memory represents.'
pass
@property
def obj(self):
'The underlying object of the memoryview.'
pass
@property
def readonly(self):
'A bool indicating whether the memory is read only.'
pass
def release(self):
'Release the underlying buffer exposed by the memoryview object.'
pass
@property
def shape(self):
'A tuple of ndim integers giving the shape of the memory\n as an N-dimensional array.'
pass
@property
def strides(self):
'A tuple of ndim integers giving the size in bytes to access\n each element for each dimension of the array.'
pass
@property
def suboffsets(self):
'A tuple of integers used internally for PIL-style arrays.'
pass
def tobytes(self, order):
"Return the data in the buffer as a byte string. Order can be {'C', 'F', 'A'}.\nWhen order is 'C' or 'F', the data of the original array is converted to C or\nFortran order. For contiguous views, 'A' returns an exact copy of the physical\nmemory. In particular, in-memory Fortran order is preserved. For non-contiguous\nviews, the data is converted to C first. order=None is the same as order='C'."
pass
def tolist(self):
'Return the data in the buffer as a list of elements.'
pass
def toreadonly(self):
'Return a readonly version of the memoryview.'
pass
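# Example (illustrative, not part of the generated stub):
#   >>> m = memoryview(b'abc')
#   >>> m[0], m.tolist(), m.hex()
#   (97, [97, 98, 99], '616263')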
def min(iterable, *, default=None, key=None):
'min(iterable, *[, default=obj, key=func]) -> value\nmin(arg1, arg2, *args, *[, key=func]) -> value\n\nWith a single iterable argument, return its smallest item. The\ndefault keyword-only argument specifies an object to return if\nthe provided iterable is empty.\nWith two or more arguments, return the smallest argument.'
pass
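# Example (illustrative): min('banana') == 'a' and min([3, -4], key=abs) == 3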
def next(iterator, default=None):
'next(iterator[, default])\n\nReturn the next item from the iterator. If default is given and the iterator\nis exhausted, it is returned instead of raising StopIteration.'
pass
def oct(number):
"Return the octal representation of an integer.\n\n >>> oct(342391)\n '0o1234567'"
return ''
def open(file, mode, buffering, encoding, errors, newline, closefd, opener):
    'Open file and return a stream. Raise OSError upon failure.\n\nfile is either a text or byte string giving the name (and the path\nif the file isn\'t in the current working directory) of the file to\nbe opened or an integer file descriptor of the file to be\nwrapped. (If a file descriptor is given, it is closed when the\nreturned I/O object is closed, unless closefd is set to False.)\n\nmode is an optional string that specifies the mode in which the file\nis opened. It defaults to \'r\' which means open for reading in text\nmode. Other common values are \'w\' for writing (truncating the file if\nit already exists), \'x\' for creating and writing to a new file, and\n\'a\' for appending (which on some Unix systems, means that all writes\nappend to the end of the file regardless of the current seek position).\nIn text mode, if encoding is not specified the encoding used is platform\ndependent: locale.getpreferredencoding(False) is called to get the\ncurrent locale encoding. (For reading and writing raw bytes use binary\nmode and leave encoding unspecified.) The available modes are:\n\n========= ===============================================================\nCharacter Meaning\n--------- ---------------------------------------------------------------\n\'r\' open for reading (default)\n\'w\' open for writing, truncating the file first\n\'x\' create a new file and open it for writing\n\'a\' open for writing, appending to the end of the file if it exists\n\'b\' binary mode\n\'t\' text mode (default)\n\'+\' open a disk file for updating (reading and writing)\n\'U\' universal newline mode (deprecated)\n========= ===============================================================\n\nThe default mode is \'rt\' (open for reading text). For binary random\naccess, the mode \'w+b\' opens and truncates the file to 0 bytes, while\n\'r+b\' opens the file without truncation. The \'x\' mode implies \'w\' and\nraises an `FileExistsError` if the file already exists.\n\nPython distinguishes between files opened in binary and text modes,\neven when the underlying operating system doesn\'t. Files opened in\nbinary mode (appending \'b\' to the mode argument) return contents as\nbytes objects without any decoding. In text mode (the default, or when\n\'t\' is appended to the mode argument), the contents of the file are\nreturned as strings, the bytes having been first decoded using a\nplatform-dependent encoding or using the specified encoding if given.\n\n\'U\' mode is deprecated and will raise an exception in future versions\nof Python. It has no effect in Python 3. Use newline to control\nuniversal newlines mode.\n\nbuffering is an optional integer used to set the buffering policy.\nPass 0 to switch buffering off (only allowed in binary mode), 1 to select\nline buffering (only usable in text mode), and an integer > 1 to indicate\nthe size of a fixed-size chunk buffer. When no buffering argument is\ngiven, the default buffering policy works as follows:\n\n* Binary files are buffered in fixed-size chunks; the size of the buffer\n is chosen using a heuristic trying to determine the underlying device\'s\n "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.\n On many systems, the buffer will typically be 4096 or 8192 bytes long.\n\n* "Interactive" text files (files for which isatty() returns True)\n use line buffering. Other text files use the policy described above\n for binary files.\n\nencoding is the name of the encoding used to decode or encode the\nfile. This should only be used in text mode. ' \
    'The default encoding is\nplatform dependent, but any encoding supported by Python can be\npassed. See the codecs module for the list of supported encodings.\n\nerrors is an optional string that specifies how encoding errors are to\nbe handled---this argument should not be used in binary mode. Pass\n\'strict\' to raise a ValueError exception if there is an encoding error\n(the default of None has the same effect), or pass \'ignore\' to ignore\nerrors. (Note that ignoring encoding errors can lead to data loss.)\nSee the documentation for codecs.register or run \'help(codecs.Codec)\'\nfor a list of the permitted encoding error strings.\n\nnewline controls how universal newlines works (it only applies to text\nmode). It can be None, \'\', \'\\n\', \'\\r\', and \'\\r\\n\'. It works as\nfollows:\n\n* On input, if newline is None, universal newlines mode is\n enabled. Lines in the input can end in \'\\n\', \'\\r\', or \'\\r\\n\', and\n these are translated into \'\\n\' before being returned to the\n caller. If it is \'\', universal newline mode is enabled, but line\n endings are returned to the caller untranslated. If it has any of\n the other legal values, input lines are only terminated by the given\n string, and the line ending is returned to the caller untranslated.\n\n* On output, if newline is None, any \'\\n\' characters written are\n translated to the system default line separator, os.linesep. If\n newline is \'\' or \'\\n\', no translation takes place. If newline is any\n of the other legal values, any \'\\n\' characters written are translated\n to the given string.\n\nIf closefd is False, the underlying file descriptor will be kept open\nwhen the file is closed. This does not work when a file name is given\nand must be True in that case.\n\nA custom opener can be used by passing a callable as *opener*. The\nunderlying file descriptor for the file object is then obtained by\ncalling *opener* with (*file*, *flags*). *opener* must return an open\nfile descriptor (passing os.open as *opener* results in functionality\nsimilar to passing None).\n\nopen() returns a file object whose type depends on the mode, and\nthrough which the standard file operations such as reading and writing\nare performed. When open() is used to open a file in a text mode (\'w\',\n\'r\', \'wt\', \'rt\', etc.), it returns a TextIOWrapper. When used to open\na file in a binary mode, the returned class varies: in read binary\nmode, it returns a BufferedReader; in write binary and append binary\nmodes, it returns a BufferedWriter, and in read/write mode, it returns\na BufferedRandom.\n\nIt is also possible to use a string or bytearray as a file for both\nreading and writing. For strings StringIO can be used like a file\nopened in a text mode, and for bytes a BytesIO can be used like a file\nopened in a binary mode.'
pass
def ord(c):
'Return the Unicode code point for a one-character string.'
pass
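# Example (illustrative): ord and chr are inverses: ord('a') == 97, chr(97) == 'a'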
def pow(base, exp, mod):
'Equivalent to base**exp with 2 arguments or base**exp % mod with 3 arguments\n\nSome types, such as ints, are able to use a more efficient algorithm when\ninvoked using the three argument form.'
pass
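# Example (illustrative): the three-argument form is a modular power computed
# efficiently: pow(2, 10) == 1024 while pow(2, 10, 1000) == 24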
def print():
"print(value, ..., sep=' ', end='\\n', file=sys.stdout, flush=False)\n\nPrints the values to a stream, or to sys.stdout by default.\nOptional keyword arguments:\nfile: a file-like object (stream); defaults to the current sys.stdout.\nsep: string inserted between values, default a space.\nend: string appended after the last value, default a newline.\nflush: whether to forcibly flush the stream."
pass
def quit(self, code):
pass
class range(object):
'range(stop) -> range object\nrange(start, stop[, step]) -> range object\n\nReturn an object that produces a sequence of integers from start (inclusive)\nto stop (exclusive) by step. range(i, j) produces i, i+1, i+2, ..., j-1.\nstart defaults to 0, and stop is omitted! range(4) produces 0, 1, 2, 3.\nThese are exactly the valid indices for a list of 4 elements.\nWhen step is given, it specifies the increment (or decrement).'
def __bool__(self):
'self != 0'
return False
__class__ = range
def __contains__(self, key):
'Return key in self.'
return False
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, start, stop, step=None):
'range(stop) -> range object\nrange(start, stop[, step]) -> range object\n\nReturn an object that produces a sequence of integers from start (inclusive)\nto stop (exclusive) by step. range(i, j) produces i, i+1, i+2, ..., j-1.\nstart defaults to 0, and stop is omitted! range(4) produces 0, 1, 2, 3.\nThese are exactly the valid indices for a list of 4 elements.\nWhen step is given, it specifies the increment (or decrement).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return range()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __reversed__(self):
'Return a reverse iterator.'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def count(self, x):
'rangeobject.count(value) -> integer -- return number of occurrences of value'
return 0
def index(self, v):
'rangeobject.index(value) -> integer -- return index of value.\nRaise ValueError if the value is not present.'
return 0
@property
def start(self):
pass
@property
def step(self):
pass
@property
def stop(self):
pass
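# Example (illustrative): list(range(2, 10, 3)) == [2, 5, 8] and
# list(range(3, 0, -1)) == [3, 2, 1]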
def repr(obj):
'Return the canonical string representation of the object.\n\nFor many object types, including most builtins, eval(repr(obj)) == obj.'
return ''
class reversed(object):
'Return a reverse iterator over the values of the given sequence.'
__class__ = reversed
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *args, **kwargs):
'Return a reverse iterator over the values of the given sequence.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return reversed()
def __length_hint__(self):
'Private method returning an estimate of len(list(it)).'
return 0
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __setstate__(self, state):
'Set state information for unpickling.'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def round(number, ndigits):
'Round a number to a given precision in decimal digits.\n\nThe return value is an integer if ndigits is omitted or None. Otherwise\nthe return value has the same type as the number. ndigits may be negative.'
return 0.0
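# Example (illustrative): exact halves use banker's rounding and ndigits may be
# negative: round(2.5) == 2, round(3.5) == 4, round(1234, -2) == 1200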
def setattr(obj, name, value):
"Sets the named attribute on the given object to the specified value.\n\nsetattr(x, 'y', v) is equivalent to ``x.y = v''"
pass
class slice(object):
'slice(stop)\nslice(start, stop[, step])\n\nCreate a slice object. This is used for extended slicing (e.g. a[0:10:2]).'
__class__ = slice
def __eq__(self, value):
'Return self==value.'
return False
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __gt__(self, value):
'Return self>value.'
return False
__hash__ = None
def __init__(self, start, stop, step=None):
'slice(stop)\nslice(start, stop[, step])\n\nCreate a slice object. This is used for extended slicing (e.g. a[0:10:2]).'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __le__(self, value):
'Return self<=value.'
return False
def __lt__(self, value):
'Return self<value.'
return False
def __ne__(self, value):
'Return self!=value.'
return False
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def indices(self):
'S.indices(len) -> (start, stop, stride)\n\nAssuming a sequence of length len, calculate the start and stop\nindices, and the stride length of the extended slice described by\nS. Out of bounds indices are clipped in a manner consistent with the\nhandling of normal slices.'
return tuple()
@property
def start(self):
pass
@property
def step(self):
pass
@property
def stop(self):
pass
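# Example (illustrative): indices() clips to the sequence bounds:
# slice(None, None, 2).indices(5) == (0, 5, 2) and slice(-2, None).indices(5) == (3, 5, 1)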
def sorted(iterable):
'Return a new list containing all items from the iterable in ascending order.\n\nA custom key function can be supplied to customize the sort order, and the\nreverse flag can be set to request the result in descending order.'
return __List__()
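# Example (illustrative): sorted(['pear', 'fig', 'apple'], key=len, reverse=True)
# returns ['apple', 'pear', 'fig']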
def sum(iterable, start):
"Return the sum of a 'start' value (default: 0) plus an iterable of numbers\n\nWhen the iterable is empty, return the start value.\nThis function is intended specifically for use with numeric values and may\nreject non-numeric types."
pass
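# Example (illustrative): sum([1, 2, 3]) == 6 and sum([1, 2, 3], 10) == 16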
class super(object):
'super() -> same as super(__class__, <first argument>)\nsuper(type) -> unbound super object\nsuper(type, obj) -> bound super object; requires isinstance(obj, type)\nsuper(type, type2) -> bound super object; requires issubclass(type2, type)\nTypical use to call a cooperative superclass method:\nclass C(B):\n def meth(self, arg):\n super().meth(arg)\nThis works for class methods too:\nclass C(B):\n @classmethod\n def cmeth(cls, arg):\n super().cmeth(arg)\n'
__class__ = super
def __get__(self, instance, owner):
'Return an attribute of instance, which is of type owner.'
return super()
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, type, type2):
'super() -> same as super(__class__, <first argument>)\nsuper(type) -> unbound super object\nsuper(type, obj) -> bound super object; requires isinstance(obj, type)\nsuper(type, type2) -> bound super object; requires issubclass(type2, type)\nTypical use to call a cooperative superclass method:\nclass C(B):\n def meth(self, arg):\n super().meth(arg)\nThis works for class methods too:\nclass C(B):\n @classmethod\n def cmeth(cls, arg):\n super().cmeth(arg)\n'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __repr__(self):
'Return repr(self).'
return ''
@property
def __self__(self):
'the instance invoking super(); may be None'
pass
@property
def __self_class__(self):
'the type of the instance invoking super(); may be None'
pass
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __thisclass__(self):
'the class invoking super()'
pass
def vars(object=None):
'vars([object]) -> dictionary\n\nWithout arguments, equivalent to locals().\nWith an argument, equivalent to object.__dict__.'
return dict()
class zip(object):
'zip(*iterables) --> zip object\n\nReturn a zip object whose .__next__() method returns a tuple where\nthe i-th element comes from the i-th iterable argument. The .__next__()\nmethod continues until the shortest iterable in the argument sequence\nis exhausted and then it raises StopIteration.'
__class__ = zip
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __init__(self, *iterables):
'zip(*iterables) --> zip object\n\nReturn a zip object whose .__next__() method returns a tuple where\nthe i-th element comes from the i-th iterable argument. The .__next__()\nmethod continues until the shortest iterable in the argument sequence\nis exhausted and then it raises StopIteration.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return zip()
def __next__(self):
'Implement next(self).'
pass
def __reduce__(self):
'Return state information for pickling.'
return ''; return ()
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
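# Example (illustrative): zip truncates to the shortest argument:
# list(zip('ab', [1, 2, 3])) == [('a', 1), ('b', 2)]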
| [
"[email protected]"
] | |
7e39d9636d8d51231c8e255ea73707f11e4c337e | 56ffce29f0d27f83206e11870d95982c38524aae | /apweb/site/view/session_test.py | a77c127ed42f7fbac3078f43a773ba651e4786d4 | [] | no_license | adamandpaul/apweb | cce365085e2ee58cfbc31544c5a7414e67ad56b4 | b1bb81fa7d7b39f19e187462aa3447ff482b46af | refs/heads/master | 2022-10-19T02:09:52.437906 | 2021-05-21T06:10:08 | 2021-05-21T06:10:08 | 201,398,036 | 0 | 3 | null | 2022-09-21T21:39:41 | 2019-08-09T05:41:06 | Python | UTF-8 | Python | false | false | 691 | py | # -*- coding:utf-8 -*-
from . import session
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import patch
class TestSessionView(TestCase):
def setUp(self):
self.request = MagicMock()
self.context = MagicMock()
self.view = session.SessionView(self.context, self.request)
@patch("apweb.site.view.session.UserView")
def test_user(self, UserView):
self.assertEqual(self.view.user, UserView.return_value.info_manage)
UserView.assert_called_with(self.request.user, self.request)
def test_info(self):
self.view.__dict__["user"] = "foo"
self.assertEqual(self.view.info["user"], "foo")
| [
"[email protected]"
] | |
99bc5f810433c2c56027c7cadd2f629bb37f2406 | 7f33d68323240d66e610e5a89efc516915a11a96 | /manage.py | cd6b58f7934e4cd956b6d3cad8298609c08f1d21 | [
"Apache-2.0"
] | permissive | dbca-wa/observations | 100df2765ef0f6f62aaf45fc13fbb4af4395f519 | 48b2ad17afa9f0019524cb22a9a0bba74850b87f | refs/heads/master | 2021-05-31T16:29:30.906717 | 2016-04-06T02:42:05 | 2016-04-06T02:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
import os
import sys
import confy
confy.read_environment_file()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "incredibus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c538fb5cbdac74431e65498c5bb4964e8dcd47c5 | 63768dc92cde5515a96d774a32facb461a3bf6e9 | /jacket/compute/cloud/vm_mode.py | 7ca85fbd53edb92822a9d5b0385735b37fb28c03 | [
"Apache-2.0"
] | permissive | ljZM33nd/jacket | 6fe9156f6f5789e5c24425afa7ce9237c302673d | d7ad3147fcb43131098c2a5210847634ff5fb325 | refs/heads/master | 2023-04-16T11:02:01.153751 | 2016-11-15T02:48:12 | 2016-11-15T02:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible vm modes for instances.
Compute instance vm modes represent the host/guest ABI used for the
virtual machine / container. Individual hypervisors may support
multiple different vm modes per host. Available vm modes for a hypervisor
driver may also vary according to the architecture it is running on.
The 'vm_mode' parameter can be set against an instance to
choose what sort of VM to boot.
"""
from jacket.compute import exception
HVM = "hvm" # Native ABI (aka fully virtualized)
XEN = "xen" # Xen 3.0 paravirtualized
UML = "uml" # User Mode Linux paravirtualized
EXE = "exe" # Executables in containers
ALL = [HVM, XEN, UML, EXE]
def get_from_instance(instance):
"""Get the vm mode for an instance
:param instance: instance object to query
:returns: canonicalized vm mode for the instance
"""
mode = instance.vm_mode
return canonicalize(mode)
def is_valid(name):
"""Check if a string is a valid vm mode
:param name: vm mode name to validate
:returns: True if @name is valid
"""
return name in ALL
def canonicalize(mode):
"""Canonicalize the vm mode
:param name: vm mode name to canonicalize
:returns: a canonical vm mode name
"""
if mode is None:
return None
mode = mode.lower()
# For compatibility with pre-Folsom deployments
if mode == "pv":
mode = XEN
if mode == "hv":
mode = HVM
if mode == "baremetal":
mode = HVM
if not is_valid(mode):
raise exception.InvalidVirtualMachineMode(vmmode=mode)
return mode
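# Illustrative usage (added for clarity; not part of the original module):
#
#   canonicalize('pv')         # -> XEN  ('xen', pre-Folsom compatibility)
#   canonicalize('HV')         # -> HVM  ('hvm')
#   canonicalize('baremetal')  # -> HVM  ('hvm')
#   is_valid('uml')            # -> True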
| [
"[email protected]"
] | |
8beeae688c7148ebe2715f0ca83ccfd8f6ce9996 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /data/tracking/sampler/SiamFC/_deprecated/sampler.py | db5571b4db36b29aa180d356235ddcd410d4e57c | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,797 | py | import numpy as np
from Dataset.SOT.Storage.MemoryMapped.dataset import SingleObjectTrackingDataset_MemoryMapped
from Dataset.MOT.Storage.MemoryMapped.dataset import MultipleObjectTrackingDataset_MemoryMapped
from Dataset.DET.Storage.MemoryMapped.dataset import DetectionDataset_MemoryMapped
from data.tracking.sampler._sampler.sequence.SiamFC.DET import \
do_sampling_in_detection_dataset_image, get_one_random_sample_in_detection_dataset_image
from data.tracking.sampler._sampler.sequence.SiamFC.SOT import \
do_sampling_in_single_object_tracking_dataset_sequence, \
do_negative_sampling_in_single_object_tracking_dataset_sequence, \
get_one_random_sample_in_single_object_tracking_dataset_sequence
from data.tracking.sampler._sampler.sequence.SiamFC.MOT import \
do_sampling_in_multiple_object_tracking_dataset_sequence, \
do_negative_sampling_in_multiple_object_tracking_dataset_sequence, \
get_one_random_sample_in_multiple_object_tracking_dataset_sequence
from data.tracking.sampler.SiamFC.type import SiamesePairSamplingMethod
class SOTTrackingSiameseIterableDatasetSampler:
def __init__(self, datasets, negative_sample_ratio, enforce_fine_positive_sample, sampling_method: SiamesePairSamplingMethod, datasets_sampling_parameters=None, datasets_sampling_weight=None, data_processor=None):
self.datasets = datasets
self.dataset_lengths = [len(dataset) for dataset in datasets]
self.datasets_sampling_weight = datasets_sampling_weight
self.negative_sample_ratio = negative_sample_ratio
self.enforce_fine_positive_sample = enforce_fine_positive_sample
        # This sampler lives under a `_deprecated` package: constructing it
        # raises immediately, so the assignments below never execute.
        raise NotImplementedError
self.sampling_method = sampling_method
self.data_processor = data_processor
self.datasets_sampling_parameters = datasets_sampling_parameters
self.current_index_of_dataset = None
self.current_index_of_sequence = None
self.current_is_sampling_positive_sample = None
def move_next(self, rng_engine: np.random.Generator):
index_of_dataset = rng_engine.choice(np.arange(len(self.datasets)), p=self.datasets_sampling_weight)
if self.negative_sample_ratio == 0:
is_negative = False
else:
is_negative = rng_engine.random() < self.negative_sample_ratio
index_of_sequence = rng_engine.integers(0, self.dataset_lengths[index_of_dataset])
self.current_index_of_dataset = index_of_dataset
self.current_is_sampling_positive_sample = not is_negative
self.current_index_of_sequence = index_of_sequence
def _pick_random_object_as_negative_sample(self, rng_engine: np.random.Generator):
index_of_dataset = rng_engine.choice(np.arange(len(self.datasets)), p=self.datasets_sampling_weight)
dataset = self.datasets[index_of_dataset]
index_of_sequence = rng_engine.integers(0, len(dataset))
sequence = dataset[index_of_sequence]
if isinstance(dataset, DetectionDataset_MemoryMapped):
data = get_one_random_sample_in_detection_dataset_image(sequence, rng_engine)
elif isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
data = get_one_random_sample_in_single_object_tracking_dataset_sequence(sequence, rng_engine)
elif isinstance(dataset, MultipleObjectTrackingDataset_MemoryMapped):
data = get_one_random_sample_in_multiple_object_tracking_dataset_sequence(sequence, rng_engine)
else:
raise NotImplementedError
return data
def do_sampling(self, rng_engine: np.random.Generator):
dataset = self.datasets[self.current_index_of_dataset]
sequence = dataset[self.current_index_of_sequence]
frame_range = 100
if self.datasets_sampling_parameters is not None:
sampling_parameter = self.datasets_sampling_parameters[self.current_index_of_dataset]
if 'frame_range' in sampling_parameter:
frame_range = sampling_parameter['frame_range']
if isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if sequence.has_fps():
fps = sequence.get_fps()
frame_range = int(round(fps / 30 * frame_range))
if self.current_is_sampling_positive_sample:
if isinstance(dataset, DetectionDataset_MemoryMapped):
z_image, z_bbox = do_sampling_in_detection_dataset_image(sequence, rng_engine)
data = (z_image, z_bbox, z_image, z_bbox, True)
elif isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
sampled_data, is_positive = do_sampling_in_single_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
else:
sampled_data, is_positive = do_sampling_in_multiple_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
if is_positive == 0:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[0][0], sampled_data[0][1], True)
else:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[1][0], sampled_data[1][1], is_positive == 1)
else:
raise NotImplementedError
else:
if isinstance(dataset, DetectionDataset_MemoryMapped):
z_image, z_bbox = do_sampling_in_detection_dataset_image(sequence, rng_engine)
x_image, x_bbox = self._pick_random_object_as_negative_sample(rng_engine)
data = (z_image, z_bbox, x_image, x_bbox, False)
elif isinstance(dataset, (SingleObjectTrackingDataset_MemoryMapped, MultipleObjectTrackingDataset_MemoryMapped)):
if isinstance(dataset, SingleObjectTrackingDataset_MemoryMapped):
sampled_data = do_negative_sampling_in_single_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
else:
sampled_data = do_negative_sampling_in_multiple_object_tracking_dataset_sequence(sequence, frame_range, rng_engine)
if len(sampled_data) == 1:
x_image, x_bbox = self._pick_random_object_as_negative_sample(rng_engine)
data = (sampled_data[0][0], sampled_data[0][1], x_image, x_bbox, False)
else:
data = (sampled_data[0][0], sampled_data[0][1], sampled_data[1][0], sampled_data[1][1], False)
else:
raise NotImplementedError
if self.data_processor is not None:
data = self.data_processor(*data)
return data
| [
"[email protected]"
] | |
7b67372b80781dbc722821dd0e9e4fccabe7148f | 2ad9a73cb3e2da46fb15ae56a6dee11407fe8845 | /ports/kodi/addons/plugin.video.transistortv/scrapers/premiumizev2_scraper.py | 69c8e67f70be1ddef948a478838c26a63220c567 | [] | no_license | hpduong/retropie_configs | cde596b35897a3faeedefabd742fc15820d58255 | ed4e39146e5bebc0212dcef91108541a128d9325 | refs/heads/master | 2021-07-12T15:46:17.589357 | 2018-11-11T19:10:54 | 2018-11-11T19:10:54 | 157,111,040 | 1 | 2 | null | 2020-07-24T03:43:29 | 2018-11-11T18:59:52 | Python | UTF-8 | Python | false | false | 9,637 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import kodi
import log_utils # @UnusedImport
from transistortv_lib import scraper_utils
from transistortv_lib.utils2 import i18n
from transistortv_lib.constants import FORCE_NO_MATCH
from transistortv_lib.constants import VIDEO_TYPES
from transistortv_lib.constants import QUALITIES
from transistortv_lib.constants import DELIM
import scraper
logger = log_utils.Logger.get_logger()
VIDEO_EXT = ['MKV', 'AVI', 'MP4']
MIN_MEG = 100
LIST_URL = '/api/transfer/list'
FOLDER_URL = '/api/folder/list'
BROWSE_URL = '/api/torrent/browse'
class Scraper(scraper.Scraper):
base_url = ''
base_name = 'Premiumize.me'
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
if kodi.get_setting('%s-use_https' % (self.__class__.base_name)) == 'true':
scheme = 'https'
prefix = 'www'
else:
scheme = 'http'
prefix = 'http'
base_url = kodi.get_setting('%s-base_url' % (self.__class__.base_name))
self.base_url = scheme + '://' + prefix + '.' + base_url
self.username = kodi.get_setting('%s-username' % (self.__class__.base_name))
self.password = kodi.get_setting('%s-password' % (self.__class__.base_name))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE, VIDEO_TYPES.SEASON])
@classmethod
def get_name(cls):
return 'Premiumize.V2'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
for stream in self.__get_videos(source_url, video):
if video.video_type == VIDEO_TYPES.EPISODE and not scraper_utils.release_check(video, stream['name']):
continue
host = scraper_utils.get_direct_hostname(self, stream['url'])
hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream['url'], 'rating': None, 'host': host, 'quality': stream['quality'], 'direct': True}
if 'size' in stream: hoster['size'] = scraper_utils.format_size(stream['size'])
if 'name' in stream: hoster['extra'] = stream['name']
hosters.append(hoster)
return hosters
def __get_videos(self, source_url, video):
videos = []
query = scraper_utils.parse_query(source_url)
if 'hash' in query:
url = scraper_utils.urljoin(self.base_url, BROWSE_URL)
js_data = self._http_get(url, params={'hash': query['hash']}, cache_limit=1)
if 'content' in js_data:
videos = self.__get_videos2(js_data['content'], video)
return videos
def __get_videos2(self, content, video):
videos = []
for key in content:
item = content[key]
if item['type'].lower() == 'dir':
videos += self.__get_videos2(item['children'], video)
else:
if item['ext'].upper() in VIDEO_EXT and ('size' not in item or int(item['size']) > (MIN_MEG * 1024 * 1024)):
temp_video = {'name': item['name'], 'url': item['url'], 'size': item['size']}
temp_video['quality'] = self.__get_quality(item, video)
videos.append(temp_video)
if 'transcoded' in item and item['transcoded']:
transcode = item['transcoded']
name = '(Transcode) %s' % (item['name'])
temp_video = {'name': name, 'url': transcode['url']}
temp_video['quality'] = self.__get_quality(transcode, video)
if 'size' in transcode: temp_video['size'] = transcode['size']
videos.append(temp_video)
return videos
def __get_quality(self, item, video):
if item.get('width'):
return scraper_utils.width_get_quality(item['width'])
elif item.get('height'):
return scraper_utils.height_get_quality(item['height'])
elif 'name' in item:
if video.video_type == VIDEO_TYPES.MOVIE:
meta = scraper_utils.parse_movie_link(item['name'])
else:
meta = scraper_utils.parse_episode_link(item['name'])
return scraper_utils.height_get_quality(meta['height'])
else:
return QUALITIES.HIGH
def get_url(self, video):
url = super(self.__class__, self).get_url(video)
# check each torrent to see if it's an episode if there is no season url
if url is None and video.video_type == VIDEO_TYPES.EPISODE:
if not scraper_utils.force_title(video):
for item in self.__get_torrents():
if scraper_utils.release_check(video, item['name']):
return 'hash=%s' % (item['hash'])
return url
def _get_episode_url(self, season_url, video):
query = scraper_utils.parse_query(season_url)
if 'hash' in query:
for stream in self.__get_videos(season_url, video):
if scraper_utils.release_check(video, stream['name']):
return season_url
def __get_torrents(self, folder_id=None):
torrents = []
url = scraper_utils.urljoin(self.base_url, FOLDER_URL)
if folder_id is not None:
url += '?id=%s' % (folder_id)
js_data = self._http_get(url, cache_limit=.001)
if 'content' in js_data:
for item in js_data['content']:
if item['type'] == 'folder':
torrents += self.__get_torrents(item['id'])
elif item['type'] == 'torrent':
torrents.append(item)
return torrents
def search(self, video_type, title, year, season=''):
results = []
norm_title = scraper_utils.normalize_title(title)
for item in self.__get_torrents():
if title or year or season:
is_season = re.search('(.*?{delim}season{delim}+(\d+)){delim}?(.*)'.format(delim=DELIM), item['name'], re.I)
if (not is_season and video_type == VIDEO_TYPES.SEASON) or (is_season and video_type == VIDEO_TYPES.MOVIE):
continue
if re.search('{delim}S\d+E\d+{delim}'.format(delim=DELIM), item['name'], re.I): continue # skip episodes
if video_type == VIDEO_TYPES.SEASON:
match_title, match_season, extra = is_season.groups()
if season and int(match_season) != int(season): continue
match_year = ''
match_title = re.sub(DELIM, ' ', match_title)
else:
match = re.search('(.*?)\(?(\d{4})\)?(.*)', item['name'])
if match:
match_title, match_year, extra = match.groups()
else:
match_title, match_year, extra = item['name'], '', ''
else:
match_title, match_year, extra = item['name'], '', ''
match_title = match_title.strip()
extra = extra.strip()
if norm_title in scraper_utils.normalize_title(match_title) and (not year or not match_year or year == match_year):
result_title = match_title
if extra: result_title += ' [%s]' % (extra)
result = {'title': result_title, 'year': match_year, 'url': 'hash=%s' % (item['hash'])}
results.append(result)
return results
@classmethod
def get_settings(cls):
name = cls.get_name()
settings = [
' <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>' % (name, name, i18n('enabled')),
' <setting id="%s-sub_check" type="bool" label=" %s" default="false" visible="eq(-1,true)"/>' % (name, i18n('page_existence')),
]
return settings
def _http_get(self, url, params=None, data=None, allow_redirect=True, cache_limit=8):
if not self.username or not self.password:
return {}
if data is None: data = {}
data.update({'customer_id': self.username, 'pin': self.password})
result = super(self.__class__, self)._http_get(url, params=params, data=data, allow_redirect=allow_redirect, cache_limit=cache_limit)
js_result = scraper_utils.parse_json(result, url)
if 'status' in js_result and js_result['status'] == 'error':
logger.log('Premiumize V2 Scraper Error: %s - (%s)' % (url, js_result.get('message', 'Unknown Error')), log_utils.LOGWARNING)
js_result = {}
return js_result
| [
"[email protected]"
] | |
bc0c564fc708099ee3a1ee9245efc66093f51371 | 52cb25dca22292fce4d3907cc370098d7a57fcc2 | /BAEKJOON/스택/1874_스택 수열.py | cd390374565cc30f00a17f883e2ac40791b3a1f1 | [] | no_license | shjang1013/Algorithm | c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a | 33f2caa6339afc6fc53ea872691145effbce0309 | refs/heads/master | 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # 1부터 n까지의 수를 스택에 넣었다가 뽑아 늘어놓음으로써, 하나의 수열을 만들 수 있다.
# [1,2,3,4,5,6,7,8] => [4,3,6,8,7,5,2,1]
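# Worked example: for n = 8 and the targets 4 3 6 8 7 5 2 1, the loop below
# emits + + + + - - + + - + + - - - - - (printed one operation per line).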
import sys
N = int(input())
stack = []
op = []
count = 1
temp = True
for i in range(N):
n = int(sys.stdin.readline())
while count <= n:
stack.append(count)
op.append("+")
count += 1
    if stack and stack[-1] == n:
stack.pop()
op.append("-")
else:
temp = False
break
if temp == False:
print("NO")
else:
print('\n'.join(op))
| [
"[email protected]"
] | |
bc7d8ecba2ea3d08d1b0d03ab497311104f63738 | ff93e108a358a40d71b426bb9615587dfcab4d03 | /Python_Basic/9_Class/class_basics_1.py | 626a48139eb7a1d686767ec3a31ac348d0fbd5a3 | [] | no_license | soumya9988/Python_Machine_Learning_Basics | 074ff0e8e55fd925ca50e0f9b56dba76fc93d187 | 3711bc8e618123420985d01304e13051d9fb13e0 | refs/heads/master | 2020-03-31T14:31:49.217429 | 2019-11-16T21:55:54 | 2019-11-16T21:55:54 | 152,298,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | class Menu:
def __init__(self, name, items, start_time, end_time):
self.name = name
self.items = items
self.start_time = start_time
self.end_time = end_time
def __repr__(self):
return '{} menu available from {} to {}'.format(self.name,
self.start_time,
self.end_time)
def calculate_bill(self, purchased_items):
sum_of_items = 0
for item in purchased_items:
if item in self.items:
sum_of_items += self.items[item]
return sum_of_items
class Franchise:
def __init__(self, address, menus):
self.address = address
self.menus = menus
def __repr__(self):
return self.address
def available_menus(self, time):
available = []
for menu in self.menus:
if menu.start_time <= time and \
menu.end_time >= time:
available.append(menu.name)
return available
class Business:
def __init__(self, name, franchises):
self.name = name
self.franchises = franchises
items = {'pancakes': 7.50,
'waffles': 9.00,
'burger': 11.00,
'home fries': 4.50,
'coffee': 1.50,
'espresso': 3.00,
'tea': 1.00,
'mimosa': 10.50,
'orange juice': 3.50}
eb_items = {'salumeria plate': 8.00,
'salad and breadsticks (serves 2, no refills)': 14.00,
'pizza with quattro formaggi': 9.00,
'duck ragu': 17.50,
'mushroom ravioli (vegan)': 13.50,
'coffee': 1.50,
'espresso': 3.00,
}
d_items = {'crostini with eggplant caponata': 13.00,
'ceaser salad': 16.00,
'pizza with quattro formaggi': 11.00,
'duck ragu': 19.50,
'mushroom ravioli (vegan)': 13.50,
'coffee': 2.00,
'espresso': 3.00,
}
k_items = {'chicken nuggets': 6.50,
'fusilli with wild mushrooms': 12.00,
'apple juice': 3.00
}
brunch = Menu('brunch', items, 11.00, 16.00)
early_bird = Menu('early_bird', eb_items, 15.00, 18.00)
dinner = Menu('dinner', d_items, 17.00, 23.00)
kids = Menu('kids', k_items, 11.00, 21.00)
print(brunch)
print(early_bird)
print(dinner)
print(kids)
purchased = ['pancakes', 'home fries', 'coffee']
cost = brunch.calculate_bill(purchased)
print('Cost of brunch purchased: ', cost)
cost_eb = early_bird.calculate_bill(['mushroom ravioli (vegan)', 'salumeria plate'])
print('Cost of early bird purchased: ', cost_eb)
flagship_store = Franchise("1232 West End Road", [brunch, dinner, kids, early_bird])
new_installment = Franchise("12 East Mulberry Street", [brunch, dinner, kids, early_bird])
print('You can choose from the following menus at 12 pm: ', new_installment.available_menus(12.00))
print('You can choose from the following menus at 5 pm: ', new_installment.available_menus(17.00))
arepas_menu = {'arepa pabellon': 7.00, 'pernil arepa': 8.50, 'guayanes arepa': 8.00, 'jamon arepa': 7.50}
# Wrap the arepas items in a Menu and pass lists, matching the constructor
# signatures above (the 10.00-20.00 opening hours are assumed here):
arepas = Menu('arepas', arepas_menu, 10.00, 20.00)
arepas_place = Franchise("189 Fitzgerald Avenue", [arepas])
arepas_business = Business("Take a' Arepa", [arepas_place])
print(arepas_place)
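# Quick check of the new business (relies on the assumed hours above):
for franchise in arepas_business.franchises:
    print(franchise, franchise.available_menus(12.00))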
| [
"[email protected]"
] | |
9efe909c265f82499d2be6a904c8fd902fed2bcb | 19236d9e966cf5bafbe5479d613a175211e1dd37 | /cohesity_management_sdk/models/google_cloud_credentials.py | 7e3886d7a27110ac94aca21b0b5ecde8f814ff97 | [
"MIT"
] | permissive | hemanshu-cohesity/management-sdk-python | 236c44fbd9604809027f8ddd0ae6c36e4e727615 | 07c5adee58810979780679065250d82b4b2cdaab | refs/heads/master | 2020-04-29T23:22:08.909550 | 2019-04-10T02:42:16 | 2019-04-10T02:42:16 | 176,474,523 | 0 | 0 | NOASSERTION | 2019-03-19T09:27:14 | 2019-03-19T09:27:12 | null | UTF-8 | Python | false | false | 3,125 | py | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class GoogleCloudCredentials(object):
"""Implementation of the 'Google Cloud Credentials.' model.
Specifies the cloud credentials to connect to a Google service account.
Attributes:
client_email_address (string): Specifies the client email address used
to access Google Cloud Storage.
client_private_key (string): Specifies the private key used to access
Google Cloud Storage that is generated when the service account is
created.
project_id (string): Specifies the project id of an existing Google
Cloud project to store objects.
tier_type (TierType2Enum): Specifies the storage class of GCP.
GoogleTierType specifies the storage class for Google.
'kGoogleStandard' indicates a tier type of Google properties.
'kGoogleNearline' indicates a tier type of Google properties that
is not accessed frequently. 'kGoogleColdline' indicates a tier
type of Google properties that is rarely accessed.
'kGoogleRegional' indicates a tier type of Google properties that
stores frequently accessed data in the same region.
'kGoogleMultiRegional' indicates a tier type of Google properties
that is frequently accessed ("hot" objects) around the world.
"""
# Create a mapping from Model property names to API property names
_names = {
"client_email_address":'clientEmailAddress',
"client_private_key":'clientPrivateKey',
"project_id":'projectId',
"tier_type":'tierType'
}
def __init__(self,
client_email_address=None,
client_private_key=None,
project_id=None,
tier_type=None):
"""Constructor for the GoogleCloudCredentials class"""
# Initialize members of the class
self.client_email_address = client_email_address
self.client_private_key = client_private_key
self.project_id = project_id
self.tier_type = tier_type
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
client_email_address = dictionary.get('clientEmailAddress')
client_private_key = dictionary.get('clientPrivateKey')
project_id = dictionary.get('projectId')
tier_type = dictionary.get('tierType')
# Return an object of this model
return cls(client_email_address,
client_private_key,
project_id,
tier_type)
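
# Minimal usage sketch (placeholder values, not real credentials):
#
#   creds = GoogleCloudCredentials.from_dictionary({
#       'clientEmailAddress': '[email protected]',
#       'clientPrivateKey': '-----BEGIN PRIVATE KEY-----...',
#       'projectId': 'example-project',
#       'tierType': 'kGoogleStandard',
#   })
#   creds.tier_type  # -> 'kGoogleStandard'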
| [
"[email protected]"
] | |
7932c0ccbe52f6dff8961451ec9518ed9b1d0ba0 | c0d5b7f8e48a26c6ddc63c76c43ab5b397c00028 | /piccolo/apps/user/piccolo_app.py | c01ee635f12d335e6a45650fda81dbfe9fab4925 | [
"MIT"
] | permissive | aminalaee/piccolo | f6c5e5e1c128568f7ccb9ad1dfb4746acedae262 | af8d2d45294dcd84f4f9b6028752aa45b699ec15 | refs/heads/master | 2023-07-14T09:44:04.160116 | 2021-07-11T22:56:27 | 2021-07-11T22:56:27 | 386,398,401 | 0 | 0 | MIT | 2021-07-15T19:32:50 | 2021-07-15T19:08:17 | null | UTF-8 | Python | false | false | 729 | py | import os
from piccolo.conf.apps import AppConfig, Command
from .commands.change_password import change_password
from .commands.change_permissions import change_permissions
from .commands.create import create
from .tables import BaseUser
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
APP_CONFIG = AppConfig(
app_name="user",
migrations_folder_path=os.path.join(
CURRENT_DIRECTORY, "piccolo_migrations"
),
table_classes=[BaseUser],
migration_dependencies=[],
commands=[
Command(callable=create, aliases=["new"]),
Command(callable=change_password, aliases=["password", "pass"]),
Command(callable=change_permissions, aliases=["perm", "perms"]),
],
)
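
# With this AppConfig registered, the commands above become Piccolo CLI
# subcommands, e.g. (sketch): `piccolo user create` (alias `new`) or
# `piccolo user change_password` (aliases `password` / `pass`).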
| [
"[email protected]"
] | |
db2cdc19635349844c5e850f4b577b0118d4ae0e | 29a4c1e436bc90deaaf7711e468154597fc379b7 | /modules/trigonometric/doc/fast_sind.py | 46fee4678b0eeccbe7f58327e5d8b321d06b825f | [
"BSL-1.0"
] | permissive | brycelelbach/nt2 | 31bdde2338ebcaa24bb76f542bd0778a620f8e7c | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | refs/heads/master | 2021-01-17T12:41:35.021457 | 2011-04-03T17:37:15 | 2011-04-03T17:37:15 | 1,263,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,873 | py | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::result_of<nt2::meta::floating(T)>::type',
},
'simd_types' : ['real_convert_'],
'special' : ['trigonometric'],
'type_defs' : [],
'types' : ['real_', 'unsigned_int_', 'signed_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 11/02/2011',
'included' : ['#include <nt2/toolbox/trigonometric/include/constants.hpp>', '#include <nt2/include/functions/sind.hpp>'],
'notes' : [],
'stamp' : 'modified by jt the 11/02/2011',
},
'ranges' : {
'default' : [['T(-45)', 'T(45)']],
'unsigned_int_' : [['0', 'T(45)']],
},
'specific_values' : {
'default' : {
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0.5',},
'nt2::_45<T>()' : {'result' : 'nt2::Sqrt_2o_2<r_t>()','ulp_thresh' : '0.5',},
},
'real_' : {
'-nt2::_180<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'-nt2::_45<T>()' : {'result' : '-nt2::Sqrt_2o_2<r_t>()','ulp_thresh' : '0.5',},
'-nt2::_90<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'nt2::Inf<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'nt2::Minf<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'nt2::Nan<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0.5',},
'nt2::_180<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
'nt2::_45<T>()' : {'result' : 'nt2::Sqrt_2o_2<r_t>()','ulp_thresh' : '0.5',},
'nt2::_90<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0.5',},
},
'signed_int_' : {
'-nt2::_45<T>()' : {'result' : '-nt2::Sqrt_2o_2<r_t>()','ulp_thresh' : '0.5',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0.5',},
'nt2::_45<T>()' : {'result' : 'nt2::Sqrt_2o_2<r_t>()','ulp_thresh' : '0.5',},
},
},
'verif_test' : {
'property_call' : {
'real_' : ['nt2::fast_sind(a0)'],
},
'property_value' : {
'real_' : ['nt2::sind(a0)'],
},
'ulp_thresh' : {
'real_' : ['1.0'],
},
},
},
},
]
| [
"[email protected]"
] | |
a937f5d7fc87c0d7d50c3d34d25169594f08b310 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/protocols/haproxy/_v1parser.py | b17099f3cc388868573fb479110f29e78e8bce65 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 4,326 | py | # -*- test-case-name: twisted.protocols.haproxy.test.test_v1parser -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
IProxyParser implementation for version one of the PROXY protocol.
"""
from zope.interface import implementer
from twisted.internet import address
from ._exceptions import (
convertError, InvalidProxyHeader, InvalidNetworkProtocol,
MissingAddressData
)
from . import _info
from . import _interfaces
@implementer(_interfaces.IProxyParser)
class V1Parser(object):
"""
PROXY protocol version one header parser.
Version one of the PROXY protocol is a human readable format represented
by a single, newline delimited binary string that contains all of the
relevant source and destination data.
"""
PROXYSTR = b'PROXY'
UNKNOWN_PROTO = b'UNKNOWN'
TCP4_PROTO = b'TCP4'
TCP6_PROTO = b'TCP6'
ALLOWED_NET_PROTOS = (
TCP4_PROTO,
TCP6_PROTO,
UNKNOWN_PROTO,
)
NEWLINE = b'\r\n'
def __init__(self):
self.buffer = b''
def feed(self, data):
"""
Consume a chunk of data and attempt to parse it.
@param data: A bytestring.
@type data: L{bytes}
@return: A two-tuple containing, in order, a
L{_interfaces.IProxyInfo} and any bytes fed to the
parser that followed the end of the header. Both of these values
are None until a complete header is parsed.
@raises InvalidProxyHeader: If the bytes fed to the parser create an
invalid PROXY header.
"""
self.buffer += data
if len(self.buffer) > 107 and self.NEWLINE not in self.buffer:
raise InvalidProxyHeader()
lines = (self.buffer).split(self.NEWLINE, 1)
if not len(lines) > 1:
return (None, None)
self.buffer = b''
remaining = lines.pop()
header = lines.pop()
info = self.parse(header)
return (info, remaining)
@classmethod
def parse(cls, line):
"""
Parse a bytestring as a full PROXY protocol header line.
@param line: A bytestring that represents a valid HAProxy PROXY
protocol header line.
@type line: bytes
@return: A L{_interfaces.IProxyInfo} containing the parsed data.
@raises InvalidProxyHeader: If the bytestring does not represent a
valid PROXY header.
@raises InvalidNetworkProtocol: When no protocol can be parsed or is
not one of the allowed values.
@raises MissingAddressData: When the protocol is TCP* but the header
does not contain a complete set of addresses and ports.
"""
originalLine = line
proxyStr = None
networkProtocol = None
sourceAddr = None
sourcePort = None
destAddr = None
destPort = None
with convertError(ValueError, InvalidProxyHeader):
proxyStr, line = line.split(b' ', 1)
if proxyStr != cls.PROXYSTR:
raise InvalidProxyHeader()
with convertError(ValueError, InvalidNetworkProtocol):
networkProtocol, line = line.split(b' ', 1)
if networkProtocol not in cls.ALLOWED_NET_PROTOS:
raise InvalidNetworkProtocol()
if networkProtocol == cls.UNKNOWN_PROTO:
return _info.ProxyInfo(originalLine, None, None)
with convertError(ValueError, MissingAddressData):
sourceAddr, line = line.split(b' ', 1)
with convertError(ValueError, MissingAddressData):
destAddr, line = line.split(b' ', 1)
with convertError(ValueError, MissingAddressData):
sourcePort, line = line.split(b' ', 1)
with convertError(ValueError, MissingAddressData):
destPort = line.split(b' ')[0]
if networkProtocol == cls.TCP4_PROTO:
return _info.ProxyInfo(
originalLine,
address.IPv4Address('TCP', sourceAddr, int(sourcePort)),
address.IPv4Address('TCP', destAddr, int(destPort)),
)
return _info.ProxyInfo(
originalLine,
address.IPv6Address('TCP', sourceAddr, int(sourcePort)),
address.IPv6Address('TCP', destAddr, int(destPort)),
)
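
# Example with illustrative values (a complete TCP4 header followed by
# payload bytes):
#
#   parser = V1Parser()
#   info, rest = parser.feed(
#       b'PROXY TCP4 192.0.2.1 198.51.100.1 56324 443\r\nGET /')
#   # `info` wraps the original header line plus two IPv4Address
#   # instances; `rest` == b'GET /' is handed back for normal processing.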
| [
"[email protected]"
] | |
60bd09afebd2a97319aa608a6ee44a7fd37b29a0 | 53bd30eee243a73bf19921739454a177a8bab127 | /excapp/migrations/0002_datahistory.py | 31da2844c89998a546d9f612985ee6253eeef234 | [] | no_license | kirigaikabuto/bck | 61697fbe2edd7e4f5b866628a368693a05f6dad9 | 2b17f8c5d438248d73aaf9dbebd3d5dea827a42d | refs/heads/master | 2021-02-04T01:20:06.470527 | 2021-01-04T15:24:13 | 2021-01-04T15:24:13 | 243,593,240 | 0 | 0 | null | 2020-06-06T01:30:15 | 2020-02-27T18:55:15 | Python | UTF-8 | Python | false | false | 745 | py | # Generated by Django 2.2.10 on 2020-11-24 18:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('excapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DataHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child', to='excapp.Data')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent', to='excapp.Data')),
],
),
]
| [
"[email protected]"
] | |
095a21f6e7e82a88a8fae7ce3fa8b0c6adccd2dd | dd7cd2d6f613cb7ed1fd1fb917cb351e19db46dd | /week8/codingbat/list-1/make_pi.py | 009f45f2d14784488bdf27cd4bd54aa591d2318f | [] | no_license | ashkeyevli/webdevelopment | 0da187b25f669ff00c2de9662d5d28cde6ad270c | 57f32b384af80780f2578f109357c9451c7fc840 | refs/heads/master | 2023-01-24T03:14:08.574052 | 2020-04-18T02:19:08 | 2020-04-18T02:19:08 | 239,058,404 | 1 | 0 | null | 2023-01-07T15:29:57 | 2020-02-08T02:40:35 | Python | UTF-8 | Python | false | false | 32 | py |
def make_pi():
return [3,1,4]
| [
"[email protected]"
] | |
f9b7ee977762ae5cc9961537a33f3dde790fefac | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /ABC/ABC251-300/ABC261/B.py | 82b39fd3459c8526f81da79d543af2d50a7983b2 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 547 | py | def solve(n, a):
for i in range(n - 1):
for j in range(i + 1, n):
ij = a[i][j]
ji = a[j][i]
if ij == ji == "D":
continue
elif ij == "W" and ji == "L":
continue
elif ij == "L" and ji == "W":
continue
else:
return "incorrect"
return "correct"
def main():
n = int(input())
a = [list(input()) for _ in range(n)]
res = solve(n, a)
print(res)
if __name__ == "__main__":
main()
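# Illustrative check: n = 2 with rows "-W" / "L-" is consistent and prints
# "correct", while "-W" / "W-" prints "incorrect".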
| [
"[email protected]"
] | |
604783e523d181a3faade46c376579c5a709636a | 26cc5db81f589bf04c1d6f5e69d4727466af3f5f | /server/api/__init__.py | 93a582e7ee0efeb1854e5a3d351c63126c4d386d | [
"MIT"
] | permissive | CSCDP/family-context-api | 1c7f6834d2e004dc48622dde68b2f3fc9157ee99 | ac3415cdc1ef649c10c85012a2bb2b24ab1d009e | refs/heads/master | 2023-01-08T23:33:20.980857 | 2021-02-18T11:14:03 | 2021-02-18T11:14:03 | 213,444,879 | 3 | 1 | MIT | 2022-12-12T08:53:24 | 2019-10-07T17:26:44 | Python | UTF-8 | Python | false | false | 48 | py | from .auth_controller import check_cookie_auth
| [
"[email protected]"
] | |
b19e296e14053d689e9995c0c3db2c31aae5ef6f | 1c3fb3c990bd07259c1701c709a28ec45cd0c748 | /services/core-api/app/api/exports/response_models.py | 487ed88be6d03eb97e804a09afb57dc551a1dd8e | [
"Apache-2.0"
] | permissive | usingtechnology/mds | f973106232f73f773bb4bb57737094dd32b1bd3c | c9c542f729df21511ee46e184ea752bad0b7d10c | refs/heads/master | 2022-04-13T07:56:59.060216 | 2020-03-21T22:43:05 | 2020-03-21T22:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,537 | py | from app.extensions import api
from flask_restplus import fields
from app.api.mines.response_models import MINE_TENURE_TYPE_CODE_MODEL, MINE_COMMODITY_CODE_MODEL, MINE_DISTURBANCE_CODE_MODEL, MINE_STATUS_CODE_MODEL, MINE_REGION_OPTION, MINE_REPORT_DEFINITION_CATEGORIES, MINE_REPORT_DEFINITION_MODEL, MINE_REPORT_SUBMISSION_STATUS
from app.api.mines.permits.response_models import PERMIT_STATUS_CODE_MODEL
from app.api.compliance.response_models import COMPLIANCE_ARTICLE_MODEL
from app.api.incidents.response_models import MINE_INCIDENT_CATEGORY_MODEL, MINE_INCIDENT_DETERMINATION_TYPE_MODEL, MINE_INCIDENT_STATUS_CODE_MODEL, MINE_INCIDENT_DOCUMENT_TYPE_CODE_MODEL, MINE_INCIDENT_FOLLOWUP_INVESTIGATION_TYPE_MODEL
from app.api.parties.response_models import MINE_PARTY_APPT_TYPE_MODEL, SUB_DIVISION_CODE_MODEL
from app.api.variances.response_models import VARIANCE_APPLICATION_STATUS_CODE, VARIANCE_DOCUMENT_CATEGORY_CODE
from app.api.now_applications.response_models import NOW_APPLICATION_DOCUMENT_TYPE_MODEL, NOW_APPLICATION_REVIEW_TYPES, NOW_APPLICATION_TYPES, UNIT_TYPES, NOW_ACTIVITY_TYPES, NOW_APPLICATION_STATUS_CODES, UNDERGROUND_EXPLORATION_TYPES, NOW_APPLICATION_PERMIT_TYPES, NOW_APPLICATION_REVIEW_TYPES, APPLICATION_PROGRESS_STATUS_CODES
STATIC_CONTENT_MODEL = api.model(
'StaticContentModel', {
'mineDisturbanceOptions':
fields.List(fields.Nested(MINE_DISTURBANCE_CODE_MODEL), attribute='MineDisturbanceCode'),
'mineCommodityOptions':
fields.List(fields.Nested(MINE_COMMODITY_CODE_MODEL), attribute='MineCommodityCode'),
'mineStatusOptions':
fields.List(fields.Nested(MINE_STATUS_CODE_MODEL), attribute='MineStatusXref'),
'mineRegionOptions':
fields.List(fields.Nested(MINE_REGION_OPTION), attribute='MineRegionCode'),
'mineTenureTypes':
fields.List(fields.Nested(MINE_TENURE_TYPE_CODE_MODEL), attribute='MineTenureTypeCode'),
'permitStatusCodes':
fields.List(fields.Nested(PERMIT_STATUS_CODE_MODEL), attribute='PermitStatusCode'),
'incidentDocumentTypeOptions':
fields.List(
fields.Nested(MINE_INCIDENT_DOCUMENT_TYPE_CODE_MODEL),
attribute='MineIncidentDocumentTypeCode'),
'incidentFollowupActionOptions':
fields.List(
fields.Nested(MINE_INCIDENT_FOLLOWUP_INVESTIGATION_TYPE_MODEL),
attribute='MineIncidentFollowupInvestigationType'),
'incidentDeterminationOptions':
fields.List(
fields.Nested(MINE_INCIDENT_DETERMINATION_TYPE_MODEL),
attribute='MineIncidentDeterminationType'),
'incidentStatusCodeOptions':
fields.List(
fields.Nested(MINE_INCIDENT_STATUS_CODE_MODEL), attribute='MineIncidentStatusCode'),
'incidentCategoryCodeOptions':
fields.List(fields.Nested(MINE_INCIDENT_CATEGORY_MODEL), attribute='MineIncidentCategory'),
'provinceOptions':
fields.List(fields.Nested(SUB_DIVISION_CODE_MODEL), attribute='SubDivisionCode'),
'complianceCodes':
fields.List(fields.Nested(COMPLIANCE_ARTICLE_MODEL), attribute='ComplianceArticle'),
'varianceStatusOptions':
fields.List(
fields.Nested(VARIANCE_APPLICATION_STATUS_CODE),
attribute='VarianceApplicationStatusCode'),
'varianceDocumentCategoryOptions':
fields.List(
fields.Nested(VARIANCE_DOCUMENT_CATEGORY_CODE),
attribute='VarianceDocumentCategoryCode'),
'mineReportDefinitionOptions':
fields.List(fields.Nested(MINE_REPORT_DEFINITION_MODEL), attribute='MineReportDefinition'),
'mineReportStatusOptions':
fields.List(
fields.Nested(MINE_REPORT_SUBMISSION_STATUS),
attribute='MineReportSubmissionStatusCode'),
'mineReportCategoryOptions':
fields.List(
fields.Nested(MINE_REPORT_DEFINITION_CATEGORIES), attribute='MineReportCategory'),
'noticeOfWorkActivityTypeOptions':
fields.List(fields.Nested(NOW_ACTIVITY_TYPES), attribute='ActivityType'),
'noticeOfWorkUnitTypeOptions':
fields.List(fields.Nested(UNIT_TYPES), attribute='UnitType'),
'noticeOfWorkApplicationTypeOptions':
fields.List(fields.Nested(NOW_APPLICATION_TYPES), attribute='NOWApplicationType'),
'noticeOfWorkApplicationStatusOptions':
fields.List(fields.Nested(NOW_APPLICATION_STATUS_CODES), attribute='NOWApplicationStatus'),
'noticeOfWorkApplicationDocumentTypeOptions':
fields.List(
fields.Nested(NOW_APPLICATION_DOCUMENT_TYPE_MODEL),
attribute='NOWApplicationDocumentType'),
'noticeOfWorkUndergroundExplorationTypeOptions':
fields.List(
fields.Nested(UNDERGROUND_EXPLORATION_TYPES), attribute='UndergroundExplorationType'),
'noticeOfWorkApplicationProgressStatusCodeOptions':
fields.List(
fields.Nested(APPLICATION_PROGRESS_STATUS_CODES),
attribute='NOWApplicationProgressStatus'),
'noticeOfWorkApplicationPermitTypeOptions':
fields.List(
fields.Nested(NOW_APPLICATION_PERMIT_TYPES), attribute='NOWApplicationPermitType'),
'noticeOfWorkApplicationReviewOptions':
fields.List(
fields.Nested(NOW_APPLICATION_REVIEW_TYPES), attribute='NOWApplicationReviewType'),
'partyRelationshipTypes':
fields.List(
fields.Nested(MINE_PARTY_APPT_TYPE_MODEL), attribute='MinePartyAppointmentType')
})
| [
"[email protected]"
] | |
3c46e5cd8e2c42fe17964f81fddb273c2e6424fc | debc9ddbb577ed68e907cfdb85e0f2c801fdc8af | /rx/linq/observable/onerrorresumenext.py | 490579fe1b84b19f1004b2c54d19d968ec1a33e2 | [
"Apache-2.0"
] | permissive | pstiasny/RxPY | 370eefc733de1241c1eac0dcdf8fa10780f71072 | 2bcf25ecbce1fbcd49d119bd73375572fbf9df5a | refs/heads/master | 2021-01-09T20:01:18.664034 | 2014-07-27T22:19:39 | 2014-07-27T22:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | from six import add_metaclass
from rx.observable import Observable
from rx.anonymousobservable import AnonymousObservable
from rx.disposables import CompositeDisposable, SingleAssignmentDisposable, \
SerialDisposable
from rx.concurrency import immediate_scheduler
from rx.internal import ExtensionMethod
@add_metaclass(ExtensionMethod)
class ObservableOnErrorResumeNext(Observable):
def __init__(self, subscribe):
self.on_error_resume_next = self.__on_error_resume_next
def __on_error_resume_next(self, second):
"""Continues an observable sequence that is terminated normally or by
an exception with the next observable sequence.
Keyword arguments:
second -- Second observable sequence used to produce results after the first sequence terminates.
Returns an observable sequence that concatenates the first and second sequence, even if the first sequence terminates exceptionally.
"""
if not second:
raise Exception('Second observable is required')
return Observable.on_error_resume_next([self, second])
@classmethod
def on_error_resume_next(cls, *args):
"""Continues an observable sequence that is terminated normally or by
an exception with the next observable sequence.
1 - res = Observable.on_error_resume_next(xs, ys, zs)
2 - res = Observable.on_error_resume_next([xs, ys, zs])
Returns an observable sequence that concatenates the source sequences,
even if a sequence terminates exceptionally.
"""
if args and isinstance(args[0], list):
sources = args[0]
else:
sources = list(args)
def subscribe(observer):
subscription = SerialDisposable()
pos = [0]
def action(this, state=None):
if pos[0] < len(sources):
current = sources[pos[0]]
pos[0] += 1
d = SingleAssignmentDisposable()
subscription.disposable = d
d.disposable = current.subscribe(observer.on_next, lambda ex: this(), lambda: this())
else:
observer.on_completed()
cancelable = immediate_scheduler.schedule_recursive(action)
return CompositeDisposable(subscription, cancelable)
return AnonymousObservable(subscribe)
| [
"[email protected]"
] | |
0f8e34d48b3d1c84947d5930430793223a87c3ef | 5b1e3abd07c4c048e429e0c58c2319e947ab8ffa | /lbforum/__init__.py | 9107c2bfe8fffe679cdfee154b7e961e9f59fd29 | [
"BSD-3-Clause"
] | permissive | MechanisM/LBForum | bdd84890faf28a5a7343fe4c8f3029a0423a9e69 | 5be3aedbba5c4974abc10c8bde245502e7484681 | refs/heads/master | 2021-01-16T23:14:48.432494 | 2011-08-05T13:57:43 | 2011-08-05T13:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | __version__ = '0.9.13'
| [
"[email protected]"
] | |
b6ebbb47ce8ed3feb705ac92c37cae8fce6f828d | 842184bc3c73bef3dd5c2ab523eb33f34b7809ea | /ledger_processor/test_ledger_processor.py | 0f1549e715a92ae4a28e218d752a9cddb2608f8c | [] | no_license | fawkesley/h-work-simulation | 1cb51515fcb57d1f12c13178b049c4e7f8d1702d | 3f150d773a73a2dc2646e7b9c102f298e26cb936 | refs/heads/master | 2021-05-29T22:12:30.148431 | 2015-06-12T16:53:32 | 2015-06-13T07:51:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | import datetime
from decimal import Decimal
from os.path import dirname, join as pjoin
from nose.tools import assert_equal, assert_raises
from .ledger_processor import LedgerProcessor
EXAMPLE_LEDGER_FILENAME = pjoin(dirname(__file__), 'example_ledger.csv')
TEST_CASES = [
('john', datetime.date(2015, 1, 16), Decimal('0.00')),
('mary', datetime.date(2015, 1, 16), Decimal('0.00')),
('supermarket', datetime.date(2015, 1, 16), Decimal('0.00')),
('insurance', datetime.date(2015, 1, 16), Decimal('0.00')),
('mary', datetime.date(2015, 1, 17), Decimal('125.00')),
('john', datetime.date(2015, 1, 17), Decimal('-125.00')),
('john', datetime.date(2015, 1, 18), Decimal('-145.00')),
('supermarket', datetime.date(2015, 1, 18), Decimal('20.00')),
('mary', datetime.date(2015, 1, 18), Decimal('25.00')),
('insurance', datetime.date(2015, 1, 18), Decimal('100.00')),
]
def test_get_balance():
for account, test_date, expected_balance in TEST_CASES:
yield _assert_balance_equal, account, test_date, expected_balance
def _assert_balance_equal(account, test_date, expected_balance):
with open(EXAMPLE_LEDGER_FILENAME, 'r') as f:
ledger = LedgerProcessor(f)
got_balance = ledger.get_balance(account, test_date)
assert_equal(expected_balance, got_balance)
def test_get_all_balances():
with open(EXAMPLE_LEDGER_FILENAME, 'r') as f:
ledger = LedgerProcessor(f)
final_balances = ledger.get_all_balances(datetime.date(2015, 1, 18))
expected_final_balances = {
'john': Decimal('-145.00'),
'mary': Decimal('25.00'),
'supermarket': Decimal('20.00'),
'insurance': Decimal('100.00'),
}
assert_equal(expected_final_balances, final_balances)
def test_ledger_cant_be_used_twice():
with open(EXAMPLE_LEDGER_FILENAME, 'r') as f:
ledger = LedgerProcessor(f)
def use_ledger():
ledger.get_all_balances(datetime.date(2015, 1, 18))
use_ledger()
assert_raises(RuntimeError, use_ledger)
| [
"[email protected]"
] | |
6aa3828a32163e80c8d12eaeda888341881c4a0d | f26322b62be667711ca13f3ea73d89b79011459b | /py/models/__init__.py | b4d9c72fe94e27e7ca8052d7c88a8308c3887b88 | [
"Apache-2.0"
] | permissive | zjZSTU/SPP-net | c37c3dcd05bd5fb04cf7ef7efda4ac7dfdc5f89d | e42166dddd37b9493a5aacef18303aa850b21be7 | refs/heads/master | 2022-07-09T22:46:16.254859 | 2020-04-04T15:09:19 | 2020-04-04T15:09:19 | 250,167,176 | 3 | 2 | Apache-2.0 | 2022-06-22T01:36:06 | 2020-03-26T05:12:56 | Python | UTF-8 | Python | false | false | 109 | py | # -*- coding: utf-8 -*-
"""
@date: 2020/3/26 4:20 PM
@file: __init__.py
@author: zj
@description:
""" | [
"[email protected]"
] | |
d4fdbed76383d85a2fa0b5a4b0e41628313a2037 | 080bbe77da955b3917435c25fc63b90b0f3c724e | /botorch/test_functions/multi_objective.py | 5ea244ee18797620b86ffb02739a044648f09561 | [
"MIT"
] | permissive | irinaespejo/botorch | 3d15d962ff0f5bb34fbd11b2eb7549db755af705 | e4dcf603fdaf83f0e5f8b9b392f943c89dfff7eb | refs/heads/master | 2023-07-11T18:02:11.853790 | 2021-08-19T15:57:21 | 2021-08-19T15:58:12 | 316,017,084 | 0 | 0 | MIT | 2020-11-25T18:02:11 | 2020-11-25T18:02:09 | null | UTF-8 | Python | false | false | 25,982 | py | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-objective optimization benchmark problems.
References
.. [Deb2005dtlz]
K. Deb, L. Thiele, M. Laumanns, E. Zitzler, A. Abraham, L. Jain, R. Goldberg.
"Scalable test problems for evolutionary multi-objective optimization"
in Evolutionary Multiobjective Optimization, London, U.K.: Springer-Verlag,
pp. 105-145, 2005.
.. [Deb2005robust]
K. Deb, H. Gupta. "Searching for Robust Pareto-Optimal Solutions in
Multi-objective Optimization" in Evolutionary Multi-Criterion Optimization,
Springer-Berlin, pp. 150-164, 2005.
.. [GarridoMerchan2020]
E. C. Garrido-Merch ́an and D. Hern ́andez-Lobato. Parallel Predictive Entropy
Search for Multi-objective Bayesian Optimization with Constraints.
arXiv e-prints, arXiv:2004.00601, Apr. 2020.
.. [Gelbart2014]
Michael A. Gelbart, Jasper Snoek, and Ryan P. Adams. 2014. Bayesian
optimization with unknown constraints. In Proceedings of the Thirtieth
Conference on Uncertainty in Artificial Intelligence (UAI’14).
AUAI Press, Arlington, Virginia, USA, 250–259.
.. [Oszycka1995]
A. Osyczka, S. Kundu. 1995. A new method to solve generalized multicriteria
optimization problems using the simple genetic algorithm. In Structural
Optimization 10. 94–99.
.. [Tanabe2020]
Ryoji Tanabe, Hisao Ishibuchi, An easy-to-use real-world multi-objective
optimization problem suite, Applied Soft Computing,Volume 89, 2020.
.. [Yang2019a]
K. Yang, M. Emmerich, A. Deutz, and T. Bäck. 2019.
"Multi-Objective Bayesian Global Optimization using expected hypervolume
improvement gradient" in Swarm and evolutionary computation 44, pp. 945--956,
2019.
.. [Zitzler2000]
E. Zitzler, K. Deb, and L. Thiele, “Comparison of multiobjective
evolutionary algorithms: Empirical results,” Evol. Comput., vol. 8, no. 2,
pp. 173–195, 2000.
"""
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from typing import List, Optional
import torch
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.test_functions.synthetic import Branin
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from botorch.utils.transforms import unnormalize
from scipy.special import gamma
from torch import Tensor
class BraninCurrin(MultiObjectiveTestProblem):
r"""Two objective problem composed of the Branin and Currin functions.
Branin (rescaled):
f(x) = (
15*x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)
/ pi - 5
        ) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5)
Currin:
f(x) = (1 - exp(-1 / (2 * x_1))) * (
2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60
) / 100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20
"""
dim = 2
num_objectives = 2
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_ref_point = [18.0, 6.0]
_max_hv = 59.36011874867746 # this is approximated using NSGA-II
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""Constructor for Branin-Currin.
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
"""
super().__init__(noise_std=noise_std, negate=negate)
self._branin = Branin()
def _rescaled_branin(self, X: Tensor) -> Tensor:
# return to Branin bounds
x_0 = 15 * X[..., 0] - 5
x_1 = 15 * X[..., 1]
return self._branin(torch.stack([x_0, x_1], dim=-1))
@staticmethod
def _currin(X: Tensor) -> Tensor:
x_0 = X[..., 0]
x_1 = X[..., 1]
factor1 = 1 - torch.exp(-1 / (2 * x_1))
numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60
denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20
return factor1 * numer / denom
def evaluate_true(self, X: Tensor) -> Tensor:
        # Branin rescaled with inputs to [0, 1]^2
branin = self._rescaled_branin(X=X)
currin = self._currin(X=X)
return torch.stack([branin, currin], dim=-1)
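
# (Sketch) evaluating the problem on random points in the unit square:
#
#   problem = BraninCurrin(negate=True)
#   X = torch.rand(4, problem.dim)
#   Y = problem(X)  # shape (4, 2): negated Branin and Currin values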
class DH(MultiObjectiveTestProblem, ABC):
r"""Base class for DH problems for robust multi-objective optimization.
In their paper, [Deb2005robust]_ consider these problems under a mean-robustness
setting, and use uniformly distributed input perturbations from the box with
edge lengths `delta_0 = delta`, `delta_i = 2 * delta, i > 0`, with `delta` ranging
up to `0.01` for DH1 and DH2, and `delta = 0.03` for DH3 and DH4.
These are d-dimensional problems with two objectives:
f_0(x) = x_0
f_1(x) = h(x) + g(x) * S(x) for DH1 and DH2
f_1(x) = h(x) * (g(x) + S(x)) for DH3 and DH4
The goal is to minimize both objectives. See [Deb2005robust]_ for more details
on DH. The reference points were set using `infer_reference_point`.
"""
num_objectives = 2
    _ref_point: List[float] = [1.1, 1.1]
_x_1_lb: float
_area_under_curve: float
_min_dim: int
def __init__(
self,
dim: int,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if dim < self._min_dim:
raise ValueError(f"dim must be >= {self._min_dim}, but got dim={dim}!")
self.dim = dim
self._bounds = [(0.0, 1.0), (self._x_1_lb, 1.0)] + [
(-1.0, 1.0) for _ in range(dim - 2)
]
# max_hv is the area of the box minus the area of the curve formed by the PF.
self._max_hv = self._ref_point[0] * self._ref_point[1] - self._area_under_curve
super().__init__(noise_std=noise_std, negate=negate)
@abstractmethod
def _h(self, X: Tensor) -> Tensor:
pass # pragma: no cover
@abstractmethod
def _g(self, X: Tensor) -> Tensor:
pass # pragma: no cover
@abstractmethod
def _S(self, X: Tensor) -> Tensor:
pass # pragma: no cover
class DH1(DH):
r"""DH1 test problem.
d-dimensional problem evaluated on `[0, 1] x [-1, 1]^{d-1}`:
f_0(x) = x_0
f_1(x) = h(x_0) + g(x) * S(x_0)
h(x_0) = 1 - x_0^2
g(x) = \sum_{i=1}^{d-1} (10 + x_i^2 - 10 * cos(4 * pi * x_i))
S(x_0) = alpha / (0.2 + x_0) + beta * x_0^2
where alpha = 1 and beta = 1.
The Pareto front corresponds to the equation `f_1 = 1 - f_0^2`, and it is found at
`x_i = 0` for `i > 0` and any value of `x_0` in `(0, 1]`.
"""
alpha = 1.0
beta = 1.0
_x_1_lb = -1.0
_area_under_curve = 2.0 / 3.0
_min_dim = 2
def _h(self, X: Tensor) -> Tensor:
return 1 - X[..., 0].pow(2)
def _g(self, X: Tensor) -> Tensor:
x_1_to = X[..., 1:]
return torch.sum(
10 + x_1_to.pow(2) - 10 * torch.cos(4 * math.pi * x_1_to),
dim=-1,
)
def _S(self, X: Tensor) -> Tensor:
x_0 = X[..., 0]
return self.alpha / (0.2 + x_0) + self.beta * x_0.pow(2)
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
# This may encounter 0 / 0, which we set to 0.
f_1 = self._h(X) + torch.nan_to_num(self._g(X) * self._S(X))
return torch.stack([f_0, f_1], dim=-1)
class DH2(DH1):
r"""DH2 test problem.
This is identical to DH1 except for having `beta = 10.0`.
"""
beta = 10.0
class DH3(DH):
r"""DH3 test problem.
d-dimensional problem evaluated on `[0, 1]^2 x [-1, 1]^{d-2}`:
f_0(x) = x_0
f_1(x) = h(x_1) * (g(x) + S(x_0))
h(x_1) = 2 - 0.8 * exp(-((x_1 - 0.35) / 0.25)^2) - exp(-((x_1 - 0.85) / 0.03)^2)
g(x) = \sum_{i=2}^{d-1} (50 * x_i^2)
S(x_0) = 1 - sqrt(x_0)
The Pareto front is found at `x_i = 0` for `i > 1`. There's a local and a global
Pareto front, which are found at `x_1 = 0.35` and `x_1 = 0.85`, respectively.
The approximate relationships between the objectives at local and global Pareto
fronts are given by `f_1 = 1.2 (1 - sqrt(f_0))` and `f_1 = 1 - f_0`, respectively.
The specific values on the Pareto fronts can be found by varying `x_0`.
"""
_x_1_lb = 0.0
_area_under_curve = 0.328449169794718
_min_dim = 3
@staticmethod
def _exp_args(x: Tensor) -> Tensor:
exp_arg_1 = -((x - 0.35) / 0.25).pow(2)
exp_arg_2 = -((x - 0.85) / 0.03).pow(2)
return exp_arg_1, exp_arg_2
def _h(self, X: Tensor) -> Tensor:
exp_arg_1, exp_arg_2 = self._exp_args(X[..., 1])
return 2 - 0.8 * torch.exp(exp_arg_1) - torch.exp(exp_arg_2)
def _g(self, X: Tensor) -> Tensor:
return 50 * X[..., 2:].pow(2).sum(dim=-1)
def _S(self, X: Tensor) -> Tensor:
return 1 - X[..., 0].sqrt()
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
f_1 = self._h(X) * (self._g(X) + self._S(X))
return torch.stack([f_0, f_1], dim=-1)
class DH4(DH3):
r"""DH4 test problem.
This is similar to DH3 except that it is evaluated on
`[0, 1] x [-0.15, 1] x [-1, 1]^{d-2}` and:
h(x_0, x_1) = 2 - x_0 - 0.8 * exp(-((x_0 + x_1 - 0.35) / 0.25)^2)
- exp(-((x_0 + x_1 - 0.85) / 0.03)^2)
The Pareto front is found at `x_i = 0` for `i > 2`, with the local one being
near `x_0 + x_1 = 0.35` and the global one near `x_0 + x_1 = 0.85`.
"""
_x_1_lb = -0.15
_area_under_curve = 0.22845
def _h(self, X: Tensor) -> Tensor:
exp_arg_1, exp_arg_2 = self._exp_args(X[..., :2].sum(dim=-1))
return 2 - X[..., 0] - 0.8 * torch.exp(exp_arg_1) - torch.exp(exp_arg_2)
class DTLZ(MultiObjectiveTestProblem):
r"""Base class for DTLZ problems.
See [Deb2005dtlz]_ for more details on DTLZ.
"""
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if dim <= num_objectives:
raise ValueError(
f"dim must be > num_objectives, but got {dim} and {num_objectives}."
)
self.num_objectives = num_objectives
self.dim = dim
self.k = self.dim - self.num_objectives + 1
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
self._ref_point = [self._ref_val for _ in range(num_objectives)]
super().__init__(noise_std=noise_std, negate=negate)
class DTLZ1(DTLZ):
r"""DLTZ1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = 0.5 * x_0 * (1 + g(x))
f_1(x) = 0.5 * (1 - x_0) * (1 + g(x))
g(x) = 100 * \sum_{i=m}^{d-1} (
k + (x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))
)
where k = d - m + 1.
The pareto front is given by the line (or hyperplane) \sum_i f_i(x) = 0.5.
    The goal is to minimize both objectives. The reference point comes from [Yang2019a]_.
"""
_ref_val = 400.0
@property
def _max_hv(self) -> float:
return self._ref_val ** self.num_objectives - 1 / 2 ** self.num_objectives
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
X_m_minus_half = X_m - 0.5
sum_term = (
X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)
).sum(dim=-1)
g_X_m = 100 * (self.k + sum_term)
g_X_m_term = 0.5 * (1 + g_X_m)
fs = []
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_m_term * X[..., :idx].prod(dim=-1)
if i > 0:
f_i *= 1 - X[..., idx]
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
The pareto points randomly sampled from the hyperplane sum_i f(x_i) = 0.5.
"""
f_X = 0.5 * sample_simplex(
n=n,
d=self.num_objectives,
qmc=True,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
)
if self.negate:
f_X *= -1
return f_X
class DTLZ2(DTLZ):
r"""DLTZ2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)
f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)
g(x) = \sum_{i=m}^{d-1} (x_i - 0.5)^2
The pareto front is given by the unit hypersphere \sum{i} f_i^2 = 1.
Note: the pareto front is completely concave. The goal is to minimize
both objectives.
"""
_ref_val = 1.1
@property
def _max_hv(self) -> float:
# hypercube - volume of hypersphere in R^d such that all coordinates are
# positive
hypercube_vol = self._ref_val ** self.num_objectives
pos_hypersphere_vol = (
math.pi ** (self.num_objectives / 2)
/ gamma(self.num_objectives / 2 + 1)
/ 2 ** self.num_objectives
)
return hypercube_vol - pos_hypersphere_vol
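
    # Worked example of the formula above for num_objectives = 2: the
    # reference box has volume 1.1 ** 2 = 1.21 and the positive quadrant
    # of the unit disk has area pi / 4 ~= 0.7854, so _max_hv ~= 0.4246.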
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
g_X = (X_m - 0.5).pow(2).sum(dim=-1)
g_X_plus1 = 1 + g_X
fs = []
pi_over_2 = math.pi / 2
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_plus1.clone()
f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
if i > 0:
f_i *= torch.sin(X[..., idx] * pi_over_2)
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
The pareto points are randomly sampled from the hypersphere's
positive section.
"""
f_X = sample_hypersphere(
n=n,
d=self.num_objectives,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
qmc=True,
).abs()
if self.negate:
f_X *= -1
return f_X
class VehicleSafety(MultiObjectiveTestProblem):
r"""Optimize Vehicle crash-worthiness.
See [Tanabe2020]_ for details.
The reference point is 1.1 * the nadir point from
approximate front provided by [Tanabe2020]_.
The maximum hypervolume is computed using the approximate
pareto front from [Tanabe2020]_.
"""
_ref_point = [1864.72022, 11.81993945, 0.2903999384]
_max_hv = 246.81607081187002
_bounds = [(1.0, 3.0)] * 5
dim = 5
num_objectives = 3
def evaluate_true(self, X: Tensor) -> Tensor:
X1, X2, X3, X4, X5 = torch.split(X, 1, -1)
f1 = (
1640.2823
+ 2.3573285 * X1
+ 2.3220035 * X2
+ 4.5688768 * X3
+ 7.7213633 * X4
+ 4.4559504 * X5
)
f2 = (
6.5856
+ 1.15 * X1
- 1.0427 * X2
+ 0.9738 * X3
+ 0.8364 * X4
- 0.3695 * X1 * X4
+ 0.0861 * X1 * X5
+ 0.3628 * X2 * X4
- 0.1106 * X1.pow(2)
- 0.3437 * X3.pow(2)
+ 0.1764 * X4.pow(2)
)
f3 = (
-0.0551
+ 0.0181 * X1
+ 0.1024 * X2
+ 0.0421 * X3
- 0.0073 * X1 * X2
+ 0.024 * X2 * X3
- 0.0118 * X2 * X4
- 0.0204 * X3 * X4
- 0.008 * X3 * X5
- 0.0241 * X2.pow(2)
+ 0.0109 * X4.pow(2)
)
f_X = torch.cat([f1, f2, f3], dim=-1)
return f_X
class ZDT(MultiObjectiveTestProblem):
r"""Base class for ZDT problems.
See [Zitzler2000]_ for more details on ZDT.
"""
_ref_point = [11.0, 11.0]
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if num_objectives != 2:
raise NotImplementedError(
f"{type(self).__name__} currently only supports 2 objectives."
)
if dim < num_objectives:
raise ValueError(
f"dim must be >= num_objectives, but got {dim} and {num_objectives}"
)
self.num_objectives = num_objectives
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
@staticmethod
def _g(X: Tensor) -> Tensor:
return 1 + 9 * X[..., 1:].mean(dim=-1)
class ZDT1(ZDT):
r"""ZDT1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - sqrt(x_0 / g(x))
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is convex.
"""
_max_hv = 120 + 2 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).sqrt())
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.sqrt()
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT2(ZDT):
r"""ZDT2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - (x_0 / g(x))^2)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is concave.
"""
_max_hv = 120 + 1 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).pow(2))
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.pow(2)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT3(ZDT):
r"""ZDT3 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
        f_1(x) = 1 - sqrt(x_0 / g(x)) - x_0 / g(x) * sin(10 * pi * x_0)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front consists of several discontinuous convex parts.
"""
_max_hv = 128.77811613069076060
_parts = [
# this interval includes both end points
[0, 0.0830015349],
# this interval includes only the right end points
[0.1822287280, 0.2577623634],
[0.4093136748, 0.4538821041],
[0.6183967944, 0.6525117038],
[0.8233317983, 0.8518328654],
]
# nugget to make sure linspace returns elements within the specified range
_eps = 1e-6
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = 1 - (f_0 / g).sqrt() - f_0 / g * torch.sin(10 * math.pi * f_0)
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
n_parts = len(self._parts)
n_per_part = torch.full(
torch.Size([n_parts]),
n // n_parts,
dtype=torch.long,
device=self.bounds.device,
)
left_over = n % n_parts
n_per_part[:left_over] += 1
f_0s = []
for i, p in enumerate(self._parts):
left, right = p
f_0s.append(
torch.linspace(
left + self._eps,
right - self._eps,
n_per_part[i],
dtype=self.bounds.dtype,
device=self.bounds.device,
)
)
f_0 = torch.cat(f_0s, dim=0)
f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
# ------ Constrained Multi-Objective Test Problems ----- #
class BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained BNH problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.0, 5.0), (0.0, 3.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.stack(
[4.0 * (X ** 2).sum(dim=-1), ((X - 5.0) ** 2).sum(dim=-1)], dim=-1
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 25.0 - (X[..., 0] - 5.0) ** 2 - X[..., 1] ** 2
c2 = (X[..., 0] - 8.0) ** 2 + (X[..., 1] + 3.0) ** 2 - 7.7
return torch.stack([c1, c2], dim=-1)
class SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained SRN problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(-20.0, 20.0), (-20.0, 20.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)
obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 225.0 - ((X ** 2) ** 2).sum(dim=-1)
c2 = -10.0 - X[..., 0] + 3 * X[..., 1]
return torch.stack([c1, c2], dim=-1)
class CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained CONSTR problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.1, 10.0), (0.0, 5.0)]
_ref_point = [10.0, 10.0]
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = X[..., 0]
obj2 = (1.0 + X[..., 1]) / X[..., 0]
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 9.0 * X[..., 0] + X[..., 1] - 6.0
c2 = 9.0 * X[..., 0] - X[..., 1] - 1.0
return torch.stack([c1, c2], dim=-1)
class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):
r"""Constrained Branin Currin Function.
This uses the disk constraint from [Gelbart2014]_.
"""
dim = 2
num_objectives = 2
num_constraints = 1
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_con_bounds = [(-5.0, 10.0), (0.0, 15.0)]
_ref_point = [80.0, 12.0]
_max_hv = 608.4004237022673 # from NSGA-II with 90k evaluations
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
super().__init__(noise_std=noise_std, negate=negate)
con_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)
self.register_buffer("con_bounds", con_bounds)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
X_tf = unnormalize(X, self.con_bounds)
return 50 - (X_tf[..., 0:1] - 2.5).pow(2) - (X_tf[..., 1:2] - 7.5).pow(2)
class C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):
num_constraints = 1
_r = 0.2
# approximate from nsga-ii, TODO: replace with analytic
_max_hv = 0.3996406303723544
def evaluate_slack_true(self, X: Tensor) -> Tensor:
if X.ndim > 2:
raise NotImplementedError("Batch X is not supported.")
f_X = self.evaluate_true(X)
term1 = (f_X - 1).pow(2)
mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())
indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)
indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)
term2_inner = (
f_X.unsqueeze(1)
.expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])
.gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))
)
term2 = (term2_inner.pow(2) - self._r ** 2).sum(dim=-1)
min1 = (term1 + term2).min(dim=-1).values
min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r ** 2).sum(dim=-1)
return -torch.min(min1, min2).unsqueeze(-1)
class OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""
The OSY test problem from [Oszycka1995]_.
Implementation from
https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/osy.py
Note that this implementation assumes minimization, so please choose negate=True.
"""
dim = 6
num_constraints = 6
num_objectives = 2
_bounds = [
(0.0, 10.0),
(0.0, 10.0),
(1.0, 5.0),
(0.0, 6.0),
(1.0, 5.0),
(0.0, 10.0),
]
_ref_point = [-75.0, 75.0]
def evaluate_true(self, X: Tensor) -> Tensor:
f1 = -(
25 * (X[..., 0] - 2) ** 2
+ (X[..., 1] - 2) ** 2
+ (X[..., 2] - 1) ** 2
+ (X[..., 3] - 4) ** 2
+ (X[..., 4] - 1) ** 2
)
f2 = (X ** 2).sum(-1)
return torch.stack([f1, f2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
g1 = X[..., 0] + X[..., 1] - 2.0
g2 = 6.0 - X[..., 0] - X[..., 1]
g3 = 2.0 - X[..., 1] + X[..., 0]
g4 = 2.0 - X[..., 0] + 3.0 * X[..., 1]
g5 = 4.0 - (X[..., 2] - 3.0) ** 2 - X[..., 3]
g6 = (X[..., 4] - 3.0) ** 2 + X[..., 5] - 4.0
return torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)
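
# (Sketch) constrained problems pair objectives with constraint slacks;
# a point is feasible when every slack is non-negative, e.g.:
#
#   problem = OSY(negate=True)
#   X = unnormalize(torch.rand(8, problem.dim), problem.bounds)
#   feasible = (problem.evaluate_slack(X) >= 0).all(dim=-1)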
| [
"[email protected]"
] | |
4b8524cc460faabc41efc6e9ca0584712bb5bfd6 | ab69c2e3e4ec895fc533a4d37768aab517f86722 | /tests/structures/test_comparisons.py | 995b3acc05d605970a8217e4e74851e623881818 | [
"BSD-3-Clause",
"MIT"
] | permissive | pranavmodx/batavia | 9cf7d7528cb88b16d5b33b64481281b60e84cbec | 084d78eb553f21c787009e1141638e810fcc654f | refs/heads/master | 2020-08-07T19:08:36.105839 | 2019-10-08T06:32:23 | 2019-10-08T06:32:23 | 213,560,529 | 1 | 0 | NOASSERTION | 2019-10-08T06:01:52 | 2019-10-08T06:01:50 | null | UTF-8 | Python | false | false | 5,319 | py | from ..utils import TranspileTestCase
class ComparisonTests(TranspileTestCase):
def test_is(self):
self.assertCodeExecution("""
x = 1
if x is 1:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x is 5:
print('Incorrect')
else:
print('Correct')
print('Done.')
""")
self.assertCodeExecution("""
x = None
if x is None:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x is None:
print('Incorrect')
else:
print('Correct')
print('Done.')
""")
def test_is_not(self):
self.assertCodeExecution("""
x = 1
if x is not 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x is not 1:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x is not None:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = None
if x is not None:
print('Incorrect')
else:
print('Correct')
print('Done.')
""")
def test_lt(self):
self.assertCodeExecution("""
x = 1
if x < 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 5
if x < 5:
print('Incorrect')
else:
print('Correct')
print('Done.')
""")
self.assertCodeExecution("""
x = 10
if x < 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
def test_le(self):
self.assertCodeExecution("""
x = 1
if x <= 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 5
if x <= 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 10
if x <= 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
def test_gt(self):
self.assertCodeExecution("""
x = 10
if x > 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 5
if x > 5:
print('Incorrect')
else:
print('Correct')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x > 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
def test_ge(self):
self.assertCodeExecution("""
x = 10
if x >= 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 5
if x >= 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
self.assertCodeExecution("""
x = 1
if x >= 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
def test_eq(self):
self.assertCodeExecution("""
x = 10
if x == 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
self.assertCodeExecution("""
x = 5
if x == 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
def test_ne(self):
self.assertCodeExecution("""
x = 5
            if x != 5:
                print('Incorrect')
            else:
                print('Correct')
print('Done.')
""")
self.assertCodeExecution("""
x = 10
            if x != 5:
print('Correct')
else:
print('Incorrect')
print('Done.')
""")
| [
"[email protected]"
] | |
eda77b3fb111fdadddce59be538af5500de1b8e4 | e9abcb6021cc6fcc15ef2258f09812492b4e093d | /ironic/drivers/modules/pxe_auto_deploy.py | e710fc477b8a458092f3fcfad209f81f16a14b57 | [
"Apache-2.0"
] | permissive | ericxiett/ironic-customized | e6df6a62840ae34180b8004c98ac56790462408b | 3a2ad13969e1497889a0c3be80f9f5f671ff4d1b | refs/heads/master | 2020-07-16T08:29:03.447845 | 2019-09-02T01:31:58 | 2019-09-02T01:31:58 | 205,754,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,371 | py | import os
import socket
from shutil import rmtree
import jinja2
import time
from oslo_log import log
from oslo_utils import fileutils
from ironic_lib import utils as ironic_utils
from ironic.common import exception, pxe_utils, boot_devices, states
from ironic.common import utils
from ironic.common.i18n import _, _LE, _LI, _LW
from ironic.common.pxe_utils import get_root_dir
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
LOG = log.getLogger(__name__)
REQUIRED_PROPERTIES = ['user_kernel',
'user_ramdisk',
'management_ip',
'management_netmask',
'management_gateway']
PXE_CFG_DIR_NAME = 'pxelinux.cfg'
HOSTNAME_PREFIX = 'Host-'
AUTO_FILE_DIR = "/var/www/html/auto/"
class PXEAutoDeploy(base.DeployInterface):
def __init__(self):
pass
def clean_up(self, task):
        # str.replace returns a new string, so the original conversion was
        # discarded here; reuse the helper, which also strips the '01-'
        # hardware-type prefix so the value matches port.address.
        pxe_boot_interface_mac = self._get_boot_interface_mac(task)
for port in task.ports:
if port.address == pxe_boot_interface_mac:
client_id = port.extra.get('client-id')
ironic_utils.unlink_without_raise(self._get_pxe_mac_path(port.address, client_id=client_id))
pxe_config_file_path = pxe_utils.get_pxe_config_file_path(task.node.uuid)
fileutils.delete_if_exists(pxe_config_file_path)
if os.path.exists(os.path.join(CONF.pxe.tftp_root, task.node.uuid)):
rmtree(os.path.join(CONF.pxe.tftp_root, task.node.uuid))
auto_file_name = task.node.uuid + '_auto.cfg'
fileutils.delete_if_exists(AUTO_FILE_DIR + auto_file_name)
@task_manager.require_exclusive_lock
def deploy(self, task):
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
def get_properties(self):
pass
@task_manager.require_exclusive_lock
def prepare(self, task):
# No need to update dhcp with standalone mode
self._create_auto_config(task)
self._create_pxe_config(task)
deploy_utils.try_set_boot_device(task, boot_devices.PXE)
def _create_auto_config(self, task):
auto_info = {}
managemenet_ip = task.node.instance_info.get('management_ip')
auto_info['management_ip'] = managemenet_ip
auto_info['management_netmask'] = \
task.node.instance_info.get('management_netmask')
auto_info['management_gateway'] = \
task.node.instance_info.get('management_gateway')
auto_info['hostname'] = \
HOSTNAME_PREFIX + managemenet_ip.replace('.', '-')
auto_info['os_ver'] = \
task.node.instance_info.get('os_ver')
auto_info['server_ip'] = CONF.my_ip
extra_info = task.node.extra
pxe_boot_interface_mac = self._get_boot_interface_mac(task)
for nic in extra_info.get('nic_detailed'):
address = nic.get('mac_address')
LOG.info('address: %s', address)
if nic.get('mac_address') == pxe_boot_interface_mac:
auto_info['management_port'] = nic.get('name')
break
fileutils.ensure_tree(AUTO_FILE_DIR)
auto_file_name = task.node.uuid + '_auto.cfg'
auto_file_path = AUTO_FILE_DIR + auto_file_name
tmpl_path, tmpl_file = os.path.split(CONF.pxe_auto.pxe_auto_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
        auto_config = template.render({'auto_info': auto_info,
                                       'server_ip': CONF.my_ip,
                                       'repo_server_ip': CONF.pxe_auto.repo_server,
                                       'UUID': task.node.uuid,
                                       })
        utils.write_to_file(auto_file_path, auto_config)
def _get_boot_interface_mac(self, task):
extra_info = task.node.extra
# pxe_interface like '01-6c-92-bf-0c-9c-d9'. '01-' is not needed.
pxe_interface = extra_info.get('boot_detailed').get('pxe_interface')[3:]
return pxe_interface.replace('-', ':')
def _create_pxe_config(self, task):
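        # Render the node's PXE config from CONF.pxe.pxe_config_template
        # into the TFTP root, then publish it via a pxelinux.cfg/<MAC>
        # symlink so the firmware can find it by boot-interface MAC.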
pxe_options = self._build_pxe_options(task.node)
pxe_config_template = CONF.pxe.pxe_config_template
node_uuid = task.node.uuid
root_dir = CONF.pxe.tftp_root
fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
pxe_config_file_path = pxe_utils.get_pxe_config_file_path(node_uuid)
tmpl_path, tmpl_file = os.path.split(pxe_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
pxe_config = template.render({'pxe_options': pxe_options,
'server_ip': CONF.my_ip,
'UUID': node_uuid,
})
utils.write_to_file(pxe_config_file_path, pxe_config)
self._link_mac_pxe_configs(task)
def _get_pxe_mac_path(self, mac, delimiter='-', client_id=None):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
:param delimiter: The MAC address delimiter. Defaults to dash ('-').
        :param client_id: client_id indicates an InfiniBand port.
                          Default is None (Ethernet).
:returns: the path to the config file.
"""
mac_file_name = mac.replace(':', delimiter).lower()
if not CONF.pxe.ipxe_enabled:
hw_type = '01-'
if client_id:
hw_type = '20-'
mac_file_name = hw_type + mac_file_name
return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, mac_file_name)
def _link_mac_pxe_configs(self, task):
def create_link(mac_path):
ironic_utils.unlink_without_raise(mac_path)
relative_source_path = os.path.relpath(
pxe_config_file_path, os.path.dirname(mac_path))
utils.create_link_without_raise(relative_source_path, mac_path)
pxe_config_file_path = pxe_utils.get_pxe_config_file_path(task.node.uuid)
pxe_boot_interface_mac = self._get_boot_interface_mac(task)
LOG.info("pxe_boot_interface_mac: %s", pxe_boot_interface_mac)
for port in task.ports:
LOG.info("port.address: %s", port.address)
if port.address == pxe_boot_interface_mac:
client_id = port.extra.get('client-id')
create_link(self._get_pxe_mac_path(port.address, client_id=client_id))
def _build_pxe_options(self, node):
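        # Resolve the user-supplied kernel/ramdisk names in instance_info
        # to absolute paths under the TFTP root for the config template.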
pxe_info = {}
root_dir = pxe_utils.get_root_dir()
for label in ('user_kernel', 'user_ramdisk'):
pxe_info[label] = \
os.path.join(root_dir, node.instance_info.get(label))
return pxe_info
def take_over(self, task):
pass
def tear_down(self, task):
manager_utils.node_power_action(task, states.POWER_OFF)
def validate(self, task):
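        # Fail fast unless every key in REQUIRED_PROPERTIES is present in
        # the node's instance_info.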
info = task.node.instance_info
for item in REQUIRED_PROPERTIES:
if not info.get(item):
                error_msg = _("Cannot validate driver deploy. Some parameters"
                              " are missing in the node's instance_info")
                exc_msg = _("%(error_msg)s. Missing: %(missing_info)s")
                raise exception.MissingParameterValue(
                    exc_msg % {'error_msg': error_msg, 'missing_info': item})
def pxeauto(self, task, data):
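        # Progress callback posted by the installer. InstallProgress is
        # compared exactly, so the agent is assumed to report 0.6 (resume
        # the deployment) and 1.0 (finished) verbatim; at 100% the node
        # is set to boot from disk, rebooted, and marked done once SSH
        # (port 22) answers on the management IP.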
task.upgrade_lock()
node = task.node
LOG.info('Pxeauto info for node %(node)s with '
'progress info %(data)s',
{'node': node.uuid, 'data': data})
# Parse progress info
title = data['Title']
progress = float(data['InstallProgress']) * 100
LOG.info('data[\'InstallProgress\']: %s', data['InstallProgress'])
LOG.info('progress: %f', progress)
if progress == 60:
task.process_event('resume')
LOG.info('resume...')
if progress == 100:
deploy_utils.try_set_boot_device(task, boot_devices.DISK)
manager_utils.node_power_action(task, states.REBOOT)
ret = self.check_conn(node.instance_info.get('management_ip'), 22)
if ret == 'success':
task.process_event('done')
LOG.info(_LI('Deployment to node %s done'), task.node.uuid)
def check_conn(self, address, port):
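        # Poll a TCP connect to address:port every 3 seconds until it
        # succeeds. There is no timeout or retry cap, so an unreachable
        # host blocks this thread indefinitely.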
sock = socket.socket()
frequency = 0
while True:
try:
sock.connect((address, port))
LOG.info("Connected to %s on port %s", address, port)
return "success"
            except socket.error as e:
                LOG.info("Connection to %s on port %s failed: %s,"
                         " already waited: %s s", address, port, e, frequency * 3)
frequency += 1
time.sleep(3)
| [
"[email protected]"
] | |
4d73f1009f9545a495de388d2b5332138d8fc0d7 | 237162607427106ae9564670d47427a62356861f | /users/migrations/0040_auto_20190426_1040.py | 477aac69c7a6db31f52e331f91b20015a89d3272 | [] | no_license | pitipund/basecore | 8648c1f4fa37b6e6075fd710ca422fe159ba930e | a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b | refs/heads/master | 2020-09-13T20:16:02.622903 | 2019-11-20T09:07:15 | 2019-11-20T09:07:15 | 221,885,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-04-26 10:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
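    # Auto-generated; alters only Meta options (ordering and verbose
    # names) on ApplicationDefaultRole, so no database schema change.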
dependencies = [
('users', '0039_applicationdefaultrole'),
]
operations = [
migrations.AlterModelOptions(
name='applicationdefaultrole',
options={'ordering': ('id',), 'verbose_name': 'Application Default Role', 'verbose_name_plural': 'Application Default Roles'},
),
]
| [
"[email protected]"
] | |
f8fd4511a108b8fa1fb60b90cb489e7232eb676d | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j032139.63+472718.83/sdB_galex_j032139.63+472718.83_coadd.py | 7700527b519c981826539b80b5486dc86e5c9e84 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | from gPhoton.gMap import gMap
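# Build an NUV count map in 30 s time steps plus a coadded count image
# for this sdB target with gPhoton's gMap, writing both products as FITS.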
def main():
gMap(band="NUV", skypos=[50.415125,47.455231], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j032139.63+472718.83/sdB_galex_j032139.63+472718.83_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j032139.63+472718.83/sdB_galex_j032139.63+472718.83_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
63093190ee20e10698bd99dcea94ccf5d076a006 | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /1heptane/pdep/network4267_1.py | 8a706002eeed10a53d67be4e75593936ac4c0251 | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 69,142 | py | species(
label = 'C=C([CH]C)C(=C)[CH]C(24182)',
structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),
E0 = (249.687,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
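# Auto-generated RMG-Py pressure-dependent network input: each species()
# block pairs a SMILES structure with statistical-mechanics modes
# (harmonic oscillators, hindered rotors), an energy-transfer model, and
# a two-range NASA polynomial thermo fit.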
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C1([CH]C)CC1=CC(25275)',
structure = SMILES('[CH2]C1([CH]C)CC1=CC'),
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=CC)C(C)=[C]C(25412)',
structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(C)=CC(25413)',
structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH2]C(=CC)[C](C)C=C(24605)',
structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),
E0 = (216.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2][C](C=C)C(C)=CC(24606)',
structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),
E0 = (216.244,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2]C(=CC)[C]1CC1C(25414)',
structure = SMILES('[CH2]C(=CC)[C]1CC1C'),
E0 = (289.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)"""),
)
species(
label = '[CH2][C]1C(=CC)CC1C(25415)',
structure = SMILES('[CH2]C1=C([CH]C)CC1C'),
E0 = (304.572,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(=C)C([CH2])=CC(25416)',
structure = SMILES('[CH2]C(=C)C([CH2])=CC'),
E0 = (285.713,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),
HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=C([CH]C)C[C]=CC(24184)',
structure = SMILES('[CH2]C(=CC)C[C]=CC'),
E0 = (366.985,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),
HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'CC=C1CCC1=CC(25269)',
structure = SMILES('CC=C1CCC1=CC'),
E0 = (114.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C([C]=CC)=CC(25417)',
structure = SMILES('[CH2]C([C]=CC)=CC'),
E0 = (334.774,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C1([CH]C)C(=C)C1C(25296)',
structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C(=CC)C(=C)C=C(24604)',
structure = SMILES('[CH2]C(=CC)C(=C)C=C'),
E0 = (242.677,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),
HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (107.173,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2]CC(=C)C([CH2])=CC(25418)',
structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),
E0 = (316.814,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),
HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C(CC)C([CH2])=CC(25419)',
structure = SMILES('[CH]=C(CC)C([CH2])=CC'),
E0 = (358.664,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(=C)CC(25420)',
structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),
E0 = (349.41,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),
HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH]=C([CH]C)C(C)=CC(25421)',
structure = SMILES('[CH]C(=CC)C(C)=CC'),
E0 = (317.373,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2][C](C=C)C(=C)CC(24623)',
structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),
E0 = (228.159,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)"""),
)
species(
label = 'C[CH][C]1CCC1=CC(25422)',
structure = SMILES('C[CH]C1CCC=1[CH]C'),
E0 = (303.292,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)"""),
)
species(
label = '[CH2][C]1C(=C)C(C)C1C(25423)',
structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),
E0 = (305.852,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=CC(=C)C(C)=CC(24616)',
structure = SMILES('C=CC(=C)C(C)=CC'),
E0 = (91.1774,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=[C]C(C)C(=C)[CH]C(24183)',
structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),
E0 = (369.44,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),
HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = 'C=C1C(=CC)CC1C(25265)',
structure = SMILES('C=C1C(=CC)CC1C'),
E0 = (118.381,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CHCH3(T)(95)',
structure = SMILES('[CH]C'),
E0 = (343.893,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C([C]=C)=CC(24774)',
structure = SMILES('[CH2]C([C]=C)=CC'),
E0 = (370.8,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C([CH]C)C(=C)CC(25424)',
structure = SMILES('[CH]C(=CC)C(=C)CC'),
E0 = (330.753,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=CC(=C)C(=C)CC(24630)',
structure = SMILES('C=CC(=C)C(=C)CC'),
E0 = (104.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=C1C(=C)C(C)C1C(25274)',
structure = SMILES('C=C1C(=C)C(C)C1C'),
E0 = (122.654,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
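# Transition states in this network are specified by E0 (ground-state
# energy), spin multiplicity, and optical isomer count only.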
transitionState(
label = 'TS1',
E0 = (291.23,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (538.699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (497.951,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (380.338,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (399.474,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (722.113,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (343.259,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (705.575,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (537.022,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (716.337,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (454.469,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (430.619,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (503.849,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (393.718,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (361.682,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (375.044,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (274.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (463.915,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (714.692,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (375.062,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (258.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission
Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction2',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)CC1=CC(25275)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C](C)C=C(24605)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(C)=CC(24606)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction9',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C]1CC1C(25414)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction10',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=CC)CC1C(25415)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction11',
reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction23',
reactants = ['C=C([CH]C)C[C]=CC(24184)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction13',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CC=C1CCC1=CC(25269)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction14',
reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction15',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction16',
reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction21',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(=C)CC(24623)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction22',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C[CH][C]1CCC1=CC(25422)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction23',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction24',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(C)=CC(24616)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction24',
reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction26',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=CC)CC1C(25265)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction27',
reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction28',
reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction29',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(=C)CC(24630)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction30',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=C)C(C)C1C(25274)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
network(
label = '4267',
isomers = [
'C=C([CH]C)C(=C)[CH]C(24182)',
],
reactants = [
('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '4267',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"[email protected]"
] | |
a7c12c0c81879fc2ae0d9f7d163beeef16b99619 | 4b70a23e74a332c54e70fe33c9b0fe79bb328d85 | /WGB/tests.py | 150266ac3a772eb5520f7750260a12777f21311c | [] | no_license | tevawolf/wgb | 3b095897cbdc9b71c4b233f6b755f65f2693d582 | f30be8575b03f24bf797b305e34b7fda866fa0c0 | refs/heads/master | 2022-12-10T23:18:04.175394 | 2021-01-29T06:40:01 | 2021-01-29T06:40:01 | 159,421,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.test import TestCase
from . import models
class UserAccountTests(TestCase):
    def test_blank_icon(self):
        """Saving a UserAccount without setting an icon should still succeed."""
        account = models.UserAccount()
account.username = 'test'
account.password = 'test'
account.nickname = 'test'
account.save()
saved = models.UserAccount.objects.get(username='test')
self.assertEqual(saved.username, 'test')
| [
"[email protected]"
] | |
700fa75fb3bd427c2ace99115edf7c741cc1a10c | 9449368b4a4100f1ef6dd0f4a845faad6f1161a4 | /models/Qaw_reactnet_18_bf.py | 658a6b782cca02444f3726bafd5009b17e234335 | [
"MIT"
] | permissive | TrendingTechnology/BNN_NoBN | b6a770fb176a9881d22ccea20381084b4abc0bcc | d2777845d04449cabfcfc5ce72738e1e6287f633 | refs/heads/main | 2023-06-17T13:38:26.296326 | 2021-04-21T22:28:49 | 2021-04-21T22:28:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,526 | py | '''
ReAct-BiRealNet-18 (modified from ResNet).
BN setting: all BatchNorm layers are removed.
Conv setting: conv2d is replaced with ScaledStdConv2d (alpha/beta added to each block).
Binary setting: only activations are binarized.
'''
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from layers import *
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return ScaledStdConv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return ScaledStdConv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def binaryconv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return HardBinaryScaledStdConv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1)
def binaryconv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return HardBinaryScaledStdConv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, alpha, beta, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.alpha = alpha
self.beta = beta
self.move0 = LearnableBias(inplanes)
self.binary_activation = BinaryActivation()
self.binary_conv = binaryconv3x3(inplanes, planes, stride=stride)
self.move1 = LearnableBias(planes)
self.prelu = nn.PReLU(planes)
self.move2 = LearnableBias(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x_in = x*self.beta
out = self.move0(x_in)
out = self.binary_activation(out)
out = self.binary_conv(out)
if self.downsample is not None:
residual = self.downsample(x_in)
out = out*self.alpha + residual
out = self.move1(out)
out = self.prelu(out)
out = self.move2(out)
return out
class BiRealNet(nn.Module):
def __init__(self, block, layers, imagenet=True, alpha=0.2, num_classes=1000):
super(BiRealNet, self).__init__()
self.inplanes = 64
if imagenet:
self.conv1 = ScaledStdConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
self.conv1 = ScaledStdConv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.maxpool = nn.Identity()
expected_var = 1.0
self.layer1, expected_var = self._make_layer(block, 64, layers[0], alpha, expected_var)
self.layer2, expected_var = self._make_layer(block, 128, layers[1], alpha, expected_var, stride=2)
self.layer3, expected_var = self._make_layer(block, 256, layers[2], alpha, expected_var, stride=2)
self.layer4, expected_var = self._make_layer(block, 512, layers[3], alpha, expected_var, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, alpha, expected_var, stride=1):
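        # Descriptive note: this mirrors Normalizer-Free ResNet bookkeeping.
        # beta = 1/sqrt(expected_var) rescales the block input, and every
        # residual branch adds alpha**2 to the running variance estimate.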
beta = 1. / expected_var ** 0.5
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=stride),
binaryconv1x1(self.inplanes, planes * block.expansion)
)
# Reset expected var at a transition block
expected_var = 1.0
layers = []
layers.append(block(self.inplanes, planes, alpha, beta, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
beta = 1. / expected_var ** 0.5
layers.append(block(self.inplanes, planes, alpha, beta))
expected_var += alpha ** 2
return nn.Sequential(*layers), expected_var
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def birealnet18(pretrained=False, **kwargs):
"""Constructs a BiRealNet-18 model. """
model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)
return model
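

# Minimal smoke test (a sketch, not part of the original repo; it assumes the
# local `layers` module provides ScaledStdConv2d/HardBinaryScaledStdConv2d and
# that 32x32 inputs are appropriate when imagenet=False):
if __name__ == '__main__':
    net = birealnet18(imagenet=False, num_classes=10)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 10])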
| [
"[email protected]"
] | |
4d4dfa1fce2d0ec301b8527dca38e03ba0e4b365 | e371a21cc31c0616da346e386fea411f39dd0f7b | /LAB04/02-CloudAlbum-Chalice/cloudalbum/chalicelib/config.py | 525345eb14cc26298fa3b523b0b550141477e306 | [
"MIT"
] | permissive | aws-kr-tnc/moving-to-serverless-renew | c0152763de822cea64a862cd395f4f940d2e4e03 | 312248c689a19ea9b589025c82f880593fc70f82 | refs/heads/master | 2023-03-21T19:59:23.717295 | 2022-03-12T15:38:59 | 2022-03-12T15:38:59 | 199,081,822 | 6 | 4 | MIT | 2023-03-07T10:02:25 | 2019-07-26T21:26:02 | Python | UTF-8 | Python | false | false | 1,530 | py | """
cloudalbum/chalicelib/config.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configurations for application.
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by Dayoungle Jun, Sungshik Jou.
:license: MIT, see LICENSE for more details.
"""
import boto3
from chalice import CORSConfig
from aws_parameter_store import AwsParameterStore
def get_param_path(param_path):
"""
Retrieve all key:values in the Parameter Store.
:param param_path:
:return:
"""
region = boto3.session.Session().region_name
store = AwsParameterStore(region)
return store.get_parameters_dict(param_path)
# store configuration values for Cloudalbum
conf = get_param_path('/cloudalbum/')
def get_param(param_name):
"""
This function reads a secure parameter from AWS' SSM service.
The request must be passed a valid parameter name, as well as
temporary credentials which can be used to access the parameter.
The parameter's value is returned.
"""
# Create the SSM Client
ssm = boto3.client('ssm')
# Get the requested parameter
response = ssm.get_parameters(
Names=[param_name, ], WithDecryption=True
)
# Store the credentials in a variable
result = response['Parameters'][0]['Value']
return result
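

# Usage sketch (the key and parameter names below are assumptions for
# illustration, not taken from this repository):
#   bucket = conf['S3_PHOTO_BUCKET']              # read once at import time
#   secret = get_param('/cloudalbum/jwt-secret')  # decrypted SecureString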
cors_config = CORSConfig(
allow_origin='*',
allow_headers=['*'],
max_age=600,
expose_headers=['X-Special-Header'],
allow_credentials=True
)
| [
"[email protected]"
] | |
eba0648acc9316ce39061499fa08bb07bd36bf3e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/pyinstaller/PyInstaller/hooks/hook-PyQt5.QtQuickWidgets.py | 5bfdb0b29bff0c35d055c5b3a918351177aeea00 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:46db77cbf463b412fb237dd8420a2e12c39b4b5c5fd0cc8d34382ca45cfc9ae0
size 1992
| [
"[email protected]"
] | |
6301edb7062fa45ed01d04ba326e978ab1a9c163 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/n1nj4sec_pupy/pupy-master/pupy/modules/screenshot.py | 1a3055e23702c2b625f5306a537f0e3d8a04c751 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,976 | py | # -*- coding: utf-8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected]) All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
from pupylib.PupyModule import *
from os import path
import time
import datetime
import subprocess
__class_name__="screenshoter"
@config(cat="gather")
class screenshoter(PupyModule):
""" take a screenshot :) """
dependencies = ['mss', 'screenshot']
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='screenshot', description=self.__doc__)
self.arg_parser.add_argument('-e', '--enum', action='store_true', help='enumerate screen')
self.arg_parser.add_argument('-s', '--screen', type=int, default=None, help='take a screenshot on a specific screen (default all screen on one screenshot)')
self.arg_parser.add_argument('-v', '--view', action='store_true', help='directly open the default image viewer on the screenshot for preview')
def run(self, args):
rscreenshot = self.client.conn.modules['screenshot']
if args.enum:
self.rawlog('{:>2} {:>9} {:>9}\n'.format('IDX', 'SIZE', 'LEFT'))
for i, screen in enumerate(rscreenshot.screens()):
if not (screen['width'] and screen['height']):
continue
self.rawlog('{:>2}: {:>9} {:>9}\n'.format(
i,
'{}x{}'.format(screen['width'], screen['height']),
'({}x{})'.format(screen['top'], screen['left'])))
return
screenshots, error = rscreenshot.screenshot(args.screen)
if not screenshots:
self.error(error)
else:
self.success('number of monitor detected: %s' % str(len(screenshots)))
for screenshot in screenshots:
filepath = path.join("data","screenshots","scr_"+self.client.short_name()+"_"+str(datetime.datetime.now()).replace(" ","_").replace(":","-")+".png")
                with open(filepath, 'wb') as out:
                    out.write(screenshot)
                # sleep to make sure the file name differs between two
                # consecutive screenshots (the name is timestamp-based)
time.sleep(1)
self.success(filepath)
# if args.view:
# viewer = config.get('default_viewers', 'image_viewer')
# subprocess.Popen([viewer, output])
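
# Typical use from the pupy shell (a sketch; exact module syntax may differ
# between pupy versions):
#   run screenshot -e       # enumerate monitors
#   run screenshot -s 0     # capture only screen 0
# Note that -v is currently a no-op, since the viewer call above is
# commented out.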
| [
"[email protected]"
] | |
727d7ace2d7e5bb03b05240b8fb2e711a818186e | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/xfel/ui/components/xfel_gui_controls.py | 3d08fe4ffe7f49a0c8341e17313896eb3ca5a7db | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 29,197 | py | from __future__ import absolute_import, division, print_function
import six
'''
Author : Lyubimov, A.Y.
Created : 06/03/2016
Last Changed: 06/03/2016
Description : XFEL UI Custom Widgets and Controls
'''
import os
import wx
import wx.richtext
import wx.lib.agw.floatspin as fs
from wxtbx import metallicbutton as mb
# Platform-specific stuff
# TODO: Will need to test this on Windows at some point
if wx.Platform == '__WXGTK__':
norm_font_size = 10
button_font_size = 12
LABEL_SIZE = 14
CAPTION_SIZE = 12
elif wx.Platform == '__WXMAC__':
norm_font_size = 12
button_font_size = 14
LABEL_SIZE = 14
CAPTION_SIZE = 12
elif (wx.Platform == '__WXMSW__'):
norm_font_size = 9
button_font_size = 11
LABEL_SIZE = 11
CAPTION_SIZE = 9
icons = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons/')
# --------------------------------- Buttons ---------------------------------- #
class GradButton(mb.MetallicButton):
def __init__(self, parent, label='', bmp=None, size=wx.DefaultSize,
style=mb.MB_STYLE_BOLD_LABEL, handler_function=None,
user_data=None, start_color=(218, 218, 218),
gradient_percent=0, highlight_color=(230, 230, 230),
label_size=LABEL_SIZE, caption_size=CAPTION_SIZE,
button_margin=4, disable_after_click=0) :
    if isinstance(bmp, str):
      bmp = self.StandardBitmap(bmp)
      bmp_size = bmp.GetSize()
      if bmp_size[1] > size[1]:
        size = (size[0], int(1.5 * bmp_size[1]))
mb.MetallicButton.__init__(self,
parent=parent,
label=label,
bmp=bmp,
size=size,
style=style,
name=str(user_data),
start_color=start_color,
gradient_percent=gradient_percent,
highlight_color=highlight_color,
label_size=label_size,
caption_size=caption_size,
button_margin=button_margin,
disable_after_click=disable_after_click
)
if handler_function is not None:
self.bind_event(wx.EVT_BUTTON, handler_function)
  @staticmethod
  def StandardBitmap(img_name, size=None):
img_path = img_name
img = wx.Image(img_path, type=wx.BITMAP_TYPE_ANY, index=-1)
if size is not None:
(w, h) = size
img.Rescale(w, h)
bmp = img.ConvertToBitmap()
return bmp
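
# Usage sketch (names are illustrative; requires a live wx parent window):
#   btn = GradButton(panel, label='Run job', handler_function=on_run_clicked)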
class RunBlockButton(GradButton):
def __init__(self, parent, block, size=wx.DefaultSize):
self.block = block
db = block.app
self.rnum = block.rungroup_id
self.first_run, self.last_run = block.get_first_and_last_runs()
self.use_ids = db.params.facility.name not in ['lcls']
GradButton.__init__(self, parent=parent, label='',
size=size)
self.update_label()
def update_label(self):
if self.first_run is None:
first = ' ...'
else:
if self.use_ids:
first = self.first_run.id
else:
first = self.first_run.run
if self.last_run is None:
last = ' ...'
else:
last = ' - {}'.format(self.last_run.id if self.use_ids else self.last_run.run)
self.block_label = '[{}] runs {}{}'.format(self.rnum, first, last)
self.SetLabel(self.block_label)
self.Refresh()
class TagButton(GradButton):
def __init__(self, parent, run, size=wx.DefaultSize):
self.run = run
self.tags = self.run.tags
self.parent = parent
GradButton.__init__(self, parent=parent, size=size)
self.update_label()
def update_label(self):
label = ', '.join([i.name for i in self.tags])
self.SetLabel(label)
self.SetFont(wx.Font(button_font_size, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.Refresh()
def change_tags(self):
''' Calls dialog with tag options for all runs; user will select tags
for this specific run
'''
all_tags = self.run.app.get_all_tags()
all_tag_names = [t.name for t in all_tags]
tag_dlg = wx.MultiChoiceDialog(self,
message='Available sample tags',
caption='Sample Tags',
choices=all_tag_names)
# Get indices of selected items (if any) and set them to checked
local_tag_names = [i.name for i in self.tags]
indices = [all_tag_names.index(i) for i in all_tag_names if i in local_tag_names]
tag_dlg.SetSelections(indices)
tag_dlg.Fit()
if (tag_dlg.ShowModal() == wx.ID_OK):
tag_indices = tag_dlg.GetSelections()
self.tags = [i for i in all_tags if all_tags.index(i) in
tag_indices]
old_tags = self.run.tags
old_tag_names = [t.name for t in old_tags]
new_tag_names = [t.name for t in self.tags]
for new_tag in self.tags:
if new_tag.name not in old_tag_names:
self.run.add_tag(new_tag)
for old_tag in old_tags:
if old_tag.name not in new_tag_names:
self.run.remove_tag(old_tag)
# re-synchronize, just in case
self.tags = self.run.tags
self.update_label()
# --------------------------------- Controls --------------------------------- #
class CtrlBase(wx.Panel):
''' Control panel base class '''
def __init__(self,
parent,
label_style='normal',
content_style='normal',
size=wx.DefaultSize):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY, size=size)
if label_style == 'normal':
self.font = wx.Font(norm_font_size, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
elif label_style == 'bold':
self.font = wx.Font(norm_font_size, wx.DEFAULT, wx.NORMAL, wx.BOLD)
elif label_style == 'italic':
self.font = wx.Font(norm_font_size, wx.DEFAULT, wx.ITALIC, wx.NORMAL)
elif label_style == 'italic_bold':
self.font = wx.Font(norm_font_size, wx.DEFAULT, wx.ITALIC, wx.BOLD)
if content_style == 'normal':
self.cfont = wx.Font(norm_font_size, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
elif content_style == 'bold':
self.cfont = wx.Font(norm_font_size, wx.DEFAULT, wx.NORMAL, wx.BOLD)
elif content_style == 'italic':
self.cfont = wx.Font(norm_font_size, wx.DEFAULT, wx.ITALIC, wx.NORMAL)
elif content_style == 'italic_bold':
self.cfont = wx.Font(norm_font_size, wx.DEFAULT, wx.ITALIC, wx.BOLD)
class InputCtrl(CtrlBase):
''' Generic panel that will place a text control, with a label and an
optional Browse / magnifying-glass buttons into a window'''
def __init__(self, parent,
label='', label_size=(100, -1),
label_style='normal',
button=False, value=''):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
output_box = wx.FlexGridSizer(1, 4, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
output_box.Add(self.txt)
self.ctr = wx.TextCtrl(self) #, size=ctr_size)
self.ctr.SetValue(value)
output_box.Add(self.ctr, flag=wx.EXPAND)
self.btn_browse = wx.Button(self, label='Browse...')
self.btn_mag = wx.BitmapButton(self,
bitmap=wx.Bitmap('{}/16x16/viewmag.png'
''.format(icons)))
output_box.Add(self.btn_browse, flag=wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
output_box.Add(self.btn_mag, flag=wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
if not button:
self.btn_browse.Hide()
self.btn_mag.Hide()
output_box.AddGrowableCol(1, 1)
self.SetSizer(output_box)
class TextCtrl(CtrlBase):
''' Generic panel placing only a text box'''
def __init__(self, parent,
ctrl_size=(200, -1),
value=''):
CtrlBase.__init__(self, parent=parent)
output_box = wx.FlexGridSizer(1, 4, 0, 10)
self.txt = wx.StaticText(self)
self.txt.SetFont(self.font)
output_box.Add(self.txt)
self.ctr = wx.TextCtrl(self, size=ctrl_size)
self.ctr.SetValue(value)
output_box.Add(self.ctr, flag=wx.EXPAND)
self.SetSizer(output_box)
class TextButtonCtrl(CtrlBase):
''' Generic panel that will place a text control, with a label and an
optional large button, and an optional bitmap button'''
def __init__(self, parent,
label='', label_size=(100, -1),
label_style='normal',
text_style=wx.TE_LEFT,
ctrl_size=(200, -1),
big_button=False,
big_button_label='Browse...',
big_button_size=wx.DefaultSize,
ghost_button=True,
value=''):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
output_box = wx.FlexGridSizer(1, 4, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
output_box.Add(self.txt)
self.ctr = wx.TextCtrl(self, style=text_style, size=ctrl_size)
self.ctr.SetValue(value)
output_box.Add(self.ctr, flag=wx.EXPAND)
self.btn_big = wx.Button(self, label=big_button_label, size=big_button_size)
if ghost_button:
output_box.Add(self.btn_big, flag=wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
else:
output_box.Add(self.btn_big)
if not big_button:
self.btn_big.Hide()
output_box.AddGrowableCol(1, 1)
self.SetSizer(output_box)
class TwoButtonCtrl(CtrlBase):
''' Generic panel that will place a text control, with a label and an
optional large button, and an optional bitmap button'''
def __init__(self, parent,
label='', label_size=(100, -1),
label_style='normal',
text_style=wx.TE_LEFT,
button1=False,
button1_label='Browse...',
button1_size=wx.DefaultSize,
button2=False,
button2_label='Default',
button2_size=wx.DefaultSize,
value=''):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
output_box = wx.FlexGridSizer(1, 5, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
output_box.Add(self.txt)
self.ctr = wx.TextCtrl(self, style=text_style)
self.ctr.SetValue(value)
output_box.Add(self.ctr, flag=wx.EXPAND)
if button1:
self.button1 = wx.Button(self, label=button1_label, size=button1_size)
output_box.Add(self.button1)
if button2:
self.button2 = wx.Button(self, label=button2_label, size=button2_size)
output_box.Add(self.button2)
output_box.AddGrowableCol(1, 1)
self.SetSizer(output_box)
class OptionCtrl(CtrlBase):
''' Generic panel will place a text control w/ label '''
def __init__(self, parent, items,
label='',
label_size=(100, -1),
label_style='normal',
sub_labels=[],
ctrl_size=(300, -1)):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
if label != '':
opt_box = wx.FlexGridSizer(1, len(items) * 2 + 1, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
opt_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
else:
opt_box = wx.FlexGridSizer(1, len(items) * 2, 0, 10)
for key, value in items:
if sub_labels != []:
sub_label = sub_labels[items.index((key, value))].decode('utf-8')
else:
sub_label = key
if len(items) > 1:
opt_label = wx.StaticText(self, id=wx.ID_ANY, label=sub_label)
opt_box.Add(opt_label, flag=wx.ALIGN_CENTER_VERTICAL)
item = wx.TextCtrl(self, id=wx.ID_ANY, size=ctrl_size,
style=wx.TE_PROCESS_ENTER)
item.SetValue(str(value))
opt_box.Add(item, flag=wx.ALIGN_CENTER_VERTICAL)
self.__setattr__(key, item)
self.SetSizer(opt_box)
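
# Example (sketch): OptionCtrl(parent, items=[('nproc', 12), ('queue', 'q1')])
# builds one labelled text box per (key, value) pair and exposes each control
# as an attribute, e.g. ctrl.nproc.GetValue(), via the __setattr__ call above.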
class VerticalOptionCtrl(CtrlBase):
''' Generic panel will place a text control w/ label in column'''
def __init__(self, parent, items,
label='',
label_size=(100, -1),
label_style='normal',
sub_labels=[],
ctrl_size=(300, -1)):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
if label != '':
opt_box = wx.FlexGridSizer(len(items) * 2 + 1, 2, 10, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
opt_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
opt_box.Add((0, 0))
else:
opt_box = wx.FlexGridSizer(len(items) * 2, 2, 10, 10)
for key, value in items:
if sub_labels != []:
sub_label = sub_labels[items.index((key, value))].decode('utf-8')
else:
sub_label = key
if len(items) > 1:
opt_label = wx.StaticText(self, id=wx.ID_ANY, label=sub_label)
opt_box.Add(opt_label, flag=wx.ALIGN_CENTER_VERTICAL)
item = wx.TextCtrl(self, id=wx.ID_ANY, size=ctrl_size,
style=wx.TE_PROCESS_ENTER)
item.SetValue(str(value))
opt_box.Add(item, flag=wx.ALIGN_CENTER_VERTICAL)
self.__setattr__(key, item)
self.SetSizer(opt_box)
class IntFloatSpin(fs.FloatSpin):
def GetValue(self):
float_value = super(IntFloatSpin, self).GetValue()
int_value = int(round(float_value))
return int_value
class SpinCtrl(CtrlBase):
''' Generic panel will place a spin control w/ label '''
def __init__(self, parent,
label='',
label_size=(200, -1),
label_style='normal',
ctrl_size=(60, -1),
ctrl_value='3',
ctrl_max=10,
ctrl_min=0,
ctrl_step=1,
ctrl_digits=0):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
ctr_box = wx.FlexGridSizer(1, 3, 0, 10)
self.txt = wx.StaticText(self, label=label.decode('utf-8'),
size=label_size)
self.txt.SetFont(self.font)
floatspin_class = IntFloatSpin if ctrl_digits == 0 else fs.FloatSpin
self.ctr = floatspin_class(self, value=ctrl_value, max_val=(ctrl_max),
min_val=(ctrl_min), increment=ctrl_step,
digits=ctrl_digits, size=ctrl_size)
ctr_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
ctr_box.Add(self.ctr, flag=wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(ctr_box)
class ChoiceCtrl(CtrlBase):
''' Generic panel will place a choice control w/ label '''
def __init__(self, parent,
choices,
label='',
label_size=(200, -1),
label_style='normal',
ctrl_size=(100, -1)):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
ctr_box = wx.FlexGridSizer(1, 3, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
# Check if choices are tuples, extract data and assign to items if so
if all(isinstance(i, tuple) for i in choices):
items = [i[0] for i in choices]
self.ctr = wx.Choice(self, size=ctrl_size, choices=items)
for choice in choices:
item_idx = self.ctr.FindString(choice[0])
self.ctr.SetClientData(item_idx, choice[1])
else:
self.ctr = wx.Choice(self, size=ctrl_size, choices=choices)
ctr_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
ctr_box.Add(self.ctr, flag=wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(ctr_box)
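
# Example (sketch): passing tuples, e.g. choices=[('LCLS', 1), ('local', 2)],
# shows the first element and stores the second as per-item client data,
# retrievable with ctr.GetClientData(ctr.GetSelection()).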
class CheckListCtrl(CtrlBase):
def __init__(self, parent,
choices,
label='',
label_size=(200, -1),
label_style='normal',
ctrl_size=(150, -1),
direction='horizontal'):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
self.ctr = wx.CheckListBox(self, size=ctrl_size, choices=choices)
if label == '':
ctr_box = wx.BoxSizer(wx.VERTICAL)
else:
if direction == 'horizontal':
ctr_box = wx.FlexGridSizer(1, 2, 0, 10)
elif direction == 'vertical':
ctr_box = wx.FlexGridSizer(2, 1, 10, 0)
ctr_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
ctr_box.Add(self.ctr, proportion=1,
flag=wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
self.SetSizer(ctr_box)
class MultiChoiceCtrl(CtrlBase):
''' Generic panel with multiple choice controls / labels '''
def __init__(self, parent, items,
label='',
label_size=(200, -1),
label_style='normal',
ctrl_size=(100, -1)):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
choice_box = wx.FlexGridSizer(1, len(items) * 2 + 1, 0, 10)
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
choice_box.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
for key, choices in six.iteritems(items):
if len(items) > 1:
ch_label =wx.StaticText(self, id=wx.ID_ANY, label=key)
choice_box.Add(ch_label, flag=wx.ALIGN_CENTER_VERTICAL)
item = wx.Choice(self, id=wx.ID_ANY, size=ctrl_size, choices=choices)
choice_box.Add(item, flag=wx.ALIGN_CENTER_VERTICAL)
self.__setattr__(key, item)
self.SetSizer(choice_box)
class TableCtrl(CtrlBase):
''' Generic panel will place a table w/ x and y labels
Data must be a list of lists for multi-column tables '''
def __init__(self, parent,
clabels=[],
clabel_size=(200, -1),
rlabels=[],
rlabel_size=(200, -1),
contents=[],
label_style='normal',
content_style='normal'):
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style)
nrows = len(rlabels) + 1
if len(clabels) == 0:
ncols = 2
else:
ncols = len(clabels) + 1
self.sizer = wx.FlexGridSizer(nrows, ncols, 10, 10)
# add column labels (xlabels)
if len(clabels) > 0:
self.sizer.Add(wx.StaticText(self, label=''))
      for item in clabels:
        clabel = wx.StaticText(self, label=item.decode('utf-8'),
                               size=clabel_size)
clabel.SetFont(self.font)
self.sizer.Add(clabel)
# add row labels and row contents
for l in rlabels:
row_label = wx.StaticText(self, label=l.decode('utf-8'), size=rlabel_size)
row_label.SetFont(self.font)
self.sizer.Add(row_label)
# Add data to table
c_index = rlabels.index(l)
for item in contents[c_index]:
cell = wx.StaticText(self, label=item.decode('utf-8'))
cell.SetFont(self.cfont)
self.sizer.Add(cell)
self.SetSizer(self.sizer)
class RadioCtrl(CtrlBase):
'''Generic panel with multiple radio buttons.'''
def __init__(self, parent,
label='',
label_size=(200, -1),
label_style='normal',
ctrl_size=(100, -1),
direction='horizontal',
items={}):
CtrlBase.__init__(self, parent=parent, label_style=label_style)
if direction == 'horizontal':
radio_group = wx.FlexGridSizer(1, len(items) + 1, 0, 10)
else:
radio_group = wx.FlexGridSizer(len(items) + 1, 1, 0, 10)
if label != '':
self.txt = wx.StaticText(self, label=label, size=label_size)
self.txt.SetFont(self.font)
radio_group.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)
for key, value in six.iteritems(items):
button = wx.RadioButton(self, id=wx.ID_ANY, label=value)
radio_group.Add(button)
self.__setattr__(key, button)
self.SetSizer(radio_group)
# Use a mixin to support sorting by columns
import wx.lib.mixins.listctrl as listmix
class SortableListCtrl(wx.ListCtrl, listmix.ColumnSorterMixin):
def __init__(self, parent, style=wx.LC_ICON):
self.parent = parent
self.sortable_mixin = listmix
wx.ListCtrl.__init__(self, parent, style=style)
def initialize_sortable_columns(self, n_col=0, itemDataMap={}):
self.itemDataMap = itemDataMap
self.sortable_mixin.ColumnSorterMixin.__init__(self, n_col)
sortable_list = self.GetListCtrl()
if sortable_list:
sortable_list.Bind(wx.EVT_LIST_COL_CLICK, self.__OnColClick, sortable_list)
def __OnColClick(self, e):
self._col = e.GetColumn()
self._colSortFlag[self._col] = int(not self._colSortFlag[self._col])
self.GetListCtrl().SortItems(self.GetColumnSorter())
self.OnSortOrderChanged()
if hasattr(self.parent, 'onColClick'):
self.parent.onColClick(e)
def RestoreSortOrder(self, col, colSortFlag):
self._col = col
self._colSortFlag = colSortFlag
self.GetListCtrl().SortItems(self.GetColumnSorter())
self.OnSortOrderChanged()
def GetListCtrl(self):
return self
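
# Note (sketch): the ColumnSorterMixin expects self.itemDataMap to map each
# row's data key to a tuple of its column values, and every row to be tagged
# with SetItemData(row, key); initialize_sortable_columns() above binds the
# column-click handler once those are in place.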
# ------------------------------- UI Elements -------------------------------- #
class RunBlock(CtrlBase):
def __init__(self, parent, block,
label_style='normal',
content_style='normal'):
self.block = block
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style)
self.sizer = wx.FlexGridSizer(1, 2, 0, 5)
self.new_runblock = RunBlockButton(self, size=(200, 30), block=block)
# self.del_runblock = wx.BitmapButton(self,
# bitmap=wx.Bitmap('{}/16x16/delete.png'.format(icons)))
self.sizer.Add(self.new_runblock)
# self.sizer.Add(self.del_runblock)
self.SetSizer(self.sizer)
class PHILBox(CtrlBase):
def __init__(self, parent,
btn_import=True,
btn_import_size=(120, -1),
btn_import_label='Import PHIL',
btn_export=False,
btn_export_size=(120, -1),
btn_export_label='Export PHIL',
btn_default=True,
btn_default_size=(120, -1),
btn_default_label='Default PHIL',
ctr_size=(-1, 125),
ctr_value='',
label_style='normal',
content_style='normal'):
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style)
self.sizer = wx.GridBagSizer(5, 5)
self.SetSizer(self.sizer)
self.ctr = wx.richtext.RichTextCtrl(self,
size=ctr_size,
style=wx.VSCROLL,
value=ctr_value)
span_counter = 0
if btn_import:
self.btn_import = wx.Button(self,
label=btn_import_label,
size=btn_import_size)
self.sizer.Add(self.btn_import, pos=(span_counter, 0))
span_counter += 1
if btn_export:
self.btn_export = wx.Button(self,
label=btn_export_label,
size=btn_export_size)
self.sizer.Add(self.btn_export, pos=(span_counter, 0))
span_counter += 1
if btn_default:
self.btn_default = wx.Button(self,
label=btn_default_label,
size=btn_default_size)
self.sizer.Add(self.btn_default, pos=(span_counter, 0))
span_counter += 1
    if span_counter > 0:
      self.sizer.Add(self.ctr, pos=(0, 1), span=(span_counter + 1, 1),
                     flag=wx.EXPAND)
      self.sizer.AddGrowableRow(span_counter)
      self.sizer.AddGrowableCol(1)
    else:
      # With no buttons a plain vertical BoxSizer is enough; it has no
      # growable rows/cols, so skip AddGrowableCol and re-set the sizer.
      self.sizer = wx.BoxSizer(wx.VERTICAL)
      self.sizer.Add(self.ctr, 1, flag=wx.EXPAND)
      self.SetSizer(self.sizer)
class GaugeBar(CtrlBase):
def __init__(self, parent,
label='',
label_size=(80, -1),
label_style='normal',
content_style='normal',
gauge_size=(250, 15),
button=False,
button_label='View Stats',
button_size=wx.DefaultSize,
choice_box=True,
choice_label='',
choice_label_size=(120, -1),
choice_size=(100, -1),
choice_style='normal',
choices=[],
gauge_max=100):
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style)
self.sizer = wx.FlexGridSizer(1, 6, 0, 10)
self.sizer.AddGrowableCol(3)
self.bar = wx.Gauge(self, range=gauge_max, size=gauge_size)
if choice_box:
self.bins = ChoiceCtrl(self,
label=choice_label,
label_size=choice_label_size,
label_style=choice_style,
ctrl_size=choice_size,
choices=choices)
self.txt_iso = wx.StaticText(self, label=label, size=label_size)
self.txt_max = wx.StaticText(self, label=str(gauge_max))
self.txt_min = wx.StaticText(self, label='0')
self.sizer.Add(self.txt_iso)
self.sizer.Add(self.txt_min)
self.sizer.Add(self.bar)
self.sizer.Add(self.txt_max)
    if choice_box:
      self.sizer.Add(self.bins)
if button:
self.btn = wx.Button(self, label=button_label, size=button_size)
self.sizer.Add(self.btn, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER)
self.SetSizer(self.sizer)
tp_EVT_STATUS_CHANGE = wx.NewEventType()
EVT_STATUS_CHANGE = wx.PyEventBinder(tp_EVT_STATUS_CHANGE, 1)
class StatusChange(wx.PyCommandEvent):
''' Send event when status light is updated '''
def __init__(self, etype, eid, status=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.status = status
def GetValue(self):
return self.status
class SentinelStatus(CtrlBase):
def __init__(self, parent,
label='',
label_size=(120, -1),
label_style='normal',
content_style='normal'):
self.label = label
self.label_size = label_size
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style, size=(-1, 24))
bmp = wx.Bitmap('{}/16x16/led_off.png'.format(icons))
self.light = wx.StaticBitmap(self, -1, bmp)
self.sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.sizer.Add(self.light)
self.sizer.Add(wx.StaticText(self, label=self.label, size=self.label_size))
self.SetSizer(self.sizer)
self.Bind(EVT_STATUS_CHANGE, self.onChangeStatus)
def change_status(self, status):
evt = StatusChange(tp_EVT_STATUS_CHANGE, -1, status)
wx.PostEvent(self, evt)
def onChangeStatus(self, evt):
status = evt.GetValue()
if status == 'on':
bmp = wx.Bitmap('{}/16x16/led_on.png'.format(icons))
elif status == 'off':
bmp = wx.Bitmap('{}/16x16/led_off.png'.format(icons))
elif status == 'idle':
bmp = wx.Bitmap('{}/16x16/led_idle.png'.format(icons))
elif status == 'alert':
bmp = wx.Bitmap('{}/16x16/led_alert.png'.format(icons))
self.light.SetBitmap(bmp)
class IsoformInfoCtrl(CtrlBase):
def __init__(self, parent,
label_style='normal',
content_style='normal'):
CtrlBase.__init__(self, parent=parent, label_style=label_style,
content_style=content_style)
self.uc_values = None
self.sizer = wx.FlexGridSizer(1, 9, 0, 10)
self.sizer.AddGrowableCol(7)
self.txt_iso = wx.StaticText(self, label='Isoform')
self.txt_pg = wx.StaticText(self, label='Point Group')
self.txt_num = wx.StaticText(self, label='No. Images')
self.txt_uc = wx.StaticText(self, label='Unit Cell')
self.ctr_iso = wx.TextCtrl(self, size=(30, -1), style=wx.TE_READONLY)
self.ctr_pg = wx.TextCtrl(self, size=(50, -1), style=wx.TE_READONLY)
self.ctr_num = wx.TextCtrl(self, size=(50, -1), style=wx.TE_READONLY)
self.ctr_uc = wx.TextCtrl(self, size=(200, -1), style=wx.TE_READONLY)
self.btn_hist = wx.Button(self, label='Histogram')
self.sizer.Add(self.txt_iso, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.ctr_iso, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.txt_pg, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.ctr_pg, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.txt_num, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.ctr_num, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.txt_uc, flag=wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.ctr_uc, flag=wx.EXPAND | wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(self.btn_hist, flag=wx.ALIGN_CENTER_VERTICAL)
self.Bind(wx.EVT_BUTTON, self.onClusterHistogram, self.btn_hist)
self.SetSizer(self.sizer)
def onClusterHistogram(self, e):
if self.uc_values is not None:
import xfel.ui.components.xfel_gui_plotter as pltr
plotter = pltr.PopUpCharts()
plotter.plot_uc_histogram(info_list=[self.uc_values], legend_list=[])
plotter.plt.show()
| [
"[email protected]"
] | |
337d27c4666d08ff02e5ac3fb7470dae4cbe5a9c | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SSD/mmdet/models/roi_heads/point_rend_roi_head.py | 3642628ea91a376a39ce5e5813e50509d0ea712a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 10,905 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
from .. import builder
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class PointRendRoIHead(StandardRoIHead):
"""`PointRend <https://arxiv.org/abs/1912.08193>`_."""
def __init__(self, point_head, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.with_bbox and self.with_mask
self.init_point_head(point_head)
def init_point_head(self, point_head):
"""Initialize ``point_head``"""
self.point_head = builder.build_head(point_head)
def init_weights(self, pretrained):
"""Initialize the weights in head.
Args:
pretrained (str, optional): Path to pre-trained weights.
"""
super().init_weights(pretrained)
self.point_head.init_weights()
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head and point head
in training."""
mask_results = super()._mask_forward_train(x, sampling_results,
bbox_feats, gt_masks,
img_metas)
if mask_results['loss_mask'] is not None:
loss_point = self._mask_point_forward_train(
x, sampling_results, mask_results['mask_pred'], gt_masks,
img_metas)
mask_results['loss_mask'].update(loss_point)
return mask_results
def _mask_point_forward_train(self, x, sampling_results, mask_pred,
gt_masks, img_metas):
"""Run forward function and calculate loss for point head in
training."""
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
rel_roi_points = self.point_head.get_roi_rel_points_train(
mask_pred, pos_labels, cfg=self.train_cfg)
rois = bbox2roi([res.pos_bboxes for res in sampling_results])
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
mask_point_target = self.point_head.get_targets(
rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
loss_mask_point = self.point_head.loss(mask_point_pred,
mask_point_target, pos_labels)
return loss_mask_point
def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
img_metas):
"""Sample fine grained feats from each level feature map and
concatenate them together."""
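        # Shape sketch (orientation only): with R RoIs, P rel_roi_points and
        # per-level channels C_l, each level contributes (R, C_l, P), so the
        # final concatenation below returns (R, sum_l C_l, P).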
num_imgs = len(img_metas)
fine_grained_feats = []
for idx in range(self.mask_roi_extractor.num_inputs):
feats = x[idx]
spatial_scale = 1. / float(
self.mask_roi_extractor.featmap_strides[idx])
point_feats = []
for batch_ind in range(num_imgs):
# unravel batch dim
feat = feats[batch_ind].unsqueeze(0)
inds = (rois[:, 0].long() == batch_ind)
if inds.any():
rel_img_points = rel_roi_point_to_rel_img_point(
rois[inds], rel_roi_points[inds], feat.shape[2:],
spatial_scale).unsqueeze(0)
point_feat = point_sample(feat, rel_img_points)
point_feat = point_feat.squeeze(0).transpose(0, 1)
point_feats.append(point_feat)
fine_grained_feats.append(torch.cat(point_feats, dim=0))
return torch.cat(fine_grained_feats, dim=1)
def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
img_metas):
"""Mask refining process with point head in testing."""
refined_mask_pred = mask_pred.clone()
for subdivision_step in range(self.test_cfg.subdivision_steps):
refined_mask_pred = F.interpolate(
refined_mask_pred,
scale_factor=self.test_cfg.scale_factor,
mode='bilinear',
align_corners=False)
# If `subdivision_num_points` is larger or equal to the
# resolution of the next step, then we can skip this step
num_rois, channels, mask_height, mask_width = \
refined_mask_pred.shape
if (self.test_cfg.subdivision_num_points >=
self.test_cfg.scale_factor**2 * mask_height * mask_width
and
subdivision_step < self.test_cfg.subdivision_steps - 1):
continue
point_indices, rel_roi_points = \
self.point_head.get_roi_rel_points_test(
refined_mask_pred, label_pred, cfg=self.test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_mask_pred = refined_mask_pred.reshape(
num_rois, channels, mask_height * mask_width)
refined_mask_pred = refined_mask_pred.scatter_(
2, point_indices, mask_point_pred)
refined_mask_pred = refined_mask_pred.view(num_rois, channels,
mask_height, mask_width)
return refined_mask_pred
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Obtain mask prediction without augmentation."""
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
segm_results = [[[] for _ in range(self.mask_head.num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
# split batch mask prediction back to each image
mask_pred = mask_results['mask_pred']
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
x_i = [xx[[i]] for xx in x]
mask_rois_i = mask_rois[i]
mask_rois_i[:, 0] = 0 # TODO: remove this hack
mask_pred_i = self._mask_point_forward_test(
x_i, mask_rois_i, det_labels[i], mask_preds[i],
[img_metas])
segm_result = self.mask_head.get_seg_masks(
mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
ori_shapes[i], scale_factors[i], rescale)
segm_results.append(segm_result)
return segm_results
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
mask_results['mask_pred'] = self._mask_point_forward_test(
x, mask_rois, det_labels, mask_results['mask_pred'],
img_metas)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
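# --- Added example (a hedged sketch, not part of the original file): how this
# head is typically wired into an mmdetection-style config dict. The point_head
# keys below (type name, channel sizes, class count) are assumptions that vary
# across mmdet versions; treat them as illustrative only.
if __name__ == '__main__':
    example_roi_head = dict(
        type='PointRendRoIHead',
        point_head=dict(
            type='MaskPointHead',   # assumed registered head name
            num_classes=80,         # COCO-style class count
            in_channels=256,
            num_fcs=3))
    print(example_roi_head)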

# ===== File: /MxOnline/users/adminx.py (repo: zmm064/Django-, no license) =====
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseSetting:
enable_themes = True
use_bootswatch = True
class GlobalSettings:
site_title = "慕学后台管理系统"
site_footer = "慕学在线网"
menu_style = "accordion"
class EmailVerifyRecordAdmin:
list_display = ['code', 'email', 'send_type', 'send_time']
list_filter = ['code', 'email', 'send_type', 'send_time']
search_fields = ['code', 'email', 'send_type']
class BannerAdmin:
list_display = ['title', 'image', 'url', 'index', 'add_time']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
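# --- Added example (hedged): the same registration pattern applied to any
# other model. `Course` and its field names are hypothetical and not part of
# this app, so the lines are left commented out.
# class CourseAdmin:
#     list_display = ['name', 'add_time']
#     search_fields = ['name']
# xadmin.site.register(Course, CourseAdmin)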

# ===== File: /venv/lib/python3.7/types.py (repo: AdamC66/July-18--Avoiding-Bugs-with-Linters, no license) =====
# This blob is a virtualenv symlink, not real source; its stored content is
# just the link target path:
# /home/oem/.pyenv/versions/3.7.3/lib/python3.7/types.py

# ===== File: /examples/pymanopt_autograd_demo.py (repo: chenxofhit/pyprobml, MIT) =====
#https://github.com/pymanopt/pymanopt/blob/master/pymanopt/core/problem.py
import autograd.numpy as np
from pymanopt import Problem
def cost(theta):
return np.square(theta)
problem = Problem(manifold=None, cost=cost, verbosity=1)
print(problem.cost(5))
print(problem.egrad(5.0))
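# --- Added example (hedged): manifold=None above means no solver can actually
# optimize this problem. Under the classic pymanopt 0.x API a runnable variant
# looks roughly like this (newer releases moved solvers to pymanopt.optimizers):
from pymanopt.manifolds import Euclidean
from pymanopt.solvers import SteepestDescent

def cost2(theta):
    # scalar-valued cost so the solver can minimize it
    return np.sum(theta ** 2)

problem2 = Problem(manifold=Euclidean(1), cost=cost2, verbosity=1)
theta_opt = SteepestDescent().solve(problem2)  # minimizer of theta**2, near 0
print(theta_opt)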
"[email protected]"
] | |
eaca63e5e424fa56715f10e05ddfbe09b2ff2f4c | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/RunKeeper/Weight/UpdateEntry.py | e7d943997a26bf0fc309b517c6fea8f1ba7349e6 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateEntry
# Updates a weight entry in a user’s feed.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateEntry(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateEntry Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/RunKeeper/Weight/UpdateEntry')
def new_input_set(self):
return UpdateEntryInputSet()
def _make_result_set(self, result, path):
return UpdateEntryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateEntryChoreographyExecution(session, exec_id, path)
class UpdateEntryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateEntry
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Entry(self, value):
"""
Set the value of the Entry input for this Choreo. ((required, json) A JSON string containing the key/value pairs for the fields to be updated in the weight entry. See documentation for formatting examples.)
"""
InputSet._set_input(self, 'Entry', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved after the final step in the OAuth2 process.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_EntryID(self, value):
"""
Set the value of the EntryID input for this Choreo. ((required, string) This can be the individual id of the weight entry, or you can pass the full uri for the entry as returned from the RetrieveEntries Choreo (i.e. /weight/24085455).)
"""
InputSet._set_input(self, 'EntryID', value)
class UpdateEntryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateEntry Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from RunKeeper.)
"""
return self._output.get('Response', None)
class UpdateEntryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateEntryResultSet(response, path)
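# --- Added usage sketch (hedged): a typical Temboo choreo invocation. The
# account name, app key, token, and entry values are placeholders; TembooSession
# and execute_with_results are the standard entry points of the Temboo Python
# SDK, but the snippet is left commented out since it needs live credentials.
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = UpdateEntry(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('RUNKEEPER_OAUTH_TOKEN')
# inputs.set_EntryID('/weight/24085455')
# inputs.set_Entry('{"weight": 81.5}')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())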

# ===== File: /Chapt 13/sample2.py (repo: DamoM73/Learn-to-program-in-Python, no license) =====
# Program name: Ch 13 Sample app 2 validate password aaaaa.py
# Program asks user to log in, then checks the password
# in this program password is "aaaaaa"
from tkinter import *
from tkinter import messagebox
def submit():
password = entry_password.get()
username = entry_username.get()
messageAlert = Label(root, width = 30)
messageAlert.grid(row = 3, column = 0, columnspan = 2, padx = 5, pady = 5)
if password != "aaaaaa":
messageAlert.config(text = "Password incorrect")
entry_username.delete(0,"END")
entry_password.delete(0,"END")
entry_username.focus_set()
else:
messageAlert.config(text = "Password accepted")
print("password accepted")
print("Username: ", username)
print("Password: ", password)
messagebox.showinfo(title = "Password Ok", message = "Press OK to continue")
root.destroy()
# display a message box with a hint for password
def hint():
messagebox.showinfo(title = "Password hint", message = "Hint: Try password aaaaaa")
# create main window
root = Tk()
root.geometry("250x180")
root.title("Login Screen")
root.resizable(False,False)
root.configure(background = "Light blue")
# place a frame around the labels and user entries
frame_entry = Frame(root, bg = 'Light blue')
frame_entry.grid(row = 0, column = 0, columnspan = 2, padx = 10, pady = 10)
# place a frame around the buttons
frame_buttons = Frame(root, bg = "Light blue")
frame_buttons.grid(row = 2, column = 0, columnspan = 3, padx = 10 , pady = 10)
# place the labels and text entry fields
Label(frame_entry, text = "Enter username: ")\
.grid(row = 0, column = 0, padx = 5, pady = 5)
entry_username = Entry(frame_entry, width = 15, bg = "white")
entry_username.grid(row = 0, column = 1, padx = 5, pady = 5)
Label(frame_entry, text = "Enter password: ")\
.grid(row = 1, column = 0, padx = 10, pady = 10)
entry_password = Entry(frame_entry, width = 15, bg = "white", show = "*")
entry_password.grid(row = 1, column = 1, padx = 5, pady = 5)
# place the submit button
submit_button = Button(frame_buttons, text = "Submit", width = 8, command = submit)
submit_button.grid(row = 0, column = 0, padx = 5, pady = 5)
# place the Hint button
hint_button = Button(frame_buttons, text = "Hint", width = 15, command = hint)
hint_button.grid(row = 0, column = 1, padx = 5, pady = 5)
# run mainloop
root.mainloop()
print("carry on now...") | [
"[email protected]"
] | |
61809667b75b77ed0658b2764d8a6580eff27210 | ba3231b25c60b73ca504cd788efa40d92cf9c037 | /nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_cachepolicy_binding.py | 69f8f99dd0ca1e599fbfdbfaa6887a306492e901 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhuweigh/vpx13 | f6d559ae85341e56472e3592cbc67062dac34b93 | b36caa3729d3ca5515fa725f2d91aeaabdb2daa9 | refs/heads/master | 2020-07-04T22:15:16.595728 | 2019-09-20T00:19:56 | 2019-09-20T00:19:56 | 202,435,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,840 | py | #
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_cachepolicy_binding(base_resource) :
""" Binding class showing the cachepolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._gotopriorityexpression = None
self._bindpoint = None
self._invoke = None
self._labeltype = None
self._labelname = None
self._name = None
self.___count = None
@property
def priority(self) :
r"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
r"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
r"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
r"""Name of the label invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_cachepolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_cachepolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbvserver_cachepolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_cachepolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_cachepolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_cachepolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch lbvserver_cachepolicy_binding resources.
"""
try :
if not name :
obj = lbvserver_cachepolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = lbvserver_cachepolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of lbvserver_cachepolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_cachepolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count lbvserver_cachepolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_cachepolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of lbvserver_cachepolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_cachepolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_cachepolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_cachepolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_cachepolicy_binding = [lbvserver_cachepolicy_binding() for _ in range(length)]
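# --- Added usage sketch (hedged): fetching these bindings through the NITRO
# client. nitro_service and login() are standard NITRO SDK calls; the address,
# credentials, and vserver name are placeholders, so the snippet is left
# commented out (it needs a live NetScaler appliance).
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("192.0.2.10", "http")
# client.login("nsroot", "password", 3600)
# bindings = lbvserver_cachepolicy_binding.get(client, "my_lb_vserver")
# for b in bindings:
#     print(b.policyname, b.priority)
# client.logout()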

# ===== File: /v2/api.thewatcher.io/api/models/saviors.py (repo: quebecsti/kdm-manager, MIT) =====
#!/usr/bin/python2.7
from api.assets import saviors
from api import Models
import utils
class Assets(Models.AssetCollection):
def __init__(self, *args, **kwargs):
self.root_module = saviors
Models.AssetCollection.__init__(self, *args, **kwargs)
def get_asset_by_color(self, color=None):
""" This method will return an asset dictionary whose 'color' attrib
matches the value of the 'color' kwarg.
"""
if color is None:
msg = "get_asset_by_color() requires the 'color' kwarg!"
self.logger.exception(msg)
raise Exception(msg)
output = None
for d in self.get_dicts():
if d["color"] == color and output is None:
output = d
elif d["color"] == color and output is not None:
msg = "Multiple savior asset dicts have the color '%s'. Did you rememeber to filter?" % color
self.logger.exception(msg)
raise Exception(msg)
        if output is None:
            msg = "No asset dict found for color '%s'!" % color
            self.logger.warning(msg)  # original built msg but never used it
        return output
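# --- Added usage sketch (hedged): how this collection is typically queried.
# Constructor arguments follow whatever Models.AssetCollection expects, so the
# bare call here is an assumption; left commented out.
# savior_assets = Assets()
# red_savior = savior_assets.get_asset_by_color("red")
# print(red_savior)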

# ===== File: /answers/MridulMohanta/Day29/question1.py (repo: Codechef-SRM-NCR-Chapter/30-DaysOfCode-March-2021, MIT) =====
a=[]
b=[]
x=int(input("Enter length of the two variables"))
n=int(input("Enter test number"))
y=0
for i in range(0,x):
p=int(input("Enter element in a:"))
a.append(p)
q=int(input("Enter element in b:"))
b.append(q)
for i in range(x-1,-1,-1):
for j in range(i,-1,-1):
if ((a[i]+b[j])<=n):
print (a[i])
print (b[j])
temp=b[j]
b[j]=b[i]
b[i]=temp
y=y+1
break
print (b)
if ((x-1)<=y):
print ("YES")
else:
print("NO")

# ===== File: /build/std_msgs/rosidl_generator_py/std_msgs/msg/_header.py (repo: ClaytonCalabrese/BuiltRos2Eloquent, no license) =====
# generated from rosidl_generator_py/resource/_idl.py.em
# with input from std_msgs:msg/Header.idl
# generated code does not contain a copyright notice
# Import statements for member types
import rosidl_parser.definition # noqa: E402, I100
class Metaclass_Header(type):
"""Metaclass of message 'Header'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('std_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'std_msgs.msg.Header')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__msg__header
cls._CONVERT_FROM_PY = module.convert_from_py_msg__msg__header
cls._CONVERT_TO_PY = module.convert_to_py_msg__msg__header
cls._TYPE_SUPPORT = module.type_support_msg__msg__header
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__msg__header
from builtin_interfaces.msg import Time
if Time.__class__._TYPE_SUPPORT is None:
Time.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class Header(metaclass=Metaclass_Header):
"""Message class 'Header'."""
__slots__ = [
'_stamp',
'_frame_id',
]
_fields_and_field_types = {
'stamp': 'builtin_interfaces/Time',
'frame_id': 'string',
}
SLOT_TYPES = (
rosidl_parser.definition.NamespacedType(['builtin_interfaces', 'msg'], 'Time'), # noqa: E501
rosidl_parser.definition.UnboundedString(), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
from builtin_interfaces.msg import Time
self.stamp = kwargs.get('stamp', Time())
self.frame_id = kwargs.get('frame_id', str())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.stamp != other.stamp:
return False
if self.frame_id != other.frame_id:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
@property
def stamp(self):
"""Message field 'stamp'."""
return self._stamp
@stamp.setter
def stamp(self, value):
if __debug__:
from builtin_interfaces.msg import Time
assert \
isinstance(value, Time), \
"The 'stamp' field must be a sub message of type 'Time'"
self._stamp = value
@property
def frame_id(self):
"""Message field 'frame_id'."""
return self._frame_id
@frame_id.setter
def frame_id(self, value):
if __debug__:
assert \
isinstance(value, str), \
"The 'frame_id' field must be of type 'str'"
self._frame_id = value
| [
"[email protected]"
] | |
76248405f1c00343fddef1efe2213d5897023cdc | 51086c09f2c920d057db12e373a01b08571c4cbf | /pebble-sdk/SDKs/4.3/sdk-core/pebble/common/tools/inject_metadata.py | 132a3d5457b902f386c30f11dc86721ecedec725 | [] | no_license | JohnHoder/pebble-dev | 66dc69258dfd009313c23ba5c2eb518aec257652 | e9d95bd564ba6f58b539a1a68f21fe82b6d0992b | refs/heads/master | 2022-11-23T17:32:26.573394 | 2018-12-26T03:17:37 | 2018-12-26T03:17:37 | 163,131,045 | 0 | 1 | null | 2022-10-31T10:03:38 | 2018-12-26T03:15:57 | Python | UTF-8 | Python | false | false | 11,374 | py | #!/usr/bin/env python
from __future__ import with_statement
from struct import pack, unpack
import os
import os.path
import sys
import time
from subprocess import Popen, PIPE
from shutil import copy2
from binascii import crc32
from struct import pack
from pbpack import ResourcePack
import stm32_crc
# Pebble App Metadata Struct
# These are offsets of the PebbleProcessInfo struct in src/fw/app_management/pebble_process_info.h
HEADER_ADDR = 0x0 # 8 bytes
STRUCT_VERSION_ADDR = 0x8 # 2 bytes
SDK_VERSION_ADDR = 0xa # 2 bytes
APP_VERSION_ADDR = 0xc # 2 bytes
LOAD_SIZE_ADDR = 0xe # 2 bytes
OFFSET_ADDR = 0x10 # 4 bytes
CRC_ADDR = 0x14 # 4 bytes
NAME_ADDR = 0x18 # 32 bytes
COMPANY_ADDR = 0x38 # 32 bytes
ICON_RES_ID_ADDR = 0x58 # 4 bytes
JUMP_TABLE_ADDR = 0x5c # 4 bytes
FLAGS_ADDR = 0x60 # 4 bytes
NUM_RELOC_ENTRIES_ADDR = 0x64 # 4 bytes
UUID_ADDR = 0x68 # 16 bytes
RESOURCE_CRC_ADDR = 0x78 # 4 bytes
RESOURCE_TIMESTAMP_ADDR = 0x7c # 4 bytes
VIRTUAL_SIZE_ADDR = 0x80 # 2 bytes
STRUCT_SIZE_BYTES = 0x82
# Pebble App Flags
# These are PebbleAppFlags from src/fw/app_management/pebble_process_info.h
PROCESS_INFO_STANDARD_APP = (0)
PROCESS_INFO_WATCH_FACE = (1 << 0)
PROCESS_INFO_VISIBILITY_HIDDEN = (1 << 1)
PROCESS_INFO_VISIBILITY_SHOWN_ON_COMMUNICATION = (1 << 2)
PROCESS_INFO_ALLOW_JS = (1 << 3)
PROCESS_INFO_HAS_WORKER = (1 << 4)
# Max app size, including the struct and reloc table
# Note that even if the app is smaller than this, it still may be too big, as it needs to share this
# space with applib/ which changes in size from release to release.
MAX_APP_BINARY_SIZE = 0x10000
# This number is a rough estimate, but should not be less than the available space.
# Currently, app_state uses up a small part of the app space.
# See also APP_RAM in stm32f2xx_flash_fw.ld and APP in pebble_app.ld.
MAX_APP_MEMORY_SIZE = 24 * 1024
# This number is a rough estimate, but should not be less than the available space.
# Currently, worker_state uses up a small part of the worker space.
# See also WORKER_RAM in stm32f2xx_flash_fw.ld
MAX_WORKER_MEMORY_SIZE = 10 * 1024
ENTRY_PT_SYMBOL = 'main'
JUMP_TABLE_ADDR_SYMBOL = 'pbl_table_addr'
DEBUG = False
class InvalidBinaryError(Exception):
pass
def inject_metadata(target_binary, target_elf, resources_file, timestamp, allow_js=False,
has_worker=False):
if target_binary[-4:] != '.bin':
raise Exception("Invalid filename <%s>! The filename should end in .bin" % target_binary)
def get_nm_output(elf_file):
nm_process = Popen(['arm-none-eabi-nm', elf_file], stdout=PIPE)
# Popen.communicate returns a tuple of (stdout, stderr)
nm_output = nm_process.communicate()[0]
if not nm_output:
raise InvalidBinaryError()
nm_output = [ line.split() for line in nm_output.splitlines() ]
return nm_output
def get_symbol_addr(nm_output, symbol):
# nm output looks like the following...
#
# U _ITM_registerTMCloneTable
# 00000084 t jump_to_pbl_function
# U _Jv_RegisterClasses
# 0000009c T main
# 00000130 T memset
#
# We don't care about the lines that only have two columns, they're not functions.
for sym in nm_output:
if symbol == sym[-1] and len(sym) == 3:
return int(sym[0], 16)
raise Exception("Could not locate symbol <%s> in binary! Failed to inject app metadata" %
(symbol))
def get_virtual_size(elf_file):
""" returns the virtual size (static memory usage, .text + .data + .bss) in bytes """
readelf_bss_process = Popen("arm-none-eabi-readelf -S '%s'" % elf_file,
shell=True, stdout=PIPE)
readelf_bss_output = readelf_bss_process.communicate()[0]
# readelf -S output looks like the following...
#
# [Nr] Name Type Addr Off Size ES Flg Lk Inf Al
# [ 0] NULL 00000000 000000 000000 00 0 0 0
# [ 1] .header PROGBITS 00000000 008000 000082 00 A 0 0 1
# [ 2] .text PROGBITS 00000084 008084 0006be 00 AX 0 0 4
# [ 3] .rel.text REL 00000000 00b66c 0004d0 08 23 2 4
# [ 4] .data PROGBITS 00000744 008744 000004 00 WA 0 0 4
# [ 5] .bss NOBITS 00000748 008748 000054 00 WA 0 0 4
last_section_end_addr = 0
# Find the .bss section and calculate the size based on the end of the .bss section
for line in readelf_bss_output.splitlines():
if len(line) < 10:
continue
# Carve off the first column, since it sometimes has a space in it which screws up the
# split. Two leading spaces, a square bracket, 2 digits (with space padding),
# a second square brack is 6
line = line[6:]
columns = line.split()
if len(columns) < 6:
continue
if columns[0] == '.bss':
addr = int(columns[2], 16)
size = int(columns[4], 16)
last_section_end_addr = addr + size
elif columns[0] == '.data' and last_section_end_addr == 0:
addr = int(columns[2], 16)
size = int(columns[4], 16)
last_section_end_addr = addr + size
if last_section_end_addr != 0:
return last_section_end_addr
        sys.stderr.write("Failed to parse ELF sections while calculating the virtual size\n")
sys.stderr.write(readelf_bss_output)
raise Exception("Failed to parse ELF sections while calculating the virtual size")
def get_relocate_entries(elf_file):
""" returns a list of all the locations requiring an offset"""
# TODO: insert link to the wiki page I'm about to write about PIC and relocatable values
entries = []
# get the .data locations
readelf_relocs_process = Popen(['arm-none-eabi-readelf', '-r', elf_file], stdout=PIPE)
readelf_relocs_output = readelf_relocs_process.communicate()[0]
lines = readelf_relocs_output.splitlines()
i = 0
reading_section = False
while i < len(lines):
if not reading_section:
# look for the next section
if lines[i].startswith("Relocation section '.rel.data"):
reading_section = True
i += 1 # skip the column title section
else:
if len(lines[i]) == 0:
# end of the section
reading_section = False
else:
entries.append(int(lines[i].split(' ')[0], 16))
i += 1
# get any Global Offset Table (.got) entries
readelf_relocs_process = Popen(['arm-none-eabi-readelf', '--sections', elf_file],
stdout=PIPE)
readelf_relocs_output = readelf_relocs_process.communicate()[0]
lines = readelf_relocs_output.splitlines()
for line in lines:
# We shouldn't need to do anything with the Procedure Linkage Table since we don't
# actually export functions
if '.got' in line and '.got.plt' not in line:
words = line.split(' ')
while '' in words:
words.remove('')
section_label_idx = words.index('.got')
addr = int(words[section_label_idx + 2], 16)
length = int(words[section_label_idx + 4], 16)
for i in range(addr, addr + length, 4):
entries.append(i)
break
return entries
nm_output = get_nm_output(target_elf)
try:
app_entry_address = get_symbol_addr(nm_output, ENTRY_PT_SYMBOL)
except:
raise Exception("Missing app entry point! Must be `int main(void) { ... }` ")
jump_table_address = get_symbol_addr(nm_output, JUMP_TABLE_ADDR_SYMBOL)
reloc_entries = get_relocate_entries(target_elf)
statinfo = os.stat(target_binary)
app_load_size = statinfo.st_size
if resources_file is not None:
with open(resources_file, 'rb') as f:
pbpack = ResourcePack.deserialize(f, is_system=False)
resource_crc = pbpack.get_content_crc()
else:
resource_crc = 0
if DEBUG:
copy2(target_binary, target_binary + ".orig")
with open(target_binary, 'r+b') as f:
total_app_image_size = app_load_size + (len(reloc_entries) * 4)
if total_app_image_size > MAX_APP_BINARY_SIZE:
raise Exception("App image size is %u (app %u relocation table %u). Must be smaller "
"than %u bytes" % (total_app_image_size,
app_load_size,
len(reloc_entries) * 4,
MAX_APP_BINARY_SIZE))
def read_value_at_offset(offset, format_str, size):
f.seek(offset)
return unpack(format_str, f.read(size))
app_bin = f.read()
app_crc = stm32_crc.crc32(app_bin[STRUCT_SIZE_BYTES:])
[app_flags] = read_value_at_offset(FLAGS_ADDR, '<L', 4)
if allow_js:
app_flags = app_flags | PROCESS_INFO_ALLOW_JS
if has_worker:
app_flags = app_flags | PROCESS_INFO_HAS_WORKER
app_virtual_size = get_virtual_size(target_elf)
struct_changes = {
'load_size' : app_load_size,
'entry_point' : "0x%08x" % app_entry_address,
'symbol_table' : "0x%08x" % jump_table_address,
'flags' : app_flags,
'crc' : "0x%08x" % app_crc,
'num_reloc_entries': "0x%08x" % len(reloc_entries),
'resource_crc' : "0x%08x" % resource_crc,
'timestamp' : timestamp,
'virtual_size': app_virtual_size
}
def write_value_at_offset(offset, format_str, value):
f.seek(offset)
f.write(pack(format_str, value))
write_value_at_offset(LOAD_SIZE_ADDR, '<H', app_load_size)
write_value_at_offset(OFFSET_ADDR, '<L', app_entry_address)
write_value_at_offset(CRC_ADDR, '<L', app_crc)
write_value_at_offset(RESOURCE_CRC_ADDR, '<L', resource_crc)
write_value_at_offset(RESOURCE_TIMESTAMP_ADDR, '<L', timestamp)
write_value_at_offset(JUMP_TABLE_ADDR, '<L', jump_table_address)
write_value_at_offset(FLAGS_ADDR, '<L', app_flags)
write_value_at_offset(NUM_RELOC_ENTRIES_ADDR, '<L', len(reloc_entries))
write_value_at_offset(VIRTUAL_SIZE_ADDR, "<H", app_virtual_size)
# Write the reloc_entries past the end of the binary. This expands the size of the binary,
# but this new stuff won't actually be loaded into ram.
f.seek(app_load_size)
for entry in reloc_entries:
f.write(pack('<L', entry))
f.flush()
return struct_changes
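# --- Added example (hedged): reading two of the injected fields back out of a
# patched binary, reusing the struct offsets defined at the top of this module
# (unpack is already imported above).
def read_app_header(path):
    with open(path, 'rb') as f:
        f.seek(LOAD_SIZE_ADDR)
        (load_size,) = unpack('<H', f.read(2))
        f.seek(CRC_ADDR)
        (crc,) = unpack('<L', f.read(4))
    return {'load_size': load_size, 'crc': '0x%08x' % crc}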

# ===== File: /tests/test_gui_klgui.py (repo: PhonologicalCorpusTools/CorpusTools, GPL-3.0-only) =====

from corpustools.gui.klgui import *
def test_klgui(qtbot, specified_test_corpus, settings):
    dialog = KLDialog(None, settings, specified_test_corpus, True)
qtbot.addWidget(dialog)
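    # --- Added note (hedged): pytest-qt can also drive the dialog, e.g.
    # qtbot.mouseClick(dialog.some_button, QtCore.Qt.LeftButton); the widget
    # attribute name here is an assumption about KLDialog's layout.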

# ===== File: /permuteUnique.py (repo: Yigang0622/LeetCode, no license) =====
# LeetCode
# permuteUnique
# Created by Yigang Zhou on 2020/9/18.
# Copyright © 2020 Yigang Zhou. All rights reserved.
# 47. Permutations II
# https://leetcode-cn.com/problems/permutations-ii/
from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
ans = []
visited = [0] * len(nums)
nums.sort()
self.dfs([], visited,0,nums,ans)
return ans
def dfs(self, current, visited, i, nums, ans):
if i == len(nums):
ans.append(current[:])
return
for j, each in enumerate(nums):
if visited[j] == 1 or (j > 0 and nums[j] == nums[j - 1] and visited[j - 1] == 0):
continue
visited[j] = 1
current.append(each)
self.dfs(current, visited, i+1, nums, ans)
visited[j] = 0
current.pop()
nums = [1,1,2]
r = Solution().permuteUnique(nums)
print(r)
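# --- Added cross-check (hedged): for small inputs, a brute-force answer via
# the standard library should agree with the backtracking result.
from itertools import permutations
assert sorted(r) == sorted(map(list, set(permutations(nums))))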
"[email protected]"
] | |
b74676e45149ad9bbe55f3f25d2e2048b5786119 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/AHU_Heating_Demand_Setpoint.py | ec0c93fb165063c910beab5029a9309ddd5da42c | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Heating_Demand_Setpoint import Heating_Demand_Setpoint
class AHU_Heating_Demand_Setpoint(Heating_Demand_Setpoint):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').AHU_Heating_Demand_Setpoint
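# --- Added usage sketch (hedged): the generated class mainly contributes its
# rdf_type URI; with rdfalchemy, instantiating it would mint a node of that
# type. Left commented out since it needs an rdfalchemy graph context.
# print(AHU_Heating_Demand_Setpoint.rdf_type)
# setpoint = AHU_Heating_Demand_Setpoint()  # creates a node typed as above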

# ===== File: /Aniyom Ebenezer/Phase 2/STRINGS/Day_29_Challenge_Solution/Question 8 Solution.py (repo: eaniyom/python-challenge-solutions, MIT) =====
"""
Write a Python program that takes a list of words and returns the longest one.
"""
def longest_words(word_list):
word_len = []
for n in word_list:
word_len.append((len(n), n))
word_len.sort()
return word_len[-1][1]
print(longest_words(["PHP", "Python", "Backend"]))
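# --- Added alternative (hedged): the same result in one call. Note max() with
# key=len returns the first longest word, while the sort-based version above
# returns the lexicographically greatest among equally long words, so ties can
# differ.
print(max(["PHP", "Python", "Backend"], key=len))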
"[email protected]"
] | |
28d5e3dae132663d27b2d5c4430019896f8b3eef | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/combinatorics/free_groups.py | 2150e670e4015d91706a458585d33802adc1eba1 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 40,158 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int, string_types
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol, symbols as _symbols
from sympy.core.sympify import CantSympify
from sympy.core import S
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import flatten
from sympy.utilities.magic import pollute
from sympy import sign
@public
def free_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``.
Parameters
----------
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> x**2*y**-1
x**2*y**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group,) + tuple(_free_group.generators)
@public
def xfree_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``.
Parameters
----------
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import xfree_group
>>> F, (x, y, z) = xfree_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> y**2*x**-2*z**-1
y**2*x**-2*z**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group, _free_group.generators)
@public
def vfree_group(symbols):
"""Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols
into the global namespace.
Parameters
----------
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import vfree_group
>>> vfree_group("x, y, z")
<free group on the generators (x, y, z)>
>>> x**2*y**-2*z
x**2*y**-2*z
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
pollute([sym.name for sym in _free_group.symbols], _free_group.generators)
return _free_group
def _parse_symbols(symbols):
if not symbols:
return tuple()
if isinstance(symbols, string_types):
return _symbols(symbols, seq=True)
    elif isinstance(symbols, (Expr, FreeGroupElement)):
return (symbols,)
elif is_sequence(symbols):
if all(isinstance(s, string_types) for s in symbols):
return _symbols(symbols)
elif all(isinstance(s, Expr) for s in symbols):
return symbols
raise ValueError("The type of `symbols` must be one of the following: "
"a str, Symbol/Expr or a sequence of "
"one of these types")
##############################################################################
# FREE GROUP #
##############################################################################
_free_group_cache = {}
class FreeGroup(DefaultPrinting):
"""
Free group with finite or infinite number of generators. Its input API
is that of a str, Symbol/Expr or a sequence of one of
these types (which may be empty)
References
==========
[1] http://www.gap-system.org/Manuals/doc/ref/chap37.html
[2] https://en.wikipedia.org/wiki/Free_group
See Also
========
sympy.polys.rings.PolyRing
"""
is_associative = True
is_group = True
is_FreeGroup = True
is_PermutationGroup = False
relators = tuple()
def __new__(cls, symbols):
symbols = tuple(_parse_symbols(symbols))
rank = len(symbols)
_hash = hash((cls.__name__, symbols, rank))
obj = _free_group_cache.get(_hash)
if obj is None:
obj = object.__new__(cls)
obj._hash = _hash
obj._rank = rank
# dtype method is used to create new instances of FreeGroupElement
obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj})
obj.symbols = symbols
obj.generators = obj._generators()
obj._gens_set = set(obj.generators)
for symbol, generator in zip(obj.symbols, obj.generators):
if isinstance(symbol, Symbol):
name = symbol.name
if hasattr(obj, name):
setattr(obj, name, generator)
_free_group_cache[_hash] = obj
return obj
def _generators(group):
"""Returns the generators of the FreeGroup.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F.generators
(x, y, z)
"""
gens = []
for sym in group.symbols:
elm = ((sym, 1),)
gens.append(group.dtype(elm))
return tuple(gens)
def clone(self, symbols=None):
return self.__class__(symbols or self.symbols)
def __contains__(self, i):
"""Return True if ``i`` is contained in FreeGroup."""
if not isinstance(i, FreeGroupElement):
return False
group = i.group
return self == group
def __hash__(self):
return self._hash
def __len__(self):
return self.rank
def __str__(self):
if self.rank > 30:
str_form = "<free group with %s generators>" % self.rank
else:
str_form = "<free group on the generators "
gens = self.generators
str_form += str(gens) + ">"
return str_form
__repr__ = __str__
def __getitem__(self, index):
symbols = self.symbols[index]
return self.clone(symbols=symbols)
def __eq__(self, other):
"""No ``FreeGroup`` is equal to any "other" ``FreeGroup``.
"""
return self is other
def index(self, gen):
"""Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> F.index(y)
1
>>> F.index(x)
0
"""
if isinstance(gen, self.dtype):
return self.generators.index(gen)
else:
raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen))
def order(self):
"""Return the order of the free group.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> F.order()
oo
>>> free_group("")[0].order()
1
"""
if self.rank == 0:
return 1
else:
return S.Infinity
@property
def elements(self):
"""
Return the elements of the free group.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> (z,) = free_group("")
>>> z.elements
{<identity>}
"""
if self.rank == 0:
# A set containing Identity element of `FreeGroup` self is returned
return {self.identity}
else:
raise ValueError("Group contains infinitely many elements"
", hence can't be represented")
@property
def rank(self):
r"""
In group theory, the `rank` of a group `G`, denoted `G.rank`,
can refer to the smallest cardinality of a generating set
for G, that is
\operatorname{rank}(G)=\min\{ |X|: X\subseteq G, \langle X\rangle =G\}.
"""
return self._rank
@property
def is_abelian(self):
"""Returns if the group is Abelian.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.is_abelian
False
"""
if self.rank == 0 or self.rank == 1:
return True
else:
return False
@property
def identity(self):
"""Returns the identity element of free group."""
return self.dtype()
def contains(self, g):
"""Tests if Free Group element ``g`` belong to self, ``G``.
In mathematical terms any linear combination of generators
of a Free Group is contained in it.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.contains(x**3*y**2)
True
"""
if not isinstance(g, FreeGroupElement):
return False
elif self != g.group:
return False
else:
return True
def center(self):
"""Returns the center of the free group `self`."""
return {self.identity}
############################################################################
# FreeGroupElement #
############################################################################
class FreeGroupElement(CantSympify, DefaultPrinting, tuple):
"""Used to create elements of FreeGroup. It can not be used directly to
create a free group element. It is called by the `dtype` method of the
`FreeGroup` class.
"""
is_assoc_word = True
def new(self, init):
return self.__class__(init)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.group, frozenset(tuple(self))))
return _hash
def copy(self):
return self.new(self)
@property
def is_identity(self):
if self.array_form == tuple():
return True
else:
return False
@property
def array_form(self):
"""
SymPy provides two different internal kinds of representation
of associative words. The first one is called the `array_form`
which is a tuple containing `tuples` as its elements, where the
size of each tuple is two. At the first position the tuple
contains the `symbol-generator`, while at the second position
of tuple contains the exponent of that generator at the position.
Since elements (i.e. words) don't commute, the indexing of tuple
makes that property to stay.
The structure in ``array_form`` of ``FreeGroupElement`` is of form:
``( ( symbol_of_gen , exponent ), ( , ), ... ( , ) )``
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> (x*z).array_form
((x, 1), (z, 1))
>>> (x**2*z*y*x**2).array_form
((x, 2), (z, 1), (y, 1), (x, 2))
See Also
========
letter_repr
"""
return tuple(self)
@property
def letter_form(self):
"""
The letter representation of a ``FreeGroupElement`` is a tuple
of generator symbols, with each entry corresponding to a group
generator. Inverses of the generators are represented by
negative generator symbols.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b, c, d = free_group("a b c d")
>>> (a**3).letter_form
(a, a, a)
>>> (a**2*d**-2*a*b**-4).letter_form
(a, a, -d, -d, a, -b, -b, -b, -b)
>>> (a**-2*b**3*d).letter_form
(-a, -a, b, b, b, d)
See Also
========
array_form
"""
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in self.array_form]))
def __getitem__(self, i):
group = self.group
r = self.letter_form[i]
if r.is_Symbol:
return group.dtype(((r, 1),))
else:
return group.dtype(((-r, -1),))
def index(self, gen):
if len(gen) != 1:
raise ValueError()
return (self.letter_form).index(gen.letter_form[0])
@property
def letter_form_elm(self):
"""
"""
group = self.group
r = self.letter_form
return [group.dtype(((elm,1),)) if elm.is_Symbol \
else group.dtype(((-elm,-1),)) for elm in r]
@property
def ext_rep(self):
"""This is called the External Representation of ``FreeGroupElement``
"""
return tuple(flatten(self.array_form))
def __contains__(self, gen):
return gen.array_form[0][0] in tuple([r[0] for r in self.array_form])
def __str__(self):
if self.is_identity:
return "<identity>"
symbols = self.group.symbols
str_form = ""
array_form = self.array_form
for i in range(len(array_form)):
if i == len(array_form) - 1:
if array_form[i][1] == 1:
str_form += str(array_form[i][0])
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1])
else:
if array_form[i][1] == 1:
str_form += str(array_form[i][0]) + "*"
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1]) + "*"
return str_form
__repr__ = __str__
def __pow__(self, n):
n = as_int(n)
group = self.group
if n == 0:
return group.identity
if n < 0:
n = -n
return (self.inverse())**n
result = self
for i in range(n - 1):
result = result*self
# this method can be improved instead of just returning the
# multiplication of elements
return result
def __mul__(self, other):
"""Returns the product of elements belonging to the same ``FreeGroup``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> x*y**2*y**-4
x*y**-2
>>> z*y**-2
z*y**-2
>>> x**2*y*y**-1*x**-2
<identity>
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
if self.is_identity:
return other
if other.is_identity:
return self
r = list(self.array_form + other.array_form)
zero_mul_simp(r, len(self.array_form) - 1)
return group.dtype(tuple(r))
def __div__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return self*(other.inverse())
def __rdiv__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return other*(self.inverse())
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __add__(self, other):
return NotImplemented
def inverse(self):
"""
Returns the inverse of a ``FreeGroupElement`` element
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> x.inverse()
x**-1
>>> (x*y).inverse()
y**-1*x**-1
"""
group = self.group
r = tuple([(i, -j) for i, j in self.array_form[::-1]])
return group.dtype(r)
def order(self):
"""Find the order of a ``FreeGroupElement``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y = free_group("x y")
>>> (x**2*y*y**-1*x**-2).order()
1
"""
if self.is_identity:
return 1
else:
return S.Infinity
def commutator(self, other):
"""
        Return the commutator of `self` and `other`:
        ``self**-1*other**-1*self*other``
"""
group = self.group
if not isinstance(other, group.dtype):
raise ValueError("commutator of only FreeGroupElement of the same "
"FreeGroup exists")
else:
return self.inverse()*other.inverse()*self*other
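    # Hedged example (illustrative):
    #   >>> x.commutator(y)
    #   x**-1*y**-1*x*y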
def eliminate_words(self, words, _all=False, inverse=True):
'''
Replace each subword from the dictionary `words` by words[subword].
If words is a list, replace the words by the identity.
'''
again = True
new = self
if isinstance(words, dict):
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse)
if new != prev:
again = True
else:
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, _all=_all, inverse=inverse)
if new != prev:
again = True
return new
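    # Hedged example (illustrative): replace every x by y**2 in one call,
    #   >>> (x**2*y*x).eliminate_words({x: y**2})
    #   y**7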
def eliminate_word(self, gen, by=None, _all=False, inverse=True):
"""
For an associative word `self`, a subword `gen`, and an associative
word `by` (identity by default), return the associative word obtained by
replacing each occurrence of `gen` in `self` by `by`. If `_all = True`,
the occurrences of `gen` that may appear after the first substitution will
also be replaced and so on until no occurrences are found. This might not
always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`).
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y = free_group("x y")
>>> w = x**5*y*x**2*y**-4*x
>>> w.eliminate_word( x, x**2 )
x**10*y*x**4*y**-4*x**2
>>> w.eliminate_word( x, y**-1 )
y**-11
>>> w.eliminate_word(x**5)
y*x**2*y**-4*x
>>> w.eliminate_word(x*y, y)
x**4*y*x**2*y**-4*x
See Also
========
substituted_word
"""
        if by is None:
by = self.group.identity
if self.is_independent(gen) or gen == by:
return self
if gen == self:
return by
if gen**-1 == by:
_all = False
word = self
l = len(gen)
try:
i = word.subword_index(gen)
k = 1
except ValueError:
if not inverse:
return word
try:
i = word.subword_index(gen**-1)
k = -1
except ValueError:
return word
word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by)
if _all:
return word.eliminate_word(gen, by, _all=True, inverse=inverse)
else:
return word
def __len__(self):
"""
For an associative word `self`, returns the number of letters in it.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> len(w)
13
>>> len(a**17)
17
>>> len(w**0)
0
"""
return sum(abs(j) for (i, j) in self)
def __eq__(self, other):
"""
Two associative words are equal if they are words over the
same alphabet and if they are sequences of the same letters.
This is equivalent to saying that the external representations
of the words are equal.
There is no "universal" empty word, every alphabet has its own
empty word.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> f
<free group on the generators (swapnil0, swapnil1)>
>>> g, swap0, swap1 = free_group("swap0 swap1")
>>> g
<free group on the generators (swap0, swap1)>
>>> swapnil0 == swapnil1
False
>>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1
True
>>> swapnil0*swapnil1 == swapnil1*swapnil0
False
>>> swapnil1**0 == swap0**0
False
"""
group = self.group
if not isinstance(other, group.dtype):
return False
return tuple.__eq__(self, other)
def __lt__(self, other):
"""
The ordering of associative words is defined by length and
lexicography (this ordering is called short-lex ordering), that
is, shorter words are smaller than longer words, and words of the
same length are compared w.r.t. the lexicographical ordering induced
by the ordering of generators. Generators are sorted according
to the order in which they were created. If the generators are
invertible then each generator `g` is larger than its inverse `g^{-1}`,
and `g^{-1}` is larger than every generator that is smaller than `g`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> b < a
False
>>> a < a.inverse()
False
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
l = len(self)
m = len(other)
# implement lenlex order
if l < m:
return True
elif l > m:
return False
for i in range(l):
a = self[i].array_form[0]
b = other[i].array_form[0]
p = group.symbols.index(a[0])
q = group.symbols.index(b[0])
if p < q:
return True
elif p > q:
return False
elif a[1] < b[1]:
return True
elif a[1] > b[1]:
return False
return False
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> y**2 > x**2
True
>>> y*z > z*y
False
>>> x > x.inverse()
True
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
return not self <= other
def __ge__(self, other):
return not self < other
def exponent_sum(self, gen):
"""
For an associative word `self` and a generator or inverse of generator
`gen`, ``exponent_sum`` returns the number of times `gen` appears in
`self` minus the number of times its inverse appears in `self`. If
neither `gen` nor its inverse occur in `self` then 0 is returned.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.exponent_sum(x)
2
>>> w.exponent_sum(x**-1)
-2
>>> w = x**2*y**4*x**-3
>>> w.exponent_sum(x)
-1
See Also
========
generator_count
"""
if len(gen) != 1:
raise ValueError("gen must be a generator or inverse of a generator")
s = gen.array_form[0]
return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]])
def generator_count(self, gen):
"""
For an associative word `self` and a generator `gen`,
``generator_count`` returns the multiplicity of generator
`gen` in `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.generator_count(x)
2
>>> w = x**2*y**4*x**-3
>>> w.generator_count(x)
5
See Also
========
exponent_sum
"""
if len(gen) != 1 or gen.array_form[0][1] < 0:
raise ValueError("gen must be a generator")
s = gen.array_form[0]
return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])
def subword(self, from_i, to_j, strict=True):
"""
For an associative word `self` and two positive integers `from_i` and
`to_j`, `subword` returns the subword of `self` that begins at position
`from_i` and ends at `to_j - 1`, indexing is done with origin 0.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.subword(2, 6)
a**3*b
"""
group = self.group
if not strict:
from_i = max(from_i, 0)
to_j = min(len(self), to_j)
if from_i < 0 or to_j > len(self):
raise ValueError("`from_i`, `to_j` must be positive and no greater than "
"the length of associative word")
if to_j <= from_i:
return group.identity
else:
letter_form = self.letter_form[from_i: to_j]
array_form = letter_form_to_array_form(letter_form, group)
return group.dtype(array_form)
def subword_index(self, word, start = 0):
'''
Find the index of `word` in `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**2*b*a*b**3
>>> w.subword_index(a*b*a*b)
1
'''
l = len(word)
self_lf = self.letter_form
word_lf = word.letter_form
index = None
for i in range(start,len(self_lf)-l+1):
if self_lf[i:i+l] == word_lf:
index = i
break
if index is not None:
return index
else:
raise ValueError("The given word is not a subword of self")
def is_dependent(self, word):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**4*y**-3).is_dependent(x**4*y**-2)
True
>>> (x**2*y**-1).is_dependent(x*y)
False
>>> (x*y**2*x*y**2).is_dependent(x*y**2)
True
>>> (x**12).is_dependent(x**-4)
True
See Also
========
is_independent
"""
        try:
            return self.subword_index(word) is not None
        except ValueError:
            pass
        try:
            return self.subword_index(word**-1) is not None
        except ValueError:
            return False
def is_independent(self, word):
"""
See Also
========
is_dependent
"""
return not self.is_dependent(word)
def contains_generators(self):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> (x**2*y**-1).contains_generators()
{x, y}
>>> (x**3*z).contains_generators()
{x, z}
"""
group = self.group
gens = set()
for syllable in self.array_form:
gens.add(group.dtype(((syllable[0], 1),)))
return set(gens)
def cyclic_subword(self, from_i, to_j):
group = self.group
l = len(self)
letter_form = self.letter_form
period1 = int(from_i/l)
if from_i >= l:
from_i -= l*period1
to_j -= l*period1
diff = to_j - from_i
word = letter_form[from_i: to_j]
period2 = int(to_j/l) - 1
word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2]
word = letter_form_to_array_form(word, group)
return group.dtype(word)
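    # Hedged example (illustrative): indices may wrap around the word,
    #   >>> (x*y*z).cyclic_subword(2, 4)
    #   z*x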
def cyclic_conjugates(self):
"""Returns a words which are cyclic to the word `self`.
References
==========
http://planetmath.org/cyclicpermutation
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x*y*x*y*x
>>> w.cyclic_conjugates()
{x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x}
>>> s = x*y*x**2*y*x
>>> s.cyclic_conjugates()
{x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x}
"""
return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))}
def is_cyclic_conjugate(self, w):
"""
Checks whether words ``self``, ``w`` are cyclic conjugates.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w1 = x**2*y**5
>>> w2 = x*y**5*x
>>> w1.is_cyclic_conjugate(w2)
True
>>> w3 = x**-1*y**5*x**-1
>>> w3.is_cyclic_conjugate(w2)
False
"""
l1 = len(self)
l2 = len(w)
if l1 != l2:
return False
w1 = self.identity_cyclic_reduction()
w2 = w.identity_cyclic_reduction()
letter1 = w1.letter_form
letter2 = w2.letter_form
str1 = ' '.join(map(str, letter1))
str2 = ' '.join(map(str, letter2))
if len(str1) != len(str2):
return False
return str1 in str2 + ' ' + str2
def number_syllables(self):
"""Returns the number of syllables of the associative word `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables()
3
"""
return len(self.array_form)
def exponent_syllable(self, i):
"""
Returns the exponent of the `i`-th syllable of the associative word
`self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.exponent_syllable( 2 )
2
"""
return self.array_form[i][1]
def generator_syllable(self, i):
"""
Returns the symbol of the generator that is involved in the
i-th syllable of the associative word `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.generator_syllable( 3 )
b
"""
return self.array_form[i][0]
def sub_syllables(self, from_i, to_j):
"""
`sub_syllables` returns the subword of the associative word `self` that
        consists of syllables from positions `from_i` to `to_j`, where
        `from_i` and `to_j` must be positive integers and indexing is done
with origin 0.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a, b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.sub_syllables(1, 2)
b
>>> w.sub_syllables(3, 3)
<identity>
"""
if not isinstance(from_i, int) or not isinstance(to_j, int):
raise ValueError("both arguments should be integers")
group = self.group
if to_j <= from_i:
return group.identity
else:
r = tuple(self.array_form[from_i: to_j])
return group.dtype(r)
def substituted_word(self, from_i, to_j, by):
"""
Returns the associative word obtained by replacing the subword of
`self` that begins at position `from_i` and ends at position `to_j - 1`
by the associative word `by`. `from_i` and `to_j` must be positive
integers, indexing is done with origin 0. In other words,
        `w.substituted_word(from_i, to_j, by)` is the product of the three
        words: `w.subword(0, from_i)`, `by`, and
        `w.subword(to_j, len(w))`.
See Also
========
eliminate_word
"""
lw = len(self)
if from_i >= to_j or from_i > lw or to_j > lw:
raise ValueError("values should be within bounds")
        # otherwise there are four possibilities
        # first if from_i == 0 and to_j == lw then
        if from_i == 0 and to_j == lw:
            return by
        elif from_i == 0:  # second if from_i == 0 (and to_j < lw) then
            return by*self.subword(to_j, lw)
        elif to_j == lw:  # third if to_j == lw (and from_i > 0) then
            return self.subword(0, from_i)*by
        else:  # finally
            return self.subword(0, from_i)*by*self.subword(to_j, lw)
def is_cyclically_reduced(self):
r"""Returns whether the word is cyclically reduced or not.
        A word is cyclically reduced if it stays reduced after forming
        the cycle of the word, i.e. a word `w = a_1 ... a_n`
        is called cyclically reduced if `a_1 \ne a_n^{-1}`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**-1*x**-1).is_cyclically_reduced()
False
>>> (y*x**2*y**2).is_cyclically_reduced()
True
"""
if not self:
return True
return self[0] != self[-1]**-1
def identity_cyclic_reduction(self):
"""Return a unique cyclically reduced version of the word.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).identity_cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).identity_cyclic_reduction()
x**2*y**-1
References
==========
http://planetmath.org/cyclicallyreduced
"""
word = self.copy()
group = self.group
while not word.is_cyclically_reduced():
exp1 = word.exponent_syllable(0)
exp2 = word.exponent_syllable(-1)
r = exp1 + exp2
if r == 0:
rep = word.array_form[1: word.number_syllables() - 1]
else:
rep = ((word.generator_syllable(0), exp1 + exp2),) + \
word.array_form[1: word.number_syllables() - 1]
word = group.dtype(rep)
return word
def cyclic_reduction(self, removed=False):
"""Return a cyclically reduced version of the word. Unlike
`identity_cyclic_reduction`, this will not cyclically permute
the reduced word - just remove the "unreduced" bits on either
side of it. Compare the examples with those of
`identity_cyclic_reduction`.
        When `removed` is `True`, return a tuple `(word, r)` where
        `r` is such that before the reduction the word was
        `r*word*r**-1`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).cyclic_reduction()
y**-1*x**2
>>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)
(y**-1*x**2, x**-3)
"""
word = self.copy()
group = self.group
g = self.group.identity
while not word.is_cyclically_reduced():
exp1 = abs(word.exponent_syllable(0))
exp2 = abs(word.exponent_syllable(-1))
exp = min(exp1, exp2)
start = word[0]**abs(exp)
end = word[-1]**abs(exp)
word = start**-1*word*end**-1
g = g*start
if removed:
return word, g
return word
def power_of(self, other):
'''
Check if `self == other**n` for some integer n.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> ((x*y)**2).power_of(x*y)
True
>>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3)
True
'''
if self.is_identity:
return True
l = len(other)
if l == 1:
# self has to be a power of one generator
gens = self.contains_generators()
s = other in gens or other**-1 in gens
return len(gens) == 1 and s
# if self is not cyclically reduced and it is a power of other,
# other isn't cyclically reduced and the parts removed during
# their reduction must be equal
reduced, r1 = self.cyclic_reduction(removed=True)
if not r1.is_identity:
other, r2 = other.cyclic_reduction(removed=True)
if r1 == r2:
return reduced.power_of(other)
return False
if len(self) < l or len(self) % l:
return False
prefix = self.subword(0, l)
if prefix == other or prefix**-1 == other:
rest = self.subword(l, len(self))
return rest.power_of(other)
return False
def letter_form_to_array_form(array_form, group):
"""
    Convert a letter form (a flat list in which elements may repeat
    consecutively) into an array form: each run of a repeated element is
    replaced by a two-tuple whose first entry is the element and whose
    second entry is the (signed) length of the run.
"""
a = list(array_form[:])
new_array = []
n = 1
symbols = group.symbols
for i in range(len(a)):
if i == len(a) - 1:
if a[i] == a[i - 1]:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -1))
else:
new_array.append((a[i], 1))
return new_array
elif a[i] == a[i + 1]:
n += 1
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
n = 1
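# Hedged example (illustrative): for the letter form of x*x*y**-1,
#   letter_form_to_array_form([x, x, -y], group) -> [(x, 2), (y, -1)]
# where x and -y stand for the signed generator symbols.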
def zero_mul_simp(l, index):
"""Used to combine two reduced words."""
while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]:
exp = l[index][1] + l[index + 1][1]
base = l[index][0]
l[index] = (base, exp)
del l[index + 1]
if l[index][1] == 0:
del l[index]
index -= 1
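# Hedged example (illustrative): the syllable list is simplified in place,
#   l = [(x, 2), (x, -2), (y, 1)]; zero_mul_simp(l, 0)  # l becomes [(y, 1)]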
# ===== dkotenko/npuzz: /map.py =====
from Printer import Printer
import sys
def parse_int(s):
n = 0
try:
n = int(s)
except ValueError:
s_value = s.strip() if s.strip() else '{empty value}'
Printer.print_error_exit(f"map error: string {s_value} is not an integer")
return n
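# Usage sketch (hedged; illustrative values):
#   parse_int("42")   -> 42
#   parse_int(" 4a ") -> prints a map error and exits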
def validate_map(b):
nums = [parse_int(s) for s in b.split("/")]
dict_count = {i: nums.count(i) for i in nums}
    if max(dict_count.values()) > 1:
        for key, val in dict_count.items():
            if val > 1:
                Printer.print_error(f'map error: duplicated number {key}')
        sys.exit(1)
    if any(x >= len(nums) or x < 0 for x in nums):
        for n in nums:
            if n >= len(nums) or n < 0:
                Printer.print_error(f'map error: invalid number {n}: must be in range 0:{len(nums) - 1}')
        sys.exit(1)
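# Usage sketch (hedged; illustrative 3x3 board in the '/'-joined format
# built by parse_map below):
#   validate_map("1/2/3/4/5/6/7/8/0")  # valid: silent
#   validate_map("1/1/3/4/5/6/7/8/0")  # duplicate 1: error and exit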
def parse_map(file_name):
    try:
        f = open(file_name)
        f.close()
    except FileNotFoundError:
        Printer.print_error_exit(f"there is no file {file_name}")
with open(file_name, "r") as file:
bb = ''
line = file.readline()
l_p = line.partition('#')[0]
while not l_p:
line = file.readline()
l_p = line.partition("#")[0]
size_matr = parse_int(l_p)
line = file.readline()
n_str = 1
while line:
line = line.partition('#')[0]
while not line:
line = file.readline()
line = line.partition("#")[0]
plus = '/'.join(line.split())
bb += '/'.join(line.split())
            bb += '/'  # nothing to replace at the end of the line, so append the separator manually
line = file.readline()
            if len(plus.split('/')) != size_matr:
                Printer.print_error_exit(f"invalid map: invalid number of values at row {n_str}")
n_str += 1
bb = bb[0: -1]
if (n_str - 1) != size_matr:
Printer.print_error_exit(f'invalid map: invalid rows number = {n_str - 1}')
    return bb

# ===== rmanzoni/HTT: /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467519/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_7/run_cfg.py =====
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467519/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_26_1_qK2.root',
'/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_27_1_vSH.root',
'/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_28_1_O6M.root')
)
# ===== exueyuanAlgorithm/AlgorithmDemo: /LeetCode算法题/0190_颠倒二进制位/颠倒二进制.py =====
class Solution:
def reverseBits(self, n: int) -> int:
result_num = 0
for i in range(31):
if n % 2 == 1:
                result_num = (result_num + 1) << 1  # append the 1-bit, then shift left
else:
result_num = result_num << 1
n = n >> 1
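        # after 31 shift-and-append steps, the last (32nd) input bit is
        # appended without a trailing shift: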
if n % 2 == 1:
result_num += 1
return result_num
solution = Solution()
print(solution.reverseBits(0b111))  # prints 3758096384 (0b111 reversed within 32 bits)

# ===== RuchiBhardwaj/covid_pipeline: /venv/lib/python3.6/site-packages/bigquery/client.py =====
import calendar
import json
from logging import getLogger, NullHandler
from collections import defaultdict
from datetime import datetime, timedelta
from hashlib import sha256
from io import StringIO
from time import sleep, time
from functools import reduce
import six
from bigquery.errors import (BigQueryTimeoutException, JobExecutingException,
JobInsertException, UnfinishedQueryException)
from googleapiclient.discovery import build, DISCOVERY_URI
from googleapiclient.errors import HttpError
from httplib2 import Http
BIGQUERY_SCOPE = [
'https://www.googleapis.com/auth/bigquery'
]
BIGQUERY_SCOPE_READ_ONLY = [
'https://www.googleapis.com/auth/bigquery.readonly'
]
CACHE_TIMEOUT = timedelta(seconds=30)
JOB_CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
JOB_CREATE_NEVER = 'CREATE_NEVER'
JOB_WRITE_TRUNCATE = 'WRITE_TRUNCATE'
JOB_WRITE_APPEND = 'WRITE_APPEND'
JOB_WRITE_EMPTY = 'WRITE_EMPTY'
JOB_ENCODING_UTF_8 = 'UTF-8'
JOB_ENCODING_ISO_8859_1 = 'ISO-8859-1'
JOB_PRIORITY_INTERACTIVE = 'INTERACTIVE'
JOB_PRIORITY_BATCH = 'BATCH'
JOB_COMPRESSION_NONE = 'NONE'
JOB_COMPRESSION_GZIP = 'GZIP'
JOB_FORMAT_CSV = 'CSV'
JOB_FORMAT_NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
JOB_SOURCE_FORMAT_DATASTORE_BACKUP = 'DATASTORE_BACKUP'
JOB_SOURCE_FORMAT_NEWLINE_DELIMITED_JSON = JOB_FORMAT_NEWLINE_DELIMITED_JSON
JOB_SOURCE_FORMAT_CSV = JOB_FORMAT_CSV
JOB_DESTINATION_FORMAT_AVRO = 'AVRO'
JOB_DESTINATION_FORMAT_NEWLINE_DELIMITED_JSON = \
JOB_FORMAT_NEWLINE_DELIMITED_JSON
JOB_DESTINATION_FORMAT_CSV = JOB_FORMAT_CSV
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def get_client(project_id=None, credentials=None,
service_url=None, service_account=None,
private_key=None, private_key_file=None,
json_key=None, json_key_file=None,
readonly=True, swallow_results=True,
num_retries=0):
"""Return a singleton instance of BigQueryClient. Either
AssertionCredentials or a service account and private key combination need
to be provided in order to authenticate requests to BigQuery.
Parameters
----------
project_id : str, optional
The BigQuery project id, required unless json_key or json_key_file is
provided.
credentials : oauth2client.client.SignedJwtAssertionCredentials, optional
AssertionCredentials instance to authenticate requests to BigQuery
(optional, must provide `service_account` and (`private_key` or
`private_key_file`) or (`json_key` or `json_key_file`) if not included
service_url : str, optional
A URI string template pointing to the location of Google's API
discovery service. Requires two parameters {api} and {apiVersion} that
when filled in produce an absolute URI to the discovery document for
that service. If not set then the default googleapiclient discovery URI
is used. See `credentials`
service_account : str, optional
The Google API service account name. See `credentials`
private_key : str, optional
The private key associated with the service account in PKCS12 or PEM
format. See `credentials`
private_key_file : str, optional
The name of the file containing the private key associated with the
service account in PKCS12 or PEM format. See `credentials`
json_key : dict, optional
The JSON key associated with the service account. See `credentials`
json_key_file : str, optional
The name of the JSON key file associated with the service account. See
`credentials`.
readonly : bool
Bool indicating if BigQuery access is read-only. Has no effect if
credentials are provided. Default True.
swallow_results : bool
If set to False, then return the actual response value instead of
converting to boolean. Default True.
num_retries : int, optional
The number of times to retry the request. Default 0 (no retry).
Returns
-------
BigQueryClient
An instance of the BigQuery client.
"""
if not credentials:
assert (service_account and (private_key or private_key_file)) or (
json_key or json_key_file), \
'Must provide AssertionCredentials or service account and P12 key\
or JSON key'
if not project_id:
assert json_key or json_key_file, \
'Must provide project_id unless json_key or json_key_file is\
provided'
if service_url is None:
service_url = DISCOVERY_URI
scope = BIGQUERY_SCOPE_READ_ONLY if readonly else BIGQUERY_SCOPE
if private_key_file:
credentials = _credentials().from_p12_keyfile(service_account,
private_key_file,
scopes=scope)
if private_key:
try:
if isinstance(private_key, basestring):
private_key = private_key.decode('utf-8')
except NameError:
# python3 -- private_key is already unicode
pass
credentials = _credentials().from_p12_keyfile_buffer(
service_account,
StringIO(private_key),
scopes=scope)
if json_key_file:
with open(json_key_file, 'r') as key_file:
json_key = json.load(key_file)
if json_key:
credentials = _credentials().from_json_keyfile_dict(json_key,
scopes=scope)
if not project_id:
project_id = json_key['project_id']
bq_service = _get_bq_service(credentials=credentials,
service_url=service_url)
return BigQueryClient(bq_service, project_id, swallow_results,
num_retries)
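# Usage sketch (hedged; the key file, project id and account names are
# illustrative, not part of the original file):
#   client = get_client(json_key_file='key.json', readonly=True)
#   client = get_client(project_id='my-project',
#                       service_account='[email protected]',
#                       private_key_file='key.p12', readonly=False)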
def get_projects(bq_service):
"""Given the BigQuery service, return data about all projects."""
projects_request = bq_service.projects().list().execute()
projects = []
for project in projects_request.get('projects', []):
project_data = {
'id': project['id'],
'name': project['friendlyName']
}
projects.append(project_data)
return projects
def _get_bq_service(credentials=None, service_url=None):
"""Construct an authorized BigQuery service object."""
assert credentials, 'Must provide ServiceAccountCredentials'
http = credentials.authorize(Http())
service = build(
'bigquery',
'v2',
http=http,
discoveryServiceUrl=service_url,
cache_discovery=False
)
return service
def _credentials():
"""Import and return SignedJwtAssertionCredentials class"""
from oauth2client.service_account import ServiceAccountCredentials
return ServiceAccountCredentials
class BigQueryClient(object):
def __init__(self, bq_service, project_id, swallow_results=True,
num_retries=0):
self.bigquery = bq_service
self.project_id = project_id
self.swallow_results = swallow_results
self.num_retries = num_retries
self.cache = {}
def _get_project_id(self, project_id=None):
""" Get new project_id
        Default is self.project_id, the project the client authenticated to.
        A different project_id can be passed when the client authenticates to
        one project but wants to run jobs in another.
Parameters
----------
project_id : str
BigQuery project_id
Returns
-------
project_id: BigQuery project_id
"""
if project_id is None:
project_id = self.project_id
return project_id
def _submit_query_job(self, query_data):
""" Submit a query job to BigQuery.
This is similar to BigQueryClient.query, but gives the user
        direct access to the query method on the official BigQuery
python client.
For fine-grained control over a query job, see:
https://google-api-client-libraries.appspot.com/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#query
Parameters
----------
query_data
query object as per "configuration.query" in
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query
Returns
-------
tuple
job id and query results if query completed. If dry_run is True,
job id will be None and results will be [cacheHit and totalBytesProcessed] if the query is valid
or a dict containing the response if invalid.
Raises
------
BigQueryTimeoutException
On timeout
"""
logger.debug('Submitting query job: %s' % query_data)
job_collection = self.bigquery.jobs()
try:
query_reply = job_collection.query(
projectId=self.project_id, body=query_data).execute(
num_retries=self.num_retries)
except HttpError as e:
if query_data.get("dryRun", False):
return None, json.loads(e.content.decode('utf8'))
raise
job_id = query_reply['jobReference'].get('jobId')
schema = query_reply.get('schema', {'fields': None})['fields']
rows = query_reply.get('rows', [])
job_complete = query_reply.get('jobComplete', False)
cache_hit = query_reply['cacheHit']
total_bytes_processed = query_reply['totalBytesProcessed']
# raise exceptions if it's not an async query
# and job is not completed after timeout
if not job_complete and query_data.get("timeoutMs", False):
logger.error('BigQuery job %s timeout' % job_id)
raise BigQueryTimeoutException()
if query_data.get("dryRun", True):
return job_id, [cache_hit, total_bytes_processed]
return job_id, [self._transform_row(row, schema) for row in rows]
def _get_job_reference(self, job_id):
""" Get job reference from job_id
For more details, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#resource
Parameters
----------
job_id:
Id of the job
Returns
-------
job_reference: json of job_reference
"""
job_reference = {
"projectId": self.project_id,
"jobId": job_id
}
return job_reference
def _insert_job(self, body_object):
""" Submit a job to BigQuery
        Direct proxy to the insert() method of the official BigQuery
python client.
Able to submit load, link, query, copy, or extract jobs.
For more details, see:
https://google-api-client-libraries.appspot.com/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#insert
Parameters
----------
body_object : body object passed to bigquery.jobs().insert()
Returns
-------
response of the bigquery.jobs().insert().execute() call
Raises
------
BigQueryTimeoutException on timeout
"""
logger.debug('Submitting job: %s' % body_object)
job_collection = self.bigquery.jobs()
return job_collection.insert(
projectId=self.project_id,
body=body_object
).execute(num_retries=self.num_retries)
def query(self, query, max_results=None, timeout=0, dry_run=False, use_legacy_sql=None, external_udf_uris=None):
"""Submit a query to BigQuery.
Parameters
----------
query : str
BigQuery query string
max_results : int, optional
The maximum number of rows to return per page of results.
timeout : float, optional
How long to wait for the query to complete, in seconds before
the request times out and returns.
dry_run : bool, optional
If True, the query isn't actually run. A valid query will return
cache hit, and total bytes processed, while an invalid one will return the same error
message it would if it wasn't a dry run.
use_legacy_sql : bool, optional. Default True.
If False, the query will use BigQuery's standard SQL (https://cloud.google.com/bigquery/sql-reference/)
external_udf_uris : list, optional
Contains external UDF URIs. If given, URIs must be Google Cloud
Storage and have .js extensions.
Returns
-------
tuple
(job id, query results) if the query completed. If dry_run is True,
job id will be None and results will be [cacheHit and totalBytesProcessed] if the query is valid
or a ``dict`` containing the response if invalid.
Raises
------
BigQueryTimeoutException
on timeout
"""
logger.debug('Executing query: %s' % query)
query_data = {
'query': query,
'timeoutMs': timeout * 1000,
'dryRun': dry_run,
'maxResults': max_results
}
if use_legacy_sql is not None:
query_data['useLegacySql'] = use_legacy_sql
if external_udf_uris:
query_data['userDefinedFunctionResources'] = \
[ {'resourceUri': u} for u in external_udf_uris ]
return self._submit_query_job(query_data)
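    # Usage sketch (hedged; dataset and table names are illustrative):
    #   job_id, rows = client.query(
    #       'SELECT name FROM [my_dataset.my_table] LIMIT 10', timeout=10)
    #   _, stats = client.query('SELECT ...', dry_run=True)
    #   # stats == [cache_hit, total_bytes_processed] for a valid dry run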
def get_query_schema(self, job_id):
"""Retrieve the schema of a query by job id.
Parameters
----------
job_id : str
The job_id that references a BigQuery query
Returns
-------
list
A ``list`` of ``dict`` objects that represent the schema.
"""
query_reply = self.get_query_results(job_id, offset=0, limit=0)
if not query_reply['jobComplete']:
logger.warning('BigQuery job %s not complete' % job_id)
raise UnfinishedQueryException()
return query_reply['schema']['fields']
def get_table_schema(self, dataset, table, project_id=None):
"""Return the table schema.
Parameters
----------
dataset : str
The dataset containing the `table`.
table : str
The table to get the schema for
project_id: str, optional
The project of the dataset.
Returns
-------
list
A ``list`` of ``dict`` objects that represent the table schema. If
the table doesn't exist, None is returned.
"""
project_id = self._get_project_id(project_id)
try:
result = self.bigquery.tables().get(
projectId=project_id,
tableId=table,
datasetId=dataset).execute(num_retries=self.num_retries)
except HttpError as e:
if int(e.resp['status']) == 404:
                logger.warning('Table %s.%s does not exist', dataset, table)
return None
raise
return result['schema']['fields']
def check_job(self, job_id):
"""Return the state and number of results of a query by job id.
Parameters
----------
job_id : str
The job id of the query to check.
Returns
-------
tuple
(``bool``, ``int``) Whether or not the query has completed and the
total number of rows included in the query table if it has
completed (else 0)
"""
query_reply = self.get_query_results(job_id, offset=0, limit=0)
return (query_reply.get('jobComplete', False),
int(query_reply.get('totalRows', 0)))
def get_query_rows(self, job_id, offset=None, limit=None, timeout=0):
"""Retrieve a list of rows from a query table by job id.
This method will append results from multiple pages together. If you
want to manually page through results, you can use `get_query_results`
method directly.
Parameters
----------
job_id : str
The job id that references a BigQuery query.
offset : int, optional
The offset of the rows to pull from BigQuery
limit : int, optional
The number of rows to retrieve from a query table.
timeout : float, optional
Timeout in seconds.
Returns
-------
list
A ``list`` of ``dict`` objects that represent table rows.
"""
# Get query results
query_reply = self.get_query_results(job_id, offset=offset,
limit=limit, timeout=timeout)
if not query_reply['jobComplete']:
logger.warning('BigQuery job %s not complete' % job_id)
raise UnfinishedQueryException()
schema = query_reply["schema"]["fields"]
rows = query_reply.get('rows', [])
page_token = query_reply.get("pageToken")
records = [self._transform_row(row, schema) for row in rows]
# Append to records if there are multiple pages for query results
while page_token and (not limit or len(records) < limit):
query_reply = self.get_query_results(
job_id, offset=offset, limit=limit, page_token=page_token,
timeout=timeout)
page_token = query_reply.get("pageToken")
rows = query_reply.get('rows', [])
records += [self._transform_row(row, schema) for row in rows]
return records[:limit] if limit else records
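    # Usage sketch (hedged): result pages are fetched and concatenated
    # transparently, returning a list of dicts keyed by column name:
    #   rows = client.get_query_rows(job_id, limit=1000)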
def check_dataset(self, dataset_id, project_id=None):
"""Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
            True if dataset at `dataset_id` exists, else False
"""
dataset = self.get_dataset(dataset_id, project_id)
return bool(dataset)
def get_dataset(self, dataset_id, project_id=None):
"""Retrieve a dataset if it exists, otherwise return an empty dict.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
dict
Contains dataset object if it exists, else empty
"""
project_id = self._get_project_id(project_id)
try:
dataset = self.bigquery.datasets().get(
projectId=project_id, datasetId=dataset_id).execute(
num_retries=self.num_retries)
except HttpError:
dataset = {}
return dataset
def check_table(self, dataset, table, project_id=None):
"""Check to see if a table exists.
Parameters
----------
dataset : str
The dataset to check
table : str
The name of the table
project_id: str, optional
The project the table is in
Returns
-------
bool
True if table exists, else False
"""
table = self.get_table(dataset, table, project_id)
return bool(table)
def get_table(self, dataset, table, project_id=None):
""" Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty
"""
project_id = self._get_project_id(project_id)
try:
table = self.bigquery.tables().get(
projectId=project_id, datasetId=dataset,
tableId=table).execute(num_retries=self.num_retries)
except HttpError:
table = {}
return table
def create_table(self, dataset, table, schema,
expiration_time=None, time_partitioning=False,
project_id=None):
"""Create a new table in the dataset.
Parameters
----------
dataset : str
The dataset to create the table in
table : str
The name of the table to create
schema : dict
The table schema
expiration_time : int or double, optional
The expiry time in milliseconds since the epoch.
time_partitioning : bool, optional
Create a time partitioning.
project_id: str, optional
The project to create the table in
Returns
-------
Union[bool, dict]
If the table was successfully created, or response from BigQuery
if swallow_results is set to False
"""
project_id = self._get_project_id(project_id)
body = {
'schema': {'fields': schema},
'tableReference': {
'tableId': table,
'projectId': project_id,
'datasetId': dataset
}
}
if expiration_time is not None:
body['expirationTime'] = expiration_time
if time_partitioning:
body['timePartitioning'] = {'type': 'DAY'}
try:
table = self.bigquery.tables().insert(
projectId=project_id,
datasetId=dataset,
body=body
).execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return table
except HttpError as e:
logger.error(('Cannot create table {0}.{1}.{2}\n'
'Http Error: {3}').format(project_id, dataset, table, e.content))
if self.swallow_results:
return False
else:
return {}
def update_table(self, dataset, table, schema, project_id=None):
"""Update an existing table in the dataset.
Parameters
----------
dataset : str
The dataset to update the table in
table : str
The name of the table to update
schema : dict
Table schema
project_id: str, optional
The project to update the table in
Returns
-------
Union[bool, dict]
bool indicating if the table was successfully updated or not,
or response from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
body = {
'schema': {'fields': schema},
'tableReference': {
'tableId': table,
'projectId': project_id,
'datasetId': dataset
}
}
try:
result = self.bigquery.tables().update(
projectId=project_id,
                tableId=table,
datasetId=dataset,
body=body
).execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return result
except HttpError as e:
logger.error(('Cannot update table {0}.{1}.{2}\n'
'Http Error: {3}').format(project_id, dataset, table, e.content))
if self.swallow_results:
return False
else:
return {}
def patch_table(self, dataset, table, schema, project_id=None):
"""Patch an existing table in the dataset.
Parameters
----------
dataset : str
The dataset to patch the table in
table : str
The name of the table to patch
schema : dict
The table schema
project_id: str, optional
The project to patch the table in
Returns
-------
Union[bool, dict]
Bool indicating if the table was successfully patched or not,
or response from BigQuery if swallow_results is set to False
"""
project_id = self._get_project_id(project_id)
body = {
'schema': {'fields': schema},
}
try:
result = self.bigquery.tables().patch(
projectId=project_id,
datasetId=dataset,
tableId=table,
body=body
).execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return result
except HttpError as e:
logger.error(('Cannot patch table {0}.{1}.{2}\n'
'Http Error: {3}').format(project_id, dataset, table, e.content))
if self.swallow_results:
return False
else:
return {}
def create_view(self, dataset, view, query, use_legacy_sql=None, project_id=None):
"""Create a new view in the dataset.
Parameters
----------
dataset : str
The dataset to create the view in
view : str
The name of the view to create
query : dict
A query that BigQuery executes when the view is referenced.
use_legacy_sql : bool, optional
If False, the query will use BigQuery's standard SQL
(https://cloud.google.com/bigquery/sql-reference/)
project_id: str, optional
The project to create the view in
Returns
-------
Union[bool, dict]
bool indicating if the view was successfully created or not,
or response from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
body = {
'tableReference': {
'tableId': view,
'projectId': project_id,
'datasetId': dataset
},
'view': {
'query': query
}
}
if use_legacy_sql is not None:
body['view']['useLegacySql'] = use_legacy_sql
try:
view = self.bigquery.tables().insert(
projectId=project_id,
datasetId=dataset,
body=body
).execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return view
except HttpError as e:
logger.error(('Cannot create view {0}.{1}\n'
'Http Error: {2}').format(dataset, view, e.content))
if self.swallow_results:
return False
else:
return {}
def delete_table(self, dataset, table, project_id=None):
"""Delete a table from the dataset.
Parameters
----------
dataset : str
The dataset to delete the table from.
table : str
The name of the table to delete
project_id: str, optional
String id of the project
Returns
-------
Union[bool, dict]
bool indicating if the table was successfully deleted or not,
            or response from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
try:
response = self.bigquery.tables().delete(
projectId=project_id,
datasetId=dataset,
tableId=table
).execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(('Cannot delete table {0}.{1}\n'
'Http Error: {2}').format(dataset, table, e.content))
if self.swallow_results:
return False
else:
return {}
def get_tables(self, dataset_id, app_id, start_time, end_time, project_id=None):
"""Retrieve a list of tables that are related to the given app id
and are inside the range of start and end times.
Parameters
----------
dataset_id : str
The BigQuery dataset id to consider.
app_id : str
The appspot name
start_time : Union[datetime, int]
The datetime or unix time after which records will be fetched.
end_time : Union[datetime, int]
The datetime or unix time up to which records will be fetched.
project_id: str, optional
String id of the project
Returns
-------
list
A ``list`` of table names.
"""
if isinstance(start_time, datetime):
start_time = calendar.timegm(start_time.utctimetuple())
if isinstance(end_time, datetime):
end_time = calendar.timegm(end_time.utctimetuple())
every_table = self._get_all_tables(dataset_id, project_id)
app_tables = every_table.get(app_id, {})
return self._filter_tables_by_time(app_tables, start_time, end_time)
def import_data_from_uris(
self,
source_uris,
dataset,
table,
schema=None,
job=None,
source_format=None,
create_disposition=None,
write_disposition=None,
encoding=None,
ignore_unknown_values=None,
max_bad_records=None,
allow_jagged_rows=None,
allow_quoted_newlines=None,
field_delimiter=None,
quote=None,
skip_leading_rows=None,
project_id=None,
):
"""
Imports data into a BigQuery table from cloud storage. Optional
arguments that are not specified are determined by BigQuery as
described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
        source_uris : list
A ``list`` of ``str`` objects representing the urls on cloud
storage of the form: gs://bucket/filename
dataset : str
String id of the dataset
table : str
String id of the table
schema : list, optional
Represents the BigQuery schema
job : str, optional
Identifies the job (a unique job id is automatically generated if
not provided)
source_format : str, optional
One of the JOB_SOURCE_FORMAT_* constants
create_disposition : str, optional
One of the JOB_CREATE_* constants
write_disposition : str, optional
One of the JOB_WRITE_* constants
encoding : str, optional
One of the JOB_ENCODING_* constants
ignore_unknown_values : bool, optional
Whether or not to ignore unknown values
max_bad_records : int, optional
Maximum number of bad records
allow_jagged_rows : bool, optional
For csv only
allow_quoted_newlines : bool, optional
For csv only
field_delimiter : str, optional
For csv only
quote : str, optional
Quote character for csv only
skip_leading_rows : int, optional
For csv only
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job response
Raises
------
JobInsertException
on http/auth failures or error in result
"""
source_uris = source_uris if isinstance(source_uris, list) \
else [source_uris]
project_id = self._get_project_id(project_id)
configuration = {
"destinationTable": {
"projectId": project_id,
"tableId": table,
"datasetId": dataset
},
"sourceUris": source_uris,
}
if max_bad_records:
configuration['maxBadRecords'] = max_bad_records
if ignore_unknown_values:
configuration['ignoreUnknownValues'] = ignore_unknown_values
if create_disposition:
configuration['createDisposition'] = create_disposition
if write_disposition:
configuration['writeDisposition'] = write_disposition
if encoding:
configuration['encoding'] = encoding
if schema:
configuration['schema'] = {'fields': schema}
if source_format:
configuration['sourceFormat'] = source_format
if not job:
hex = self._generate_hex_for_uris(source_uris)
job = "{dataset}-{table}-{digest}".format(
dataset=dataset,
table=table,
digest=hex
)
if source_format == JOB_SOURCE_FORMAT_CSV:
if field_delimiter:
configuration['fieldDelimiter'] = field_delimiter
if allow_jagged_rows:
configuration['allowJaggedRows'] = allow_jagged_rows
if allow_quoted_newlines:
configuration['allowQuotedNewlines'] = allow_quoted_newlines
if quote:
configuration['quote'] = quote
if skip_leading_rows:
configuration['skipLeadingRows'] = skip_leading_rows
elif field_delimiter or allow_jagged_rows \
or allow_quoted_newlines or quote or skip_leading_rows:
all_values = dict(field_delimiter=field_delimiter,
allow_jagged_rows=allow_jagged_rows,
allow_quoted_newlines=allow_quoted_newlines,
skip_leading_rows=skip_leading_rows,
quote=quote)
non_null_values = dict((k, v) for k, v
in list(all_values.items())
if v)
raise Exception("Parameters field_delimiter, allow_jagged_rows, "
"allow_quoted_newlines, quote and "
"skip_leading_rows are only allowed when "
"source_format=JOB_SOURCE_FORMAT_CSV: %s"
% non_null_values)
body = {
"configuration": {
'load': configuration
},
"jobReference": self._get_job_reference(job)
}
logger.debug("Creating load job %s" % body)
job_resource = self._insert_job(body)
self._raise_insert_exception_if_error(job_resource)
return job_resource
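    # Usage sketch (hedged; bucket, dataset and table names are illustrative):
    #   job = client.import_data_from_uris(
    #       ['gs://my-bucket/data.json'], 'my_dataset', 'my_table',
    #       source_format=JOB_SOURCE_FORMAT_NEWLINE_DELIMITED_JSON,
    #       write_disposition=JOB_WRITE_TRUNCATE)
    #   client.wait_for_job(job, interval=5, timeout=120)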
def export_data_to_uris(
self,
destination_uris,
dataset,
table,
job=None,
compression=None,
destination_format=None,
print_header=None,
field_delimiter=None,
project_id=None,
):
"""
Export data from a BigQuery table to cloud storage. Optional arguments
that are not specified are determined by BigQuery as described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
destination_uris : Union[str, list]
``str`` or ``list`` of ``str`` objects representing the URIs on
cloud storage of the form: gs://bucket/filename
dataset : str
String id of the dataset
table : str
String id of the table
job : str, optional
String identifying the job (a unique jobid is automatically
generated if not provided)
compression : str, optional
One of the JOB_COMPRESSION_* constants
destination_format : str, optional
            One of the JOB_DESTINATION_FORMAT_* constants
print_header : bool, optional
Whether or not to print the header
field_delimiter : str, optional
Character separating fields in delimited file
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job resource
Raises
------
JobInsertException
On http/auth failures or error in result
"""
destination_uris = destination_uris \
if isinstance(destination_uris, list) else [destination_uris]
project_id = self._get_project_id(project_id)
configuration = {
"sourceTable": {
"projectId": project_id,
"tableId": table,
"datasetId": dataset
},
"destinationUris": destination_uris,
}
if compression:
configuration['compression'] = compression
if destination_format:
configuration['destinationFormat'] = destination_format
if print_header is not None:
configuration['printHeader'] = print_header
if field_delimiter:
configuration['fieldDelimiter'] = field_delimiter
if not job:
hex = self._generate_hex_for_uris(destination_uris)
job = "{dataset}-{table}-{digest}".format(
dataset=dataset,
table=table,
digest=hex
)
body = {
"configuration": {
'extract': configuration
},
"jobReference": self._get_job_reference(job)
}
logger.info("Creating export job %s" % body)
job_resource = self._insert_job(body)
self._raise_insert_exception_if_error(job_resource)
return job_resource
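    # Usage sketch (hedged; the destination URI pattern is illustrative):
    #   job = client.export_data_to_uris(
    #       ['gs://my-bucket/export-*.csv.gz'], 'my_dataset', 'my_table',
    #       compression=JOB_COMPRESSION_GZIP)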
def write_to_table(
self,
query,
dataset=None,
table=None,
external_udf_uris=None,
allow_large_results=None,
use_query_cache=None,
priority=None,
create_disposition=None,
write_disposition=None,
use_legacy_sql=None,
maximum_billing_tier=None,
flatten=None,
project_id=None,
):
"""
Write query result to table. If dataset or table is not provided,
        BigQuery will write the result to a temporary table. Optional arguments
that are not specified are determined by BigQuery as described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
query : str
BigQuery query string
dataset : str, optional
String id of the dataset
table : str, optional
String id of the table
external_udf_uris : list, optional
Contains external UDF URIs. If given, URIs must be Google Cloud
Storage and have .js extensions.
allow_large_results : bool, optional
Whether or not to allow large results
use_query_cache : bool, optional
Whether or not to use query cache
priority : str, optional
One of the JOB_PRIORITY_* constants
create_disposition : str, optional
One of the JOB_CREATE_* constants
write_disposition : str, optional
One of the JOB_WRITE_* constants
use_legacy_sql: bool, optional
If False, the query will use BigQuery's standard SQL
(https://cloud.google.com/bigquery/sql-reference/)
maximum_billing_tier : integer, optional
Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If
unspecified, this will be set to your project default. For more
information,
see https://cloud.google.com/bigquery/pricing#high-compute
flatten : bool, optional
Whether or not to flatten nested and repeated fields
in query results
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job resource
Raises
------
JobInsertException
On http/auth failures or error in result
"""
configuration = {
"query": query,
}
project_id = self._get_project_id(project_id)
if dataset and table:
configuration['destinationTable'] = {
"projectId": project_id,
"tableId": table,
"datasetId": dataset
}
if allow_large_results is not None:
configuration['allowLargeResults'] = allow_large_results
if flatten is not None:
configuration['flattenResults'] = flatten
if maximum_billing_tier is not None:
configuration['maximumBillingTier'] = maximum_billing_tier
if use_query_cache is not None:
configuration['useQueryCache'] = use_query_cache
if use_legacy_sql is not None:
configuration['useLegacySql'] = use_legacy_sql
if priority:
configuration['priority'] = priority
if create_disposition:
configuration['createDisposition'] = create_disposition
if write_disposition:
configuration['writeDisposition'] = write_disposition
if external_udf_uris:
configuration['userDefinedFunctionResources'] = \
[ {'resourceUri': u} for u in external_udf_uris ]
body = {
"configuration": {
'query': configuration
}
}
logger.info("Creating write to table job %s" % body)
job_resource = self._insert_job(body)
self._raise_insert_exception_if_error(job_resource)
return job_resource
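    # Usage sketch (hedged; names are illustrative):
    #   job = client.write_to_table(
    #       'SELECT * FROM [my_dataset.source]',
    #       dataset='my_dataset', table='destination',
    #       write_disposition=JOB_WRITE_TRUNCATE)
    #   client.wait_for_job(job)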
def wait_for_job(self, job, interval=5, timeout=60):
"""
Waits until the job indicated by job_resource is done or has failed
Parameters
----------
job : Union[dict, str]
``dict`` representing a BigQuery job resource, or a ``str``
representing the BigQuery job id
interval : float, optional
Polling interval in seconds, default = 5
timeout : float, optional
Timeout in seconds, default = 60
Returns
-------
dict
            Final state of the job resource, as described here:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get
Raises
------
Union[JobExecutingException, BigQueryTimeoutException]
On http/auth failures or timeout
"""
complete = False
job_id = str(job if isinstance(job,
(six.binary_type, six.text_type, int))
else job['jobReference']['jobId'])
job_resource = None
start_time = time()
elapsed_time = 0
while not (complete or elapsed_time > timeout):
sleep(interval)
request = self.bigquery.jobs().get(projectId=self.project_id,
jobId=job_id)
job_resource = request.execute(num_retries=self.num_retries)
self._raise_executing_exception_if_error(job_resource)
complete = job_resource.get('status').get('state') == u'DONE'
elapsed_time = time() - start_time
# raise exceptions if timeout
if not complete:
logger.error('BigQuery job %s timeout' % job_id)
raise BigQueryTimeoutException()
return job_resource
def push_rows(self, dataset, table, rows, insert_id_key=None,
skip_invalid_rows=None, ignore_unknown_values=None,
template_suffix=None, project_id=None):
"""Upload rows to BigQuery table.
Parameters
----------
dataset : str
The dataset to upload to
table : str
The name of the table to insert rows into
rows : list
A ``list`` of rows (``dict`` objects) to add to the table
insert_id_key : str, optional
Key for insertId in row.
You can use dot separated key for nested column.
skip_invalid_rows : bool, optional
Insert all valid rows of a request, even if invalid rows exist.
ignore_unknown_values : bool, optional
Accept rows that contain values that do not match the schema.
template_suffix : str, optional
Inserts the rows into an {table}{template_suffix}.
If table {table}{template_suffix} doesn't exist, create from {table}.
project_id: str, optional
The project to upload to
Returns
-------
Union[bool, dict]
bool indicating if insert succeeded or not, or response
from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
table_data = self.bigquery.tabledata()
rows_data = []
for row in rows:
each_row = {}
each_row["json"] = row
if insert_id_key is not None:
keys = insert_id_key.split('.')
val = reduce(lambda d, key: d.get(key) if d else None, keys, row)
if val is not None:
each_row["insertId"] = val
rows_data.append(each_row)
data = {
"kind": "bigquery#tableDataInsertAllRequest",
"rows": rows_data
}
if skip_invalid_rows is not None:
data['skipInvalidRows'] = skip_invalid_rows
if ignore_unknown_values is not None:
data['ignoreUnknownValues'] = ignore_unknown_values
if template_suffix is not None:
data['templateSuffix'] = template_suffix
try:
response = table_data.insertAll(
projectId=project_id,
datasetId=dataset,
tableId=table,
body=data
).execute(num_retries=self.num_retries)
if response.get('insertErrors'):
logger.error('BigQuery insert errors: %s' % response)
if self.swallow_results:
return False
else:
return response
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.exception('Problem with BigQuery insertAll')
if self.swallow_results:
return False
else:
return {
'insertErrors': [{
'errors': [{
'reason': 'httperror',
'message': e
}]
}]
}
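    # Hedged usage sketch (hypothetical dataset/table names and payload):
    #
    #   ok = client.push_rows(
    #       "my_dataset", "events",
    #       rows=[{"id": 1, "ts": 1458770004.0}, {"id": 2, "ts": 1458770005.0}],
    #       insert_id_key="id")  # per-row insertId lets BigQuery dedupe retries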
def get_all_tables(self, dataset_id, project_id=None):
"""Retrieve a list of tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table data for.
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
A ``list`` with all table names
"""
tables_data = self._get_all_tables_for_dataset(dataset_id, project_id)
tables = []
for table in tables_data.get('tables', []):
table_name = table.get('tableReference', {}).get('tableId')
if table_name:
tables.append(table_name)
return tables
def _get_all_tables(self, dataset_id, cache=False, project_id=None):
"""Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
"""
do_fetch = True
if cache and self.cache.get(dataset_id):
time, result = self.cache.get(dataset_id)
if datetime.now() - time < CACHE_TIMEOUT:
do_fetch = False
if do_fetch:
result = self._get_all_tables_for_dataset(dataset_id, project_id)
self.cache[dataset_id] = (datetime.now(), result)
return self._parse_table_list_response(result)
def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
"""Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` containing tables key with all tables
"""
project_id = self._get_project_id(project_id)
result = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id).execute(num_retries=self.num_retries)
page_token = result.get('nextPageToken')
while page_token:
res = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id,
pageToken=page_token
).execute(num_retries=self.num_retries)
page_token = res.get('nextPageToken')
result['tables'] += res.get('tables', [])
return result
def _parse_table_list_response(self, list_response):
"""Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
"""
tables = defaultdict(dict)
for table in list_response.get('tables', []):
table_ref = table.get('tableReference')
if not table_ref:
continue
table_id = table_ref.get('tableId', '')
year_month, app_id = self._parse_table_name(table_id)
if not year_month:
continue
table_date = datetime.strptime(year_month, '%Y-%m')
unix_seconds = calendar.timegm(table_date.timetuple())
tables[app_id].update({table_id: unix_seconds})
# Turn off defaulting so lookups on missing app ids raise KeyError
tables.default_factory = None
return tables
def _parse_table_name(self, table_id):
"""Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
"""
# Prefix date
attributes = table_id.split('_')
year_month = "-".join(attributes[:2])
app_id = "-".join(attributes[2:])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
# Postfix date
attributes = table_id.split('_')
year_month = "-".join(attributes[-2:])
app_id = "-".join(attributes[:-2])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]) and len(year_month) == 7:
return year_month, app_id
return None, None
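    # Illustrative expectations, derived from the parsing rules above:
    #
    #   _parse_table_name("2016_05_myapp")      ->  ("2016-05", "myapp")
    #   _parse_table_name("myapp_2016_05")      ->  ("2016-05", "myapp")
    #   _parse_table_name("export_20160501_3")  ->  (None, None)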
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)]
def _in_range(self, start_time, end_time, time):
"""Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
"""
ONE_MONTH = 2764800 # 32 days
return start_time <= time <= end_time or \
time <= start_time <= time + ONE_MONTH or \
time <= end_time <= time + ONE_MONTH
def get_query_results(self, job_id, offset=None, limit=None,
page_token=None, timeout=0):
"""Execute the query job indicated by the given job id. This is direct
mapping to bigquery api
https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
Parameters
----------
job_id : str
The job id of the query to check
offset : int, optional
The index the result set should start at.
limit : int, optional
The maximum number of results to retrieve.
page_token : optional
Page token, returned by previous call, to request the next page of
results.
timeout : float, optional
Timeout in seconds
Returns
-------
out
The query reply
"""
job_collection = self.bigquery.jobs()
return job_collection.getQueryResults(
projectId=self.project_id,
jobId=job_id,
startIndex=offset,
maxResults=limit,
pageToken=page_token,
timeoutMs=timeout * 1000).execute(num_retries=self.num_retries)
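    # Hedged pagination sketch (assumes `job_id` belongs to a completed
    # query job):
    #
    #   reply = client.get_query_results(job_id, limit=500)
    #   token = reply.get('pageToken')
    #   while token:
    #       reply = client.get_query_results(job_id, page_token=token)
    #       token = reply.get('pageToken')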
def _transform_row(self, row, schema):
"""Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
Mapping schema to row
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
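    # Illustrative transformation (shapes follow the BigQuery REST format;
    # the values are hypothetical):
    #
    #   schema = [{'name': 'n', 'type': 'INTEGER'},
    #             {'name': 'ok', 'type': 'BOOLEAN'}]
    #   row = {'f': [{'v': '3'}, {'v': 'true'}]}
    #   self._transform_row(row, schema)  ->  {'n': 3, 'ok': True}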
def _recurse_on_row(self, col_dict, nested_value):
"""Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
"""
row_value = None
# Multiple nested records
if col_dict['mode'] == 'REPEATED' and isinstance(nested_value, list):
row_value = [self._transform_row(record['v'], col_dict['fields'])
for record in nested_value]
# A single nested record
else:
row_value = self._transform_row(nested_value, col_dict['fields'])
return row_value
def _generate_hex_for_uris(self, uris):
"""Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
"""
return sha256((":".join(uris) + str(time())).encode()).hexdigest()
def _raise_insert_exception_if_error(self, job):
error_http = job.get('error')
if error_http:
raise JobInsertException(
"Error in export job API request: {0}".format(error_http))
# handle errorResult - API request is successful but error in result
error_result = job.get('status').get('errorResult')
if error_result:
raise JobInsertException(
"Reason:{reason}. Message:{message}".format(**error_result))
def _raise_executing_exception_if_error(self, job):
error_http = job.get('error')
if error_http:
raise JobExecutingException(
"Error in export job API request: {0}".format(error_http))
# handle errorResult - API request is successful but error in result
error_result = job.get('status').get('errorResult')
if error_result:
raise JobExecutingException(
"Reason:{reason}. Message:{message}".format(**error_result))
#
# DataSet manipulation methods
#
def create_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, location=None, project_id=None):
"""Create a new BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset, not the integer id of the dataset)
friendly_name: str, optional
A human readable name
description: str, optional
Longer string providing a description
access : list, optional
Indicating access permissions (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
location : str, optional
Indicating where dataset should be stored: EU or US (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if dataset was created or not, or response
from BigQuery if swallow_results is set to False
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
dataset_data = self.dataset_resource(dataset_id,
project_id=project_id,
friendly_name=friendly_name,
description=description,
access=access,
location=location
)
response = datasets.insert(projectId=project_id,
body=dataset_data).execute(
num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot create dataset {0}, {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
def get_datasets(self, project_id=None):
"""List all datasets in the project.
Parameters
----------
project_id: str
Unique ``str`` identifying the BigQuery project that contains the datasets
Returns
-------
list
Dataset resources
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
request = datasets.list(projectId=project_id)
result = request.execute(num_retries=self.num_retries)
return result.get('datasets', [])
except HttpError as e:
logger.error("Cannot list datasets: {0}".format(e))
return None
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
"""Delete a BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
delete_contents : bool, optional
If True, forces the deletion of the dataset even when the dataset
contains data (Default = False)
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the
dataset
Returns
-------
Union[bool, dict]
bool indicating if the delete was successful or not, or response
from BigQuery if swallow_results is set to False
Raises
-------
HttpError
404 when dataset with dataset_id does not exist
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
request = datasets.delete(projectId=project_id,
datasetId=dataset_id,
deleteContents=delete_contents)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot delete dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
def update_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, project_id=None):
"""Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
body = self.dataset_resource(dataset_id,
friendly_name=friendly_name,
description=description,
access=access,
project_id=project_id)
request = datasets.update(projectId=project_id,
datasetId=dataset_id,
body=body)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot update dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
def patch_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, project_id=None):
"""Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique string identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions.
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the patch was successful or not, or response
from BigQuery if swallow_results is set to False.
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
body = self.dataset_resource(dataset_id,
friendly_name=friendly_name,
description=description,
access=access,
project_id=project_id)
request = datasets.patch(projectId=project_id,
datasetId=dataset_id, body=body)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error('Cannot patch dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
def dataset_resource(self, ref_id, friendly_name=None, description=None,
access=None, location=None, project_id=None):
"""See
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource
Parameters
----------
ref_id : str
Dataset id (the reference id, not the integer id)
friendly_name : str, optional
An optional descriptive name for the dataset
description : str, optional
An optional description for the dataset
access : list, optional
Indicating access permissions
location: str, optional, 'EU' or 'US'
An optional geographical location for the dataset (EU or US)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
Representing BigQuery dataset resource
"""
project_id = self._get_project_id(project_id)
data = {
"datasetReference": {
"datasetId": ref_id,
"projectId": project_id
}
}
if friendly_name:
data["friendlyName"] = friendly_name
if description:
data["description"] = description
if access:
data["access"] = access
if location:
data["location"] = location
return data
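    # Illustrative result (hypothetical ids), matching the resource shape
    # documented above:
    #
    #   client.dataset_resource('my_dataset', location='EU',
    #                           project_id='my-project')
    #   ->  {'datasetReference': {'datasetId': 'my_dataset',
    #                             'projectId': 'my-project'},
    #        'location': 'EU'}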
@classmethod
def schema_from_record(cls, record):
"""Given a dict representing a record instance to be inserted into
BigQuery, calculate the schema.
Parameters
----------
record : dict
representing a record to be inserted into big query,
where all keys are ``str`` objects (representing column names in
the record) and all values are of type ``int``, ``str``,
``unicode``, ``float``, ``bool``, ``datetime``, or ``dict``. A
``dict`` value represents a record, and must conform to the same
restrictions as record.
Returns
-------
list
BigQuery schema
Notes
-----
Results are undefined if a different value type is provided for a
repeated field: E.g.
>>> { rfield: [ { x: 1}, {x: "a string"} ] } # undefined!
"""
from bigquery.schema_builder import schema_from_record
return schema_from_record(record)
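    # Hedged example (the exact type/mode mapping is delegated to
    # bigquery.schema_builder, so the output below is indicative only):
    #
    #   client.schema_from_record({'id': 1, 'name': 'a'})
    #   ->  e.g. [{'name': 'id', 'type': 'INTEGER', 'mode': 'NULLABLE'},
    #             {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}]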
| [
"[email protected]"
] | |
10b5605b4bccd6d1f948a4c6810b3e573adb67ae | a961aa04d7c7d18fd2ac7da8a8016bacfabc6e1b | /elevennote/src/notes/migrations/0007_auto_20200509_1450.py | 38a6a80a43cd9fce7abbf51b8a93bfb99cfc98ae | [] | no_license | EgorovM/cs102 | a4f6423f3e96064c68a9015118cd141a8a7eea14 | 0f72f9027dbcda510c67f815348a8ce58f76d857 | refs/heads/master | 2021-06-21T16:21:10.880523 | 2020-06-06T08:34:28 | 2020-06-06T08:34:28 | 214,231,423 | 0 | 1 | null | 2021-06-10T22:52:37 | 2019-10-10T16:24:08 | JavaScript | UTF-8 | Python | false | false | 440 | py | # Generated by Django 2.0.1 on 2020-05-09 14:50
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0006_note_shared'),
]
operations = [
migrations.AlterField(
model_name='note',
name='shared',
field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
cde9c5c591a0868fda460d5f45c15e0897cb2d77 | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v6/services/services/gender_view_service/transports/base.py | c2715ba063f55d3fe0da66e820e30cd4ad4a3ba0 | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import gender_view
from google.ads.googleads.v6.services.types import gender_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GenderViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GenderViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_gender_view: gapic_v1.method.wrap_method(
self.get_gender_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_gender_view(
self,
) -> typing.Callable[
[gender_view_service.GetGenderViewRequest], gender_view.GenderView
]:
raise NotImplementedError
__all__ = ("GenderViewServiceTransport",)
| [
"[email protected]"
] | |
0a86e75c70dcb21815b1a3f7ca3483db5fd939cc | 707c6a7f3b3213c8a996967ede905aeb18a8c6d9 | /solutions/Insert-Interval.py | d680a3144665d7fbb6a2e681c4be95c980267521 | [] | no_license | Ziyilan/Pyleetcode | d35b9c2ae6c890dfd42804264b139bfddb8db563 | 81a9d98607b4ce554507d16763ee82f7dad49edd | refs/heads/master | 2020-12-11T02:11:38.470153 | 2015-10-27T18:46:47 | 2015-10-27T18:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | """
Author: Jing (https://github.com/gnijuohz)
Insert Interval: https://oj.leetcode.com/problems/insert-interval
Given a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).
You may assume that the intervals were initially sorted according to their start times.
Example 1:
Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
Example 2:
Given [1,2],[3,5],[6,7],[8,10],[12,16], insert and merge [4,9] in as [1,2],[3,10],[12,16].
This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
Tags: Array, Sort
Similar Problems: (H) Merge Intervals
"""
import operator
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
# @param intervals, a list of Intervals
# @param newInterval, a Interval
# @return a list of Interval
def insert(self, intervals, newInterval):
intervals.append(newInterval)
return self.merge(intervals)
def merge(self, intervals):
if not intervals or len(intervals) == 1:
return intervals
intervals = sorted(intervals, key=operator.attrgetter('start'))
res = [intervals[0]]
for i in range(1, len(intervals)):
if intervals[i].start <= res[-1].end:
res[-1].end = max(res[-1].end, intervals[i].end)
else:
res.append(intervals[i])
return res | [
"[email protected]"
] | |
fe49918b93bc0175098d4277f76f2d191bfbce49 | 5a69cab2b5ed410f944b57f3ec586b9c624a735c | /lib/demo_test/multi_platform_demo | 0cd943b292114efcecb53e660e9883bb98850ba5 | [
"Apache-2.0"
] | permissive | T3kton/architect | 214a176dd5f9a9bc340d358d692e16a61f362ebe | 3368a66c0c1836eca12dbc7af97f01d5ba13984a | refs/heads/master | 2021-01-20T09:03:25.451300 | 2018-09-17T23:03:24 | 2018-09-17T23:03:24 | 90,217,916 | 0 | 2 | Apache-2.0 | 2018-09-17T23:03:24 | 2017-05-04T03:29:18 | Python | UTF-8 | Python | false | false | 2,211 | #!/usr/bin/env python3
import os
import django
os.environ.setdefault( 'DJANGO_SETTINGS_MODULE', 'architect.settings' )
django.setup()
from datetime import datetime, timezone, timedelta
from architect.Contractor.models import Complex, BluePrint
from architect.Plan.models import Plan, PlanComplex, PlanBluePrint, PlanTimeSeries
from architect.TimeSeries.models import CostTS, AvailabilityTS, ReliabilityTS, RawTimeSeries
print( 'Giving Blueprints their names...')
for blueprint in BluePrint.objects.filter( name__isnull=True ):
blueprint.name = blueprint.contractor_id
blueprint.full_clean()
blueprint.save()
try:
plan = Plan.objects.get( name='demo' )
except Plan.DoesNotExist:
print( 'Creating the Plan...' )
plan = Plan( name='demo', description='demo', enabled=True )
plan.script = """
cut_off: 0
demo: weighted( *INDEX*, @count, ( 1 / *COST* ) )
#demo-web: above_inclusive( demo, cut_off )
#demo-ssh: below( demo, cut_off )
"""
plan.config_values = {}
plan.max_inflight = 10
plan.last_change = datetime.now( timezone.utc ) - timedelta( days=1 )
plan.can_build = True
plan.can_destroy = True
plan.full_clean()
plan.save()
ts = RawTimeSeries( metric='data.count' )
ts.full_clean()
ts.save()
pts = PlanTimeSeries( plan=plan, timeseries=ts, script_name='count' )
pts.full_clean()
pts.save()
print( 'setting up blueprint link...' )
blueprint = BluePrint.objects.get( name='demo-web' )
pb = PlanBluePrint( plan=plan, blueprint=blueprint )
pb.full_clean()
pb.save()
blueprint = BluePrint.objects.get( name='demo-ssh' )
pb = PlanBluePrint( plan=plan, blueprint=blueprint )
pb.full_clean()
pb.save()
print( 'Giving Complexes their tsnames, and setting up buckets...')
for complex in Complex.objects.filter( tsname__isnull=True ):
complex.tsname = complex.contractor_id
complex.full_clean()
complex.save()
costts = CostTS( complex=complex )
costts.save()
availts = AvailabilityTS( complex=complex )
availts.save()
reliabts = ReliabilityTS( complex=complex )
reliabts.save()
pc = PlanComplex( plan=plan, complex=complex )
pc.cost = costts
pc.availability = availts
pc.reliability = reliabts
pc.full_clean()
pc.save()
| [
"[email protected]"
] | ||
f4a5841f5d31f26e0da2530d937bbf5ce64db363 | ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04 | /Programming Fundamentals/objects_and_classes/class.py | 7289e5275a0ea25652981f2f7b0b49c310acc71b | [] | no_license | AssiaHristova/SoftUni-Software-Engineering | 9e904221e50cad5b6c7953c81bc8b3b23c1e8d24 | d4910098ed5aa19770d30a7d9cdf49f9aeaea165 | refs/heads/main | 2023-07-04T04:47:00.524677 | 2021-08-08T23:31:51 | 2021-08-08T23:31:51 | 324,847,727 | 1 | 0 | null | 2021-08-08T23:31:52 | 2020-12-27T20:58:01 | Python | UTF-8 | Python | false | false | 701 | py | class Class:
def __init__(self, name):
self.name = name
self.students = []
self.grades = []
__students_count = 22
def add_student(self, name, grade):
if len(self.students) < Class.__students_count:
self.students.append(name)
self.grades.append(grade)
def get_average_grade(self):
return sum(self.grades) / len(self.grades)
def __repr__(self):
    return f"The students in {self.name}: {', '.join(self.students)}. Average grade: {self.get_average_grade():.2f}"
a_class = Class("11B")
a_class.add_student("Peter", 4.80)
a_class.add_student("George", 6.00)
a_class.add_student("Amy", 3.50)
print(a_class)
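# Expected output for the three students added above
# ((4.80 + 6.00 + 3.50) / 3 = 4.7666... -> 4.77):
# The students in 11B: Peter, George, Amy. Average grade: 4.77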
| [
"[email protected]"
] | |
dda48464dce73f3af0af909f3571d348d3d0d84e | f8dd8d046100f1223713e047074f30c7ce5a59cd | /testing/epilogue/decorators.py | 35dbdffbffc9e1b88e69fb384d455179a4f387c3 | [] | no_license | dotslash227/98fitcortex | 57aed99270799eff68fdff62db0b8c1d9aabd4a2 | bd4002151e5def00c3dea1f5a1abfb06ba3e809a | refs/heads/master | 2022-12-17T00:51:20.302948 | 2019-02-27T13:54:22 | 2019-02-27T13:54:22 | 197,362,824 | 0 | 0 | null | 2022-12-08T00:02:42 | 2019-07-17T09:55:14 | HTML | UTF-8 | Python | false | false | 3,694 | py | import functools
import datetime
from django.db import models
def last_days(days = 6):
today = datetime.datetime.today().date()
while days >= 0:
val = today - datetime.timedelta(days = days)
days -= 1
yield val
def last_weeks(weeks = 6):
today = datetime.datetime.today().date()
current_year , current_week , current_day = today.isocalendar()
start_week = current_week
year = current_year
if start_week >= 6:
while weeks >= 0:
yield (year ,current_week)
current_week -= 1
weeks -= 1
else:
while weeks >= 0:
yield (year , current_week)
current_week -= 1
current_week = abs(52+current_week)%52
if current_week == 0:
current_week = 52
year -= 1
weeks -= 1
def add_today(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
kwargs['today'] = datetime.datetime.today().date()
return f(*args , **kwargs)
return wrapper
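# Hedged usage sketch (Entry is a hypothetical model):
#
#   @add_today
#   def entries_for(user, today=None):
#       return Entry.objects.filter(user=user, date=today)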
def add_empty_day_in_week(defaults , days_range = 6):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
days = set(vals.values_list("date" , flat = True))
data = []
for e in last_days(days = days_range):
if e not in days:
d = {
"date" : e,
**defaults,
}
data.append(d)
return data + list(vals)
return wrapper
return decorator
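# Hedged usage sketch (field/model names are hypothetical; the wrapped
# function must return a queryset exposing a "date" field):
#
#   @add_empty_day_in_week(defaults={"calories": 0})
#   def weekly_intake(user):
#       return user.food_set.all()
#
# Days missing from the trailing window come back padded with `defaults`.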
def add_empty_weeks(defaults , sort = lambda x : (x['year'],x['week'])):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
weeks , data = f(*args , **kwargs)
for y,w in last_weeks():
if (y,w) not in weeks:
d = {
"week" : w,
"year" : y,
**defaults
}
data.append(d)
return sorted(data , key = sort)
return wrapper
return decorator
def sorter(key , reverse = False):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
return sorted(vals , key = key , reverse = reverse)
return wrapper
return decorator
def scale_field(field,goal):
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args , **kwargs):
returned_value = fn(*args , **kwargs)
field_values = (e.get(field) for e in returned_value)
scaling_factor = 100/(max(goal ,max(field_values)))
for e in returned_value:
e['plotting_value'] = e.get(field , 0) * scaling_factor
return returned_value
return wrapper
return decorator
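# Hedged usage sketch: rescales `field` onto a 0-100 plotting range
# against max(goal, observed max). Names below are hypothetical.
#
#   @scale_field("steps", goal=10000)
#   def weekly_steps(user):
#       return [{"week": 1, "steps": 4000}, {"week": 2, "steps": 12000}]
#   # each dict gains plotting_value = steps * (100 / 12000)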
def weekly_average(field):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
weeks = set(vals.values_list("week" , flat = True) )
data = []
curr_week = datetime.datetime.now().isocalendar()[1]
for e in range(curr_week - 6 , curr_week +1):
if e not in weeks:
data.append({
"week" : e,
"avg" : 0
})
continue
avg = vals.filter(
week = e
).aggregate(
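# note: despite the "avg" key, this stores a weekly total (Sum),
# not an average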
avg = models.Sum(field)
)
d = {
"week" : e,
"avg" : avg['avg']
}
data.append(d)
return data
return wrapper
return decorator
def monthly_average(field):
def decorator(f):
@functools.wraps(f)
def wrapper(self):
vals = f(self)
months = set(vals.values_list("month" , flat = True) )
data = []
for e in months:
avg = vals.filter(
month = e
).aggregate(
avg = models.Avg(field)
)
d = {
"month" : e,
"avg" : avg['avg']
}
data.append(d)
return data
return wrapper
return decorator
def map_transform_queryset(iterable , *fields):
def decorator(f):
@functools.wraps(f)
def mapper(*args , **kwargs):
l = map(lambda x : functools.partial(x , *fields) , iterable)
val = f(*args , **kwargs)
d = {}
for e in l:
d.update(**e(val))
return d
return mapper
return decorator
| [
"[email protected]"
] | |
3ac83d2ac2af4145c059505d5214c148e2fa8ab9 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-MailKit/PyObjCTest/test_memessagedecoder.py | 9ebaeafd6990e8c6d9d71d48d0c727eca4fb01ad | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 198 | py | from PyObjCTools.TestSupport import TestCase
import MailKit # noqa: F401
class TestMEMessageDecoder(TestCase):
def test_protocols(self):
self.assertProtocolExists("MEMessageDecoder")
| [
"[email protected]"
] | |
ee62c946bacf7cf765e57fe18224aea84ff72185 | 2fcf361eb89f1f01fe4d677d4772ddaba89b06ad | /hydrus/HydrusGlobals.py | 06ab47dbf5399492ca0bfda15b4892944f211c47 | [
"WTFPL"
] | permissive | matjojo/hydrus | 9f13f35e817bfe7e170ec7be22e18b64e393cb01 | 8f87206ea6ef242bc38235d7053bb33b5a785e68 | refs/heads/master | 2021-05-17T03:26:19.183503 | 2020-03-27T23:32:58 | 2020-03-27T23:32:58 | 250,597,596 | 0 | 0 | NOASSERTION | 2020-03-27T17:18:53 | 2020-03-27T17:18:52 | null | UTF-8 | Python | false | false | 1,239 | py | import threading
controller = None
client_controller = None
server_controller = None
test_controller = None
view_shutdown = False
model_shutdown = False
no_daemons = False
no_wal = False
no_db_temp_files = False
db_memory_journaling = False
db_synchronous_override = None
import_folders_running = False
export_folders_running = False
callto_report_mode = False
db_report_mode = False
db_profile_mode = False
file_report_mode = False
media_load_report_mode = False
gui_report_mode = False
shortcut_report_mode = False
subprocess_report_mode = False
subscription_report_mode = False
hover_window_report_mode = False
file_import_report_mode = False
phash_generation_report_mode = False
menu_profile_mode = False
network_report_mode = False
pubsub_report_mode = False
pubsub_profile_mode = False
ui_timer_profile_mode = False
daemon_report_mode = False
force_idle_mode = False
no_page_limit_mode = False
thumbnail_debug_mode = False
currently_uploading_pending = False
shutting_down_due_to_already_running = False
do_idle_shutdown_work = False
program_is_shutting_down = False
shutdown_complete = False
restart = False
emergency_exit = False
twisted_is_broke = False
dirty_object_lock = threading.Lock()
server_busy = threading.Lock()
| [
"[email protected]"
] | |
f6ced2b4805a2ac25e3a6f5f5bc67b175ac0c922 | 69d3680f881833a0a4906ad708eac11401bc03c6 | /python3/515. 在每个树行中找最大值.py | 7f9663db2eb82e0576ad697414cd72b43c7432df | [] | no_license | menghuu/YALeetcode | 21df4b5ea6cb0a249263b0ce2df37e7580477ddd | 1959a884bb1cc9f2f1acb1ba6f413498ea0d1aca | refs/heads/master | 2023-08-18T03:55:41.470428 | 2021-09-11T12:39:02 | 2021-09-11T12:39:02 | 269,104,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 m <[email protected]>
#
# Distributed under terms of the MIT license.
"""
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def largestValues(self, root: TreeNode) -> List[int]:
if not root:
return []
ans = []
level = [root]
while level:
l = len(level)
m = float('-inf')
for i in range(l):
root = level[i]
m = max(root.val, m)
if root.left:
level.append(root.left)
if root.right:
level.append(root.right)
level = level[l:]
ans.append(m)
return ans
| [
"[email protected]"
] |