Dataset schema (each row below lists these fields in order, separated by `|`; ⌀ marks a nullable column):

- blob_id: string (length 40–40)
- directory_id: string (length 40–40)
- path: string (length 3–616)
- content_id: string (length 40–40)
- detected_licenses: list (length 0–112)
- license_type: string (2 classes)
- repo_name: string (length 5–115)
- snapshot_id: string (length 40–40)
- revision_id: string (length 40–40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 – 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 – 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 – 2023-09-06 01:08:06)
- github_id: int64 (4.92k – 681M, ⌀)
- star_events_count: int64 (0 – 209k)
- fork_events_count: int64 (0 – 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (3 – 10.2M)
- extension: string (188 classes)
- content: string (length 3 – 10.2M)
- authors: list (length 1–1)
- author_id: string (length 1–132)
dda125c8083666e799a4bccbfac1e27a51202a18 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_13284-2532/sdB_EC_13284-2532_lc.py | dfa44dd24cbfe5cc7255aa0893f9c5a3ba440b9b | []
| no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py |
from gPhoton.gAperture import gAperture


def main():
    gAperture(band="NUV", skypos=[202.803875, -25.791181], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_13284-2532 /sdB_EC_13284-2532_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227, 0.0103888972], verbose=3)


if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
d9bdb178ecc13cd0d02f628d51c3fc104d950945 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/power_of_three.py | 0c90784597ced25c72515a818f2ab265938bf1d4 | []
| no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 862 | py |
# https://leetcode.com/problems/power-of-three/
"""
Given an integer n, return true if it is a power of three. Otherwise, return false.
An integer n is a power of three if there exists an integer x such that n == 3^x.

Example 1:
Input: n = 27
Output: true

Example 2:
Input: n = 0
Output: false

Example 3:
Input: n = 9
Output: true

Constraints:
-2^31 <= n <= 2^31 - 1

Follow up: Could you solve it without loops/recursion?
"""
from math import log


def is_power_of_three(n: int) -> bool:
    if n <= 0:
        return False
    val = round(log(n, 3))
    return 3**val == n


# An alternative recursive solution (note that this redefinition shadows the
# logarithm-based version above).
def is_power_of_three(n: int) -> bool:
    def helper(n: int):
        if n <= 0:
            return False
        if n == 1:
            return True
        div, residual = divmod(n, 3)
        if residual:
            return False
        return helper(div)

    return helper(n)
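
# One loop-free answer to the follow-up above (an editor's sketch, not part of
# the original file): 3^19 = 1162261467 is the largest power of three that
# fits the stated 32-bit constraint, so a positive n is a power of three
# exactly when it divides that constant.
def is_power_of_three_no_loop(n: int) -> bool:
    return n > 0 and 1162261467 % n == 0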
| [
"[email protected]"
]
| |
799ae55b2b7a4557348b168f0a3fc74d923f5fd4 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/ops/rnn_cell.py | 9aa2314e5e65b02c0d4f7ee1661b77200ea50ef1 | [
"Apache-2.0"
]
| permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 26,838 | py |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh


class RNNCell(object):
  """Abstract object representing an RNN cell.

  An RNN cell, in the most abstract setting, is anything that has
  a state -- a vector of floats of size self.state_size -- and performs some
  operation that takes inputs of size self.input_size. This operation
  results in an output of size self.output_size and a new state.

  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings to the
  inputs. Constructing multi-layer cells is supported by a super-class,
  MultiRNNCell, defined later. Every RNNCell must have the properties below
  and implement __call__ with the following signature.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: 2D Tensor with shape [batch_size x self.input_size].
      state: 2D Tensor with shape [batch_size x self.state_size].
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:
      - Output: A 2D Tensor with shape [batch_size x self.output_size]
      - New state: A 2D Tensor with shape [batch_size x self.state_size].
    """
    raise NotImplementedError("Abstract method")

  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return state tensor (shape [batch_size x state_size]) filled with 0.

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      A 2D Tensor of shape [batch_size x state_size] filled with zeros.
    """
    zeros = array_ops.zeros(
        array_ops.pack([batch_size, self.state_size]), dtype=dtype)
    zeros.set_shape([None, self.state_size])
    return zeros
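
# Editor's illustration (not part of the original TensorFlow source): the
# smallest possible concrete cell, showing exactly which members a subclass
# must provide. It keeps no learned parameters and simply echoes its input;
# a real cell would mix in `state` (see BasicRNNCell below).
class _EchoCell(RNNCell):

  def __init__(self, num_units):
    self._num_units = num_units

  @property
  def input_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    # Output and new state are both just the inputs.
    return inputs, inputs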
class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""

  def __init__(self, num_units, input_size=None):
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      output = tanh(linear([inputs, state], self._num_units, True))
    return output, output


class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units, input_size=None):
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, linear([inputs, state],
                                            2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = tanh(linear([inputs, r * state], self._num_units, True))
      new_h = u * state + (1 - u) * c
    return new_h, new_h


class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping or a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: int, The dimensionality of the inputs into the LSTM cell,
        by default equal to num_units.
    """
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size
    self._forget_bias = forget_bias

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return 2 * self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

    return new_h, array_ops.concat(1, [new_c, new_h])
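
# Editor's note (an illustration, not part of the original source): because
# BasicLSTMCell.state_size is 2 * num_units, the cell vector c and the hidden
# vector h travel concatenated in a single state tensor. A driving loop
# therefore looks like:
#
#   cell = BasicLSTMCell(num_units=64)
#   state = cell.zero_state(batch_size, dtype)  # shape [batch, 128]
#   for inputs_t in inputs_sequence:            # each [batch, 64]
#     output, state = cell(inputs_t, state)     # output is the new h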
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable


def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    # The first `remaining_rows` shards each take one extra row.
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards


class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.

  It uses peep-hole connections, optional cell clipping, and an optional
  projection layer.
  """

  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1, forget_bias=1.0):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      input_size: int, The dimensionality of the inputs into the LSTM cell.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is
        clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
    """
    self._num_units = num_units
    self._input_size = input_size
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias

    if num_proj:
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      self._state_size = 2 * num_units
      self._output_size = num_units

  @property
  def input_size(self):
    return self._num_units if self._input_size is None else self._input_size

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is:
          num_proj if num_proj was set,
          num_units otherwise.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".

    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj

    c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
    m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    dtype = inputs.dtype
    actual_input_size = inputs.get_shape().as_list()[1]
    if self._input_size and self._input_size != actual_input_size:
      raise ValueError("Actual input size not same as specified: %d vs %d." %
                       (actual_input_size, self._input_size))
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "LSTMCell"
      concat_w = _get_concat_variable(
          "W", [actual_input_size + num_proj, 4 * self._num_units],
          dtype, self._num_unit_shards)

      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(1, [inputs, m_prev])
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)

      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)

      if self._use_peepholes:
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * tanh(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))

      if self._cell_clip is not None:
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)

      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * tanh(c)
      else:
        m = sigmoid(o) * tanh(c)

      if self._num_proj is not None:
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)

        m = math_ops.matmul(m, concat_w_proj)

    return m, array_ops.concat(1, [c, m])
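
# Editor's note (an illustration, not part of the original source): with a
# projection layer the emitted output and the stored state change size, e.g.:
#
#   cell = LSTMCell(num_units=512, num_proj=128, use_peepholes=True)
#   cell.output_size  # -> 128 (num_proj)
#   cell.state_size   # -> 640 (num_units + num_proj; c then projected m)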
class OutputProjectionWrapper(RNNCell):
  """Operator adding an output projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """

  def __init__(self, cell, output_size):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size

  @property
  def input_size(self):
    return self._cell.input_size

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    output, res_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = linear(output, self._output_size, True)
    return projected, res_state


class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self, cell, input_size):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      input_size: integer, the size of the inputs before projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if input_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if input_size < 1:
      raise ValueError("Parameter input_size must be > 0: %d." % input_size)
    self._cell = cell
    self._input_size = input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = linear(inputs, self._cell.input_size, True)
    return self._cell(projected, state)


class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %f"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %f"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def input_size(self):
    return self._cell.input_size

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
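
# Editor's illustration (not part of the original source): dropout is applied
# only to the tensors flowing in and out, never to the recurrent state, so the
# wrapper is safe around any cell:
#
#   cell = DropoutWrapper(GRUCell(256), input_keep_prob=0.9,
#                         output_keep_prob=0.5)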
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer

  @property
  def input_size(self):
    return 1

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                  self._embedding_size],
                                    initializer=initializer)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)


class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.

    Raises:
      ValueError: if cells is empty (not allowed) or if their sizes don't
        match.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    for i in xrange(len(cells) - 1):
      if cells[i + 1].input_size != cells[i].output_size:
        raise ValueError("In MultiRNNCell, the input size of each next"
                         " cell must match the output size of the previous"
                         " one. Mismatched output size in cell %d." % i)
    self._cells = cells

  @property
  def input_size(self):
    return self._cells[0].input_size

  @property
  def output_size(self):
    return self._cells[-1].output_size

  @property
  def state_size(self):
    return sum([cell.state_size for cell in self._cells])

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("Cell%d" % i):
          cur_state = array_ops.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    return cur_inp, array_ops.concat(1, new_states)
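
# Editor's illustration (not part of the original source): stacking requires
# each layer's input_size to equal the previous layer's output_size, and
# wrappers compose freely as long as the sizes line up:
#
#   stacked = MultiRNNCell(
#       [InputProjectionWrapper(GRUCell(128), input_size=64),
#        DropoutWrapper(GRUCell(128), output_keep_prob=0.8)])
#   stacked.state_size  # -> 256 (the sum of the two GRU state sizes)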
class SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""

  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.

    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces the
        outputs and the new_state. Additionally when called with inputs=None
        and state=None it should return (initial_outputs, initial_state).

    Raises:
      TypeError: if cell_fn is not callable.
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      raise TypeError("cell_fn %s needs to be callable" % cell_fn)
    self._cell_fn = cell_fn
    self._cell_name = cell_fn.func.__name__
    _, init_state = self._cell_fn(None, None)
    state_shape = init_state.get_shape()
    self._state_size = state_shape.with_rank(2)[1].value
    if self._state_size is None:
      raise ValueError("Initial state created by %s has invalid shape %s" %
                       (self._cell_name, state_shape))

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state


def linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (isinstance(args, (list, tuple)) and not args):
    raise ValueError("`args` must be specified")
  if not isinstance(args, (list, tuple)):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    else:
      total_arg_size += shape[1]

  # Now the computation.
  with vs.variable_scope(scope or "Linear"):
    matrix = vs.get_variable("Matrix", [total_arg_size, output_size])
    if len(args) == 1:
      res = math_ops.matmul(args[0], matrix)
    else:
      res = math_ops.matmul(array_ops.concat(1, args), matrix)
    if not bias:
      return res
    bias_term = vs.get_variable(
        "Bias", [output_size],
        initializer=init_ops.constant_initializer(bias_start))
  return res + bias_term
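
# Editor's illustration (not part of the original source): `linear` is the
# workhorse used by every cell above. Given two [batch x n] tensors it creates
# a single shared [n1 + n2, output_size] "Matrix" variable (plus an optional
# "Bias") and multiplies the concatenated inputs through it:
#
#   with vs.variable_scope("Example"):
#     y = linear([x1, x2], output_size=10, bias=True)  # [batch x 10]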
| [
"[email protected]"
]
| |
bb9b8448866a42aee485331c76d2d094853127b4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch22_2020_06_20_19_00_14_584797.py | bd79702d66dbec13e717be885a2a86143f73ec2b | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py |
c = int(input('quantos cigarros vc fuma por dia?'))  # "how many cigarettes do you smoke per day?"
a = int(input('ha quantos anos?'))  # "for how many years?"
# Minutes lost (10 per cigarette, an assumption of the exercise), converted to
# days by dividing by the 1440 minutes in a day.
t = (10 * c * a * 365) / 1440
print(t)
| [
"[email protected]"
]
| |
566fdde94b7a27a1ac308ac870b09e58209d60fc | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetAppendRequest.py | 3e1af80821fc15b93a0a4328c878c0180e7b136d | [
"Apache-2.0"
]
| permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 4,058 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetAppendModel import AlipayMarketingCampaignDiscountBudgetAppendModel


class AlipayMarketingCampaignDiscountBudgetAppendRequest(object):

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        if isinstance(value, AlipayMarketingCampaignDiscountBudgetAppendModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDiscountBudgetAppendModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.append'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        multipart_params = dict()
        return multipart_params
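
# Usage sketch (an editor's illustration, not part of the generated SDK file;
# the biz_content field name below is hypothetical -- consult
# AlipayMarketingCampaignDiscountBudgetAppendModel for the real fields):
#
#   request = AlipayMarketingCampaignDiscountBudgetAppendRequest()
#   request.biz_content = {"camp_id": "..."}  # coerced via from_alipay_dict
#   request.notify_url = "https://example.com/notify"
#   params = request.get_params()  # dict ready for signing and sending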
| [
"[email protected]"
]
| |
22eb63305890280ff00427e395dc7ee12f3f314c | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4330/codes/1594_1800.py | 0302eb5caf63f16066aa6406b53455d42458aa87 | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py |
a = int(input("Insira o valor de A"))  # "Enter the value of A"
b = int(input("Insira o valor de B"))  # "Enter the value of B"
c = int(input("Insira o valor de C"))  # "Enter the value of C"
x = (a**2) + (b**2) + (c**2)
y = a + b + c
t = x / y
print(round(t, 7))
| [
"[email protected]"
]
| |
c9580567614da5bed9f9c744137f3d463eb77515 | dac7d0abff54dbeb9e6587f17866a34b5e7f3948 | /Cobbity/compare.py | ec3b6cf07d175832a7fb04e914de1c0c894bf84c | []
| no_license | KipCrossing/EMI_Field | 5665aba5ff5fbf4a4d42fc9b3efc9aa3b3f51eea | e52142648388a25d26f682986c586cd1827e31e0 | refs/heads/master | 2020-05-22T12:37:42.892290 | 2019-09-12T01:27:24 | 2019-09-12T01:27:24 | 186,342,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py |
import pandas as pd

df_OpenEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_OpenEM.xyz", header=None, delimiter=r"\s+")
df_DUALEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_DUALEM.xyz", header=None, delimiter=r"\s+")

print(df_OpenEM.head())
print(df_DUALEM.head())

# Keep only valid OpenEM readings (-9999 marks no-data) plus their coordinates.
New_OpenEM_readings = []
New_OpenEM_lon = []
New_OpenEM_lat = []
sum = 0
for read in df_OpenEM[2].tolist():
    if read > -9999:
        New_OpenEM_readings.append(read)
        New_OpenEM_lon.append(df_OpenEM[0].tolist()[sum])
        New_OpenEM_lat.append(df_OpenEM[1].tolist()[sum])
    sum += 1

print(len(New_OpenEM_lon), len(New_OpenEM_lat), len(New_OpenEM_readings))

# Same filtering for the DUALEM readings.
New_DUALEM_readings = []
New_DUALEM_lon = []
New_DUALEM_lat = []
sum = 0
for read in df_DUALEM[2].tolist():
    if read > -9999:
        New_DUALEM_readings.append(read)
        New_DUALEM_lon.append(df_DUALEM[0].tolist()[sum])
        New_DUALEM_lat.append(df_DUALEM[1].tolist()[sum])
    sum += 1

print(len(New_DUALEM_lon), len(New_DUALEM_lat), len(New_DUALEM_readings))

data = {"DUALEM": New_DUALEM_readings, "OpenEM": New_OpenEM_readings, "X1": New_DUALEM_lon, "X2": New_OpenEM_lon, "Y1": New_DUALEM_lat, "Y2": New_OpenEM_lat}
df_out = pd.DataFrame(data, columns=["DUALEM", "OpenEM", "X1", "X2", "Y1", "Y2"])
df_out.to_csv("~/Cobbity/Output/compare_Smooth_DUALEM_OpenEm.csv")

# Print the row indices where both instruments share identical coordinates.
count = 0
for i in New_DUALEM_lon:
    if New_DUALEM_lon[count] == New_OpenEM_lon[count] and New_DUALEM_lat[count] == New_OpenEM_lat[count]:
        print(count)
    count += 1
| [
"[email protected]"
]
| |
d839e4467adb97c603f1bbf720207d83942d87d2 | 46267e38d63bb487ccef4612593676412ea956d7 | /astraeus/core.py | 268d58bf9ad346c038f6b1a1989ccc7a00c0339b | [
"MIT"
]
| permissive | eos-sns/astraeus | 17f63fc02e27b8b40b8470fb8202b9bb4b50e3d6 | bbbe820bdc02d7c0209854b80b1f952bfaaf984a | refs/heads/master | 2020-04-25T12:56:35.666259 | 2019-09-18T12:15:04 | 2019-09-18T12:15:04 | 172,793,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,379 | py |
# -*- coding: utf-8 -*-
import abc
import datetime
import uuid

from astraeus.models.memcache import MemcacheClientBuilder, MemcacheFacade
from astraeus.models.mongodb import MongoDBBuilder


class Hasher:
    """ Something that hashes something """

    @abc.abstractmethod
    def hash_key(self, key):
        return 0


class UUIDHasher(Hasher):
    """ Hashing based on UUID4 """

    def hash_key(self, key=None):
        hashed = str(uuid.uuid4())
        hashed = hashed.replace('-', '')
        return hashed


class Astraeus(object):
    """ Saves in-memory data about stuff """

    MEMCACHE_PORT = 11211  # default memcache port
    EXPIRE_SECONDS = ((60 * 60) * 24) * 14  # 14 days

    def __init__(self,
                 port=MEMCACHE_PORT,
                 expire_seconds=EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param port: port where memcache runs
        :param expire_seconds: values in memcache will be null after that
        :param hash_function: function to compute hash of key
        """
        client = MemcacheClientBuilder() \
            .with_server('localhost') \
            .with_port(port) \
            .build()
        self.memcache = MemcacheFacade(client, expire_seconds)
        self.hasher = hash_function  # function to hash stuff

    def _get_key(self, val):
        return self.hasher(str(val))  # todo better jsonify ?

    def save(self, val):
        """
        :param val: Saves val in memcache database
        :return: key of memcache
        """
        assert not (val is None)

        key = self._get_key(val)
        if self.memcache.set(key, val):
            return key
        return None

    def retrieve(self, key):
        assert not (key is None)

        return self.memcache.get(key)


class MongoAstraeus(Astraeus):
    """ Normal Astraeus, but saves data also in MongoDB for redundancy
    reasons """

    MONGO_DB = 'astraeus'  # todo move to config

    def _get_parent(self):
        return super(self.__class__, self)

    def __init__(self,
                 mongo_collection,
                 mongo_db=MONGO_DB,
                 port=Astraeus.MEMCACHE_PORT,
                 expire_seconds=Astraeus.EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        super(self.__class__, self).__init__(port, expire_seconds, hash_function)

        mongo = MongoDBBuilder() \
            .with_db(mongo_db) \
            .build()
        self.mongo = mongo[mongo_collection]  # specify collection

    def _try_save_to_memcache(self, val):
        try:
            return self._get_parent().save(val)
        except:
            print('Cannot save {} to memcache'.format(val))
            return None

    def _try_save_to_mongodb(self, memcache_key, val):
        if not memcache_key:
            memcache_key = self._get_key(val)

        try:
            item = self.build_mongo_item(memcache_key, val)
            self.mongo.insert_one(item)
            return memcache_key
        except:
            print('Cannot save {} to mongodb'.format(val))
            return None

    def save(self, val):
        key = self._try_save_to_memcache(val)  # first save to memcache ...
        key = self._try_save_to_mongodb(key, val)  # ... then in mongo
        return key

    def _try_retrieve_from_memcache(self, key):
        try:
            return self._get_parent().retrieve(key)
        except:
            print('Cannot retrieve {} from memcache'.format(key))
            return None

    def _try_retrieve_from_mongodb(self, key):
        try:
            results = self.mongo.find({'key': key})
            if results:
                most_recent = max(results, key=lambda x: x['time'])  # sort by date
                return most_recent['val']  # DO NOT check expiration: this is a redundant database
        except:
            print('Cannot retrieve {} from mongodb'.format(key))
            return None

    def retrieve(self, key):
        val = self._try_retrieve_from_memcache(key)  # first try with memcache ...
        if not val:
            return self._try_retrieve_from_mongodb(key)  # ... then with mongo
        return val

    @staticmethod
    def build_mongo_item(key, val):
        time_now = datetime.datetime.now()
        return {
            'key': key,
            'val': val,
            'time': time_now
        }
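
# Usage sketch (an editor's illustration, not part of the original module;
# assumes a local memcached on the default port and a reachable MongoDB):
#
#   store = MongoAstraeus(mongo_collection='keys')
#   key = store.save({'user': 42})  # written to memcache, then to MongoDB
#   store.retrieve(key)             # memcache first, MongoDB as fallback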
| [
"[email protected]"
]
| |
19d14b124965f2f461568792ad34bb6bbd4dc10d | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/367ff95d4d3d3770fa7b/snippet.py | 6cd51cef4fd2bff70541bd8d5ea0c23646114dd5 | [
"Apache-2.0"
]
| permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,003 | py |
from collections import defaultdict

from django.db.models.signals import *


class DisableSignals(object):
    def __init__(self, disabled_signals=None):
        self.stashed_signals = defaultdict(list)
        self.disabled_signals = disabled_signals or [
            pre_init, post_init,
            pre_save, post_save,
            pre_delete, post_delete,
            pre_migrate, post_migrate,
        ]

    def __enter__(self):
        for signal in self.disabled_signals:
            self.disconnect(signal)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Iterate over a snapshot of the keys: reconnect() deletes entries
        # from the dict while we loop.
        for signal in list(self.stashed_signals.keys()):
            self.reconnect(signal)

    def disconnect(self, signal):
        self.stashed_signals[signal] = signal.receivers
        signal.receivers = []

    def reconnect(self, signal):
        signal.receivers = self.stashed_signals.get(signal, [])
        del self.stashed_signals[signal]


# Example usage:
# with DisableSignals():
#     user.save()  # will not call any signals
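
# A narrower variant (an editor's sketch, not in the original gist): disable
# only the save signals, leaving the others connected.
# with DisableSignals([pre_save, post_save]):
#     for user in users:
#         user.save()  # pre/post_save are muted; other signals still fire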
| [
"[email protected]"
]
| |
f931f93487dee0b1d116ef38d52fa5222198b620 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /345. 反转字符串中的元音字母.py | f259c3af854c1e4b250ef47b593bf61f4f86067c | []
| no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py |
"""
编写一个函数,以字符串作为输入,反转该字符串中的元音字母。
(Write a function that takes a string as input and reverses the vowels in
that string.)
"""
def reverseVowels(s):
    query = "aeiouAEIOU"
    vow = []
    idx = []
    for i, j in enumerate(s):
        if j in query:
            vow.append(j)
            idx.append(i)
    vow = vow[::-1]
    s = list(s)
    for i, j in zip(idx, vow):
        s[i] = j
    return "".join(s)


print(reverseVowels("hello"))
print(reverseVowels("leetcode"))
| [
"[email protected]"
]
| |
e063920acaa40258008dba8ae5ed79c9bd2b66b7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ENTERASYS-VLAN-AUTHORIZATION-MIB.py | c846ff1eb1ce291ffe2d355f4fb5cea046a7128a | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,071 | py |
#
# PySNMP MIB module ENTERASYS-VLAN-AUTHORIZATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-VLAN-AUTHORIZATION-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:04:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
dot1dBasePortEntry, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePortEntry")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, Bits, NotificationType, IpAddress, TimeTicks, Counter64, iso, Integer32, Counter32, ObjectIdentity, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "NotificationType", "IpAddress", "TimeTicks", "Counter64", "iso", "Integer32", "Counter32", "ObjectIdentity", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
etsysVlanAuthorizationMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48))
etsysVlanAuthorizationMIB.setRevisions(('2004-06-02 19:22',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setRevisionsDescriptions(('The initial version of this MIB module',))
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setLastUpdated('200406021922Z')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setOrganization('Enterasys Networks, Inc')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setContactInfo('Postal: Enterasys Networks, Inc. 50 Minuteman Rd. Andover, MA 01810-1008 USA Phone: +1 978 684 1000 E-mail: [email protected] WWW: http://www.enterasys.com')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setDescription("This MIB module defines a portion of the SNMP MIB under Enterasys Networks' enterprise OID pertaining to proprietary extensions to the IETF Q-BRIDGE-MIB, as specified in RFC2674, pertaining to VLAN authorization, as specified in RFC3580. Specifically, the enabling and disabling of support for the VLAN Tunnel-Type attribute returned from a RADIUS authentication, and how that attribute is applied to the port which initiated the authentication.")
class VlanAuthEgressStatus(TextualConvention, Integer32):
    description = 'The possible egress configurations which may be applied in response to a successful authentication. none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("none", 1), ("tagged", 2), ("untagged", 3), ("dynamic", 4))
etsysVlanAuthorizationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1))
etsysVlanAuthorizationSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1))
etsysVlanAuthorizationPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2))
etsysVlanAuthorizationEnable = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1, 1), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setDescription('The enable/disable state for the VLAN authorization feature. When disabled, no modifications to the VLAN attributes related to packet switching should be enforced.')
etsysVlanAuthorizationTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1), )
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setDescription('Extensions to the table that contains information about every port that is associated with this transparent bridge.')
etsysVlanAuthorizationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1), )
dot1dBasePortEntry.registerAugmentions(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEntry"))
etsysVlanAuthorizationEntry.setIndexNames(*dot1dBasePortEntry.getIndexNames())
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setDescription('A list of extensions that support the management of proprietary features for each port of a transparent bridge. This is indexed by dot1dBasePort.')
etsysVlanAuthorizationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 1), EnabledStatus().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setDescription('The enabled/disabled status for the application of VLAN authorization on this port, if disabled, the information returned in the VLAN-Tunnel-Type from the authentication will not be applied to the port (although it should be represented in this table). If enabled, those results will be applied to the port.')
etsysVlanAuthorizationAdminEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 2), VlanAuthEgressStatus().clone('untagged')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setDescription('Controls the modification of the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type, and reported by etsysVlanAuthorizationVlanID) upon successful authentication in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists. This value is supported only if the device supports a mechanism through which the egress status may be returned through the RADIUS response. Should etsysVlanAuthorizationEnable become disabled, etsysVlanAuthorizationStatus become disabled for a port, or should etsysVlanAuthorizationVlanID become 0 or 4095, all effect on the port egress MUST be removed.')
etsysVlanAuthorizationOperEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 3), VlanAuthEgressStatus().clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setDescription('Reports the current state of modification to the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type) upon successful authentication, if etsysVlanAuthorizationStatus is enabled, in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. The purpose of this leaf is to report, specifically when etsysVlanAuthorizationAdminEgress has been set to dynamic(4), the currently enforced egress modification. If the port is unauthenticated, or no VLAN-ID has been applied, this leaf should return none(1).')
etsysVlanAuthorizationVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ValueRangeConstraint(4095, 4095), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setDescription('The 12 bit VLAN identifier for a given port, used to override the PVID of the given port, obtained as a result of an authentication. A value of zero indicates that there is no authenticated VLAN ID for the given port. Should a port become unauthenticated this value MUST be returned to zero. A value of 4095 indicates that a the port has been authenticated, but that the VLAN returned could not be applied to the port (possibly because of resource constraints or misconfiguration). In this instance, the original PVID should still be applied. Should the feature become disabled or the session terminate, all effect on the Port VLAN ID MUST be removed.')
etsysVlanAuthorizationConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2))
etsysVlanAuthorizationGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1))
etsysVlanAuthorizationCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2))
etsysVlanAuthorizationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEnable"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationStatus"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationAdminEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationOperEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationVlanID"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysVlanAuthorizationGroup = etsysVlanAuthorizationGroup.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationGroup.setDescription('A collection of objects relating to VLAN Authorization.')
etsysVlanAuthorizationCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysVlanAuthorizationCompliance = etsysVlanAuthorizationCompliance.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationCompliance.setDescription('The compliance statement for devices that support the Enterasys VLAN Authorization MIB.')
mibBuilder.exportSymbols("ENTERASYS-VLAN-AUTHORIZATION-MIB", etsysVlanAuthorizationVlanID=etsysVlanAuthorizationVlanID, etsysVlanAuthorizationGroup=etsysVlanAuthorizationGroup, etsysVlanAuthorizationEnable=etsysVlanAuthorizationEnable, etsysVlanAuthorizationOperEgress=etsysVlanAuthorizationOperEgress, etsysVlanAuthorizationAdminEgress=etsysVlanAuthorizationAdminEgress, etsysVlanAuthorizationConformance=etsysVlanAuthorizationConformance, VlanAuthEgressStatus=VlanAuthEgressStatus, etsysVlanAuthorizationPorts=etsysVlanAuthorizationPorts, etsysVlanAuthorizationStatus=etsysVlanAuthorizationStatus, etsysVlanAuthorizationCompliance=etsysVlanAuthorizationCompliance, etsysVlanAuthorizationMIB=etsysVlanAuthorizationMIB, etsysVlanAuthorizationGroups=etsysVlanAuthorizationGroups, etsysVlanAuthorizationObjects=etsysVlanAuthorizationObjects, etsysVlanAuthorizationTable=etsysVlanAuthorizationTable, etsysVlanAuthorizationSystem=etsysVlanAuthorizationSystem, etsysVlanAuthorizationEntry=etsysVlanAuthorizationEntry, etsysVlanAuthorizationCompliances=etsysVlanAuthorizationCompliances, PYSNMP_MODULE_ID=etsysVlanAuthorizationMIB)
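
# Usage sketch (an editor's illustration, not part of the generated file;
# assumes this module sits on the MIB builder's search path): pysmi-compiled
# modules like this one are loaded through pysnmp's MIB builder rather than
# imported directly.
#
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.loadModules('ENTERASYS-VLAN-AUTHORIZATION-MIB')
#   (mibModule,) = mibBuilder.importSymbols(
#       'ENTERASYS-VLAN-AUTHORIZATION-MIB', 'etsysVlanAuthorizationMIB')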
| [
"[email protected]"
]
| |
9258811529068e0ef737d4531c5f0d6ea7426561 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1692.number-of-ways-to-reorder-array-to-get-same-bst/1692.number-of-ways-to-reorder-array-to-get-same-bst.py | 00644cb83012c5b2e15d2232e9f6f7f861427b4f | []
| no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py |
class Solution:
    def numOfWays(self, nums: List[int]) -> int:
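        # The original stub leaves the body empty. One standard approach, as
        # an editor's sketch: the first element is always the root; smaller
        # and larger elements must each keep their relative order, so the
        # count multiplies binomial interleaving choices recursively
        # (modulo 10^9 + 7, per the problem statement).
        from math import comb

        MOD = 10**9 + 7

        def count(arr):
            if len(arr) <= 2:
                return 1
            root = arr[0]
            left = [x for x in arr if x < root]
            right = [x for x in arr if x > root]
            return comb(len(arr) - 1, len(left)) * count(left) * count(right) % MOD

        return (count(nums) - 1) % MOD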
| [
"[email protected]"
]
| |
fc72058027cff3d6df1073e107bb3a426e164f7b | 85b6e009c45f2dd530d8ae186feb7e6e67d076a8 | /cohesity_management_sdk/models/protection_job_request.py | 3109e3d98f4406b033242dbb266e3567bd18c46e | [
"MIT"
]
| permissive | priyambiswas0/management-sdk-python | 4a60153b038d0a04de02f2308362a2531b0ff9cb | 5807c85e003f271ce069b52529b31abfd08ec153 | refs/heads/master | 2021-10-20T05:43:34.626369 | 2018-05-22T06:04:20 | 2019-02-25T23:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,335 | py | # Copyright 2019 Cohesity Inc.
# -*- coding: utf-8 -*-
import cohesity_management_sdk.models.alerting_config
import cohesity_management_sdk.models.cloud_parameters
import cohesity_management_sdk.models.environment_specific_common_job_parameters
import cohesity_management_sdk.models.time_of_day
import cohesity_management_sdk.models.indexing_policy
import cohesity_management_sdk.models.backup_script
import cohesity_management_sdk.models.remote_adapter
import cohesity_management_sdk.models.source_special_parameters
class ProtectionJobRequest(object):
"""Implementation of the 'Protection Job Request.' model.
Specifies information about a Protection Job.
Attributes:
abort_in_blackout_period (bool): If true, the Cohesity Cluster aborts
any currently executing Job Runs of this Protection Job when a
blackout period specified for this Job starts, even if the Job Run
started before the blackout period began. If false, a Job Run
continues to execute, if the Job Run started before the blackout
period starts.
alerting_config (AlertingConfig): Specifies optional settings for
alerting.
alerting_policy (list of AlertingPolicyEnum): Array of Job Events.
During Job Runs, the following Job Events are generated: 1) Job
succeeds 2) Job fails 3) Job violates the SLA These Job Events can
cause Alerts to be generated. 'kSuccess' means the Protection Job
succeeded. 'kFailure' means the Protection Job failed.
'kSlaViolation' means the Protection Job took longer than the time
period specified in the SLA.
cloud_parameters (CloudParameters): Specifies Cloud parameters that
are applicable to all Protection Sources in a Protection Job in
certain scenarios.
continue_on_quiesce_failure (bool): Whether to continue backing up on
quiesce failure.
dedup_disabled_source_ids (list of long|int): List of source ids for
which source side dedup is disabled from the backup job.
description (string): Specifies a text description about the
Protection Job.
end_time_usecs (long|int): Specifies the epoch time (in microseconds)
after which the Protection Job becomes dormant.
environment (Environment10Enum): Specifies the environment type (such
as kVMware or kSQL) of the Protection Source this Job is
protecting. Supported environment types such as 'kView', 'kSQL',
'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote
Adapter. 'kVMware' indicates the VMware Protection Source
environment. 'kHyperV' indicates the HyperV Protection Source
environment. 'kSQL' indicates the SQL Protection Source
environment. 'kView' indicates the View Protection Source
environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
'kAzure' indicates the Microsoft's Azure Protection Source
environment. 'kNetapp' indicates the Netapp Protection Source
environment. 'kAgent' indicates the Agent Protection Source
            environment. 'kGenericNas' indicates the Generic Network Attached
Storage Protection Source environment. 'kAcropolis' indicates the
            Acropolis Protection Source environment. 'kPhysicalFiles' indicates
the Physical Files Protection Source environment. 'kIsilon'
indicates the Dell EMC's Isilon Protection Source environment.
'kKVM' indicates the KVM Protection Source environment. 'kAWS'
indicates the AWS Protection Source environment. 'kExchange'
indicates the Exchange Protection Source environment. 'kHyperVVSS'
indicates the HyperV VSS Protection Source environment. 'kOracle'
indicates the Oracle Protection Source environment. 'kGCP'
indicates the Google Cloud Platform Protection Source environment.
'kFlashBlade' indicates the Flash Blade Protection Source
environment. 'kAWSNative' indicates the AWS Native Protection
Source environment. 'kVCD' indicates the VMware's Virtual cloud
Director Protection Source environment. 'kO365' indicates the
Office 365 Protection Source environment. 'kO365Outlook' indicates
Office 365 outlook Protection Source environment. 'kHyperFlex'
indicates the Hyper Flex Protection Source environment.
'kGCPNative' indicates the GCP Native Protection Source
environment. 'kAzureNative' indicates the Azure Native Protection
Source environment.
environment_parameters (EnvironmentSpecificCommonJobParameters):
Specifies additional parameters that are common to all Protection
Sources in a Protection Job created for a particular environment
type.
exclude_source_ids (list of long|int): Array of Excluded Source
Objects. List of Object ids from a Protection Source that should
not be protected and are excluded from being backed up by the
Protection Job. Leaf and non-leaf Objects may be in this list and
an Object in this list must have an ancestor in the sourceId
list.
exclude_vm_tag_ids (list of long|int): Array of Arrays of VM Tag Ids
that Specify VMs to Exclude. Optionally specify a list of VMs to
exclude from protecting by listing Protection Source ids of VM
Tags in this two dimensional array. Using this two dimensional
array of Tag ids, the Cluster generates a list of VMs to exclude
from protecting, which are derived from intersections of the inner
arrays and union of the outer array, as shown by the following
example. For example a Datacenter is selected to be protected but
you want to exclude all the 'Former Employees' VMs in the East and
West but keep all the VMs for 'Former Employees' in the South
which are also stored in this Datacenter, by specifying the
following tag id array: [ [1000, 2221], [1000, 3031] ], where 1000
is the 'Former Employee' VM Tag id, 2221 is the 'East' VM Tag id
and 3031 is the 'West' VM Tag id. The first inner array [1000,
2221] produces a list of VMs that are both tagged with 'Former
Employees' and 'East' (an intersection). The second inner array
[1000, 3031] produces a list of VMs that are both tagged with
'Former Employees' and 'West' (an intersection). The outer array
combines the list of VMs from the two inner arrays. The list of
resulting VMs are excluded from being protected this Job.
full_protection_sla_time_mins (long|int): If specified, this setting
            is the number of minutes that a Job Run of a Full (no CBT) backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
full_protection_start_time (TimeOfDay): Specifies the time of day to
start the Full Protection Schedule. This is optional and only
applicable if the Protection Policy defines a monthly or a daily
Full (no CBT) Protection Schedule. Default value is 02:00 AM.
deprecated: true
incremental_protection_sla_time_mins (long|int): If specified, this
            setting is the number of minutes that a Job Run of a CBT-based backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
incremental_protection_start_time (TimeOfDay): Specifies the time of
day to start the CBT-based Protection Schedule. This is optional
and only applicable if the Protection Policy defines a monthly or
a daily CBT-based Protection Schedule. Default value is 02:00 AM.
deprecated: true
indexing_policy (IndexingPolicy): Specifies settings for indexing
files found in an Object (such as a VM) so these files can be
searched and recovered. This also specifies inclusion and
exclusion rules that determine the directories to index.
leverage_storage_snapshots (bool): Specifies whether to leverage the
storage array based snapshots for this backup job. To leverage
storage snapshots, the storage array has to be registered as a
            source. If storage-based snapshots cannot be taken, the job will
            fall back to the default backup method.
leverage_storage_snapshots_for_hyperflex (bool): Specifies whether to
leverage Hyperflex as the storage snapshot array
name (string): Specifies the name of the Protection Job.
parent_source_id (long|int): Specifies the id of the registered
Protection Source that is the parent of the Objects that may be
protected by this Job. For example when a vCenter Server is
registered on a Cohesity Cluster, the Cohesity Cluster assigns a
unique id to this field that represents the vCenter Server.
perform_source_side_dedup (bool): Specifies whether source side dedupe
should be performed or not.
policy_id (string): Specifies the unique id of the Protection Policy
associated with the Protection Job. The Policy provides retry
settings, Protection Schedules, Priority, SLA, etc. The Job
defines the Storage Domain (View Box), the Objects to Protect (if
applicable), Start Time, Indexing settings, etc.
post_backup_script (BackupScript): Specifies the script associated
with the backup job. This field must be specified for 'kPhysical'
jobs. This script will be executed post backup run.
pre_backup_script (BackupScript): Specifies the script associated with
the backup job. This field must be specified for 'kPhysical' jobs.
This script will be executed pre backup run. The 'remoteScript'
field will be used for remote adapter jobs and 'preBackupScript'
field will be used for 'kPhysical' jobs.
priority (PriorityEnum): Specifies the priority of execution for a
Protection Job. Cohesity supports concurrent backups but if the
number of Jobs exceeds the ability to process Jobs, the specified
priority determines the execution Job priority. This field also
specifies the replication priority. 'kLow' indicates lowest
execution priority for a Protection job. 'kMedium' indicates
medium execution priority for a Protection job. 'kHigh' indicates
highest execution priority for a Protection job.
qos_type (QosTypeEnum): Specifies the QoS policy type to use for this
Protection Job. 'kBackupHDD' indicates the Cohesity Cluster writes
data directly to the HDD tier for this Protection Job. This is the
recommended setting. 'kBackupSSD' indicates the Cohesity Cluster
writes data directly to the SSD tier for this Protection Job. Only
specify this policy if you need fast ingest speed for a small
number of Protection Jobs.
quiesce (bool): Indicates if the App-Consistent option is enabled for
this Job. If the option is enabled, the Cohesity Cluster quiesces
the file system and applications before taking
Application-Consistent Snapshots. VMware Tools must be installed
on the guest Operating System.
remote_script (RemoteAdapter): For a Remote Adapter 'kPuppeteer' Job,
this field specifies the settings about the remote script that
will be executed by this Job. Only specify this field for Remote
Adapter 'kPuppeteer' Jobs.
source_ids (list of long|int): Array of Protected Source Objects.
Specifies the list of Object ids from the Protection Source to
protect (or back up) by the Protection Job. An Object in this list
may be descendant of another Object in this list. For example a
Datacenter could be selected but its child Host excluded. However,
a child VM under the Host could be explicitly selected to be
protected. Both the Datacenter and the VM are listed.
source_special_parameters (list of SourceSpecialParameters): Array of
Special Source Parameters. Specifies additional settings that can
apply to a subset of the Sources listed in the Protection Job. For
example, you can specify a list of files and folders to protect
instead of protecting the entire Physical Server. If this field's
setting conflicts with environmentParameters, then this setting
will be used.
start_time (TimeOfDay): Specifies the time of day to start the
Protection Schedule. This is optional and only applicable if the
Protection Policy defines a monthly or a daily Protection
Schedule. Default value is 02:00 AM.
timezone (string): Specifies the timezone to use when calculating time
for this Protection Job such as the Job start time. Specify the
timezone in the following format: "Area/Location", for example:
"America/New_York".
view_box_id (long|int): Specifies the Storage Domain (View Box) id
where this Job writes data.
view_name (string): For a Remote Adapter 'kPuppeteer' Job or a 'kView'
Job, this field specifies a View name that should be protected.
Specify this field when creating a Protection Job for the first
time for a View. If this field is specified, ParentSourceId,
SourceIds, and ExcludeSourceIds should not be specified.
vm_tag_ids (list of long|int): Array of Arrays of VMs Tags Ids that
Specify VMs to Protect. Optionally specify a list of VMs to
protect by listing Protection Source ids of VM Tags in this two
dimensional array. Using this two dimensional array of Tag ids,
the Cluster generates a list of VMs to protect which are derived
from intersections of the inner arrays and union of the outer
array, as shown by the following example. To protect only 'Eng'
VMs in the East and all the VMs in the West, specify the following
tag id array: [ [1101, 2221], [3031] ], where 1101 is the 'Eng' VM
Tag id, 2221 is the 'East' VM Tag id and 3031 is the 'West' VM Tag
id. The inner array [1101, 2221] produces a list of VMs that are
both tagged with 'Eng' and 'East' (an intersection). The outer
array combines the list from the inner array with list of VMs
tagged with 'West' (a union). The list of resulting VMs are
protected by this Job.
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"policy_id":'policyId',
"view_box_id":'viewBoxId',
"abort_in_blackout_period":'abortInBlackoutPeriod',
"alerting_config":'alertingConfig',
"alerting_policy":'alertingPolicy',
"cloud_parameters":'cloudParameters',
"continue_on_quiesce_failure":'continueOnQuiesceFailure',
"dedup_disabled_source_ids":'dedupDisabledSourceIds',
"description":'description',
"end_time_usecs":'endTimeUsecs',
"environment":'environment',
"environment_parameters":'environmentParameters',
"exclude_source_ids":'excludeSourceIds',
"exclude_vm_tag_ids":'excludeVmTagIds',
"full_protection_sla_time_mins":'fullProtectionSlaTimeMins',
"full_protection_start_time":'fullProtectionStartTime',
"incremental_protection_sla_time_mins":'incrementalProtectionSlaTimeMins',
"incremental_protection_start_time":'incrementalProtectionStartTime',
"indexing_policy":'indexingPolicy',
"leverage_storage_snapshots":'leverageStorageSnapshots',
"leverage_storage_snapshots_for_hyperflex":'leverageStorageSnapshotsForHyperflex',
"parent_source_id":'parentSourceId',
"perform_source_side_dedup":'performSourceSideDedup',
"post_backup_script":'postBackupScript',
"pre_backup_script":'preBackupScript',
"priority":'priority',
"qos_type":'qosType',
"quiesce":'quiesce',
"remote_script":'remoteScript',
"source_ids":'sourceIds',
"source_special_parameters":'sourceSpecialParameters',
"start_time":'startTime',
"timezone":'timezone',
"view_name":'viewName',
"vm_tag_ids":'vmTagIds'
}
def __init__(self,
name=None,
policy_id=None,
view_box_id=None,
abort_in_blackout_period=None,
alerting_config=None,
alerting_policy=None,
cloud_parameters=None,
continue_on_quiesce_failure=None,
dedup_disabled_source_ids=None,
description=None,
end_time_usecs=None,
environment=None,
environment_parameters=None,
exclude_source_ids=None,
exclude_vm_tag_ids=None,
full_protection_sla_time_mins=None,
full_protection_start_time=None,
incremental_protection_sla_time_mins=None,
incremental_protection_start_time=None,
indexing_policy=None,
leverage_storage_snapshots=None,
leverage_storage_snapshots_for_hyperflex=None,
parent_source_id=None,
perform_source_side_dedup=None,
post_backup_script=None,
pre_backup_script=None,
priority=None,
qos_type=None,
quiesce=None,
remote_script=None,
source_ids=None,
source_special_parameters=None,
start_time=None,
timezone=None,
view_name=None,
vm_tag_ids=None):
"""Constructor for the ProtectionJobRequest class"""
# Initialize members of the class
self.abort_in_blackout_period = abort_in_blackout_period
self.alerting_config = alerting_config
self.alerting_policy = alerting_policy
self.cloud_parameters = cloud_parameters
self.continue_on_quiesce_failure = continue_on_quiesce_failure
self.dedup_disabled_source_ids = dedup_disabled_source_ids
self.description = description
self.end_time_usecs = end_time_usecs
self.environment = environment
self.environment_parameters = environment_parameters
self.exclude_source_ids = exclude_source_ids
self.exclude_vm_tag_ids = exclude_vm_tag_ids
self.full_protection_sla_time_mins = full_protection_sla_time_mins
self.full_protection_start_time = full_protection_start_time
self.incremental_protection_sla_time_mins = incremental_protection_sla_time_mins
self.incremental_protection_start_time = incremental_protection_start_time
self.indexing_policy = indexing_policy
self.leverage_storage_snapshots = leverage_storage_snapshots
self.leverage_storage_snapshots_for_hyperflex = leverage_storage_snapshots_for_hyperflex
self.name = name
self.parent_source_id = parent_source_id
self.perform_source_side_dedup = perform_source_side_dedup
self.policy_id = policy_id
self.post_backup_script = post_backup_script
self.pre_backup_script = pre_backup_script
self.priority = priority
self.qos_type = qos_type
self.quiesce = quiesce
self.remote_script = remote_script
self.source_ids = source_ids
self.source_special_parameters = source_special_parameters
self.start_time = start_time
self.timezone = timezone
self.view_box_id = view_box_id
self.view_name = view_name
self.vm_tag_ids = vm_tag_ids
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
policy_id = dictionary.get('policyId')
view_box_id = dictionary.get('viewBoxId')
abort_in_blackout_period = dictionary.get('abortInBlackoutPeriod')
alerting_config = cohesity_management_sdk.models.alerting_config.AlertingConfig.from_dictionary(dictionary.get('alertingConfig')) if dictionary.get('alertingConfig') else None
alerting_policy = dictionary.get('alertingPolicy')
cloud_parameters = cohesity_management_sdk.models.cloud_parameters.CloudParameters.from_dictionary(dictionary.get('cloudParameters')) if dictionary.get('cloudParameters') else None
continue_on_quiesce_failure = dictionary.get('continueOnQuiesceFailure')
dedup_disabled_source_ids = dictionary.get('dedupDisabledSourceIds')
description = dictionary.get('description')
end_time_usecs = dictionary.get('endTimeUsecs')
environment = dictionary.get('environment')
environment_parameters = cohesity_management_sdk.models.environment_specific_common_job_parameters.EnvironmentSpecificCommonJobParameters.from_dictionary(dictionary.get('environmentParameters')) if dictionary.get('environmentParameters') else None
exclude_source_ids = dictionary.get('excludeSourceIds')
exclude_vm_tag_ids = dictionary.get('excludeVmTagIds')
full_protection_sla_time_mins = dictionary.get('fullProtectionSlaTimeMins')
full_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('fullProtectionStartTime')) if dictionary.get('fullProtectionStartTime') else None
incremental_protection_sla_time_mins = dictionary.get('incrementalProtectionSlaTimeMins')
incremental_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('incrementalProtectionStartTime')) if dictionary.get('incrementalProtectionStartTime') else None
indexing_policy = cohesity_management_sdk.models.indexing_policy.IndexingPolicy.from_dictionary(dictionary.get('indexingPolicy')) if dictionary.get('indexingPolicy') else None
leverage_storage_snapshots = dictionary.get('leverageStorageSnapshots')
leverage_storage_snapshots_for_hyperflex = dictionary.get('leverageStorageSnapshotsForHyperflex')
parent_source_id = dictionary.get('parentSourceId')
perform_source_side_dedup = dictionary.get('performSourceSideDedup')
post_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('postBackupScript')) if dictionary.get('postBackupScript') else None
pre_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('preBackupScript')) if dictionary.get('preBackupScript') else None
priority = dictionary.get('priority')
qos_type = dictionary.get('qosType')
quiesce = dictionary.get('quiesce')
remote_script = cohesity_management_sdk.models.remote_adapter.RemoteAdapter.from_dictionary(dictionary.get('remoteScript')) if dictionary.get('remoteScript') else None
source_ids = dictionary.get('sourceIds')
source_special_parameters = None
        if dictionary.get('sourceSpecialParameters') is not None:
source_special_parameters = list()
for structure in dictionary.get('sourceSpecialParameters'):
source_special_parameters.append(cohesity_management_sdk.models.source_special_parameters.SourceSpecialParameters.from_dictionary(structure))
start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('startTime')) if dictionary.get('startTime') else None
timezone = dictionary.get('timezone')
view_name = dictionary.get('viewName')
vm_tag_ids = dictionary.get('vmTagIds')
# Return an object of this model
return cls(name,
policy_id,
view_box_id,
abort_in_blackout_period,
alerting_config,
alerting_policy,
cloud_parameters,
continue_on_quiesce_failure,
dedup_disabled_source_ids,
description,
end_time_usecs,
environment,
environment_parameters,
exclude_source_ids,
exclude_vm_tag_ids,
full_protection_sla_time_mins,
full_protection_start_time,
incremental_protection_sla_time_mins,
incremental_protection_start_time,
indexing_policy,
leverage_storage_snapshots,
leverage_storage_snapshots_for_hyperflex,
parent_source_id,
perform_source_side_dedup,
post_backup_script,
pre_backup_script,
priority,
qos_type,
quiesce,
remote_script,
source_ids,
source_special_parameters,
start_time,
timezone,
view_name,
vm_tag_ids)
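
# Usage sketch (illustrative values; dictionary keys follow the API property
# names mapped in _names above):
#
#   req = ProtectionJobRequest.from_dictionary({
#       'name': 'vm-backup-job',
#       'policyId': 'policy-123',
#       'viewBoxId': 5,
#       'sourceIds': [10, 11],
#   })
#   req.view_box_id  # -> 5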
| [
"[email protected]"
]
| |
e30926a419b5d166b02a76f3f5c8ed329de20e60 | ff9fedd28f7436ba9945421e061fd2e1dadbf5c3 | /Alogithms/Dijkstra/dijkstra.py | 3d1510e8e6c59b494d2b934513ca7381f575586b | []
| no_license | ritwikbadola/Empirical-Analysis-Of-Algorithms | 0ed1b9c2c92813d11af33405527a4ecced8b2845 | 7ffb7a03e9d356d5368d2d79a49a8dabf49ed6c7 | refs/heads/master | 2022-08-19T12:39:24.875859 | 2020-05-16T03:53:35 | 2020-05-16T03:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | # Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# sys provides maxsize, used below as the "infinite" initial distance
import sys
class Graph():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
def printSolution(self, dist):
        print("Vertex \tDistance from Source")
        for node in range(self.V):
            print(node, "\t", dist[node])
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minDistance(self, dist, sptSet):
        # Initialize minimum distance for the next node
        min_dist = sys.maxsize
        min_index = -1

        # Search for the nearest vertex not yet in the
        # shortest path tree
        for v in range(self.V):
            if dist[v] < min_dist and sptSet[v] == False:
                min_dist = dist[v]
                min_index = v
return min_index
    # Function that implements Dijkstra's single source
# shortest path algorithm for a graph represented
# using adjacency matrix representation
def dijkstra(self, src):
        dist = [sys.maxsize] * self.V
dist[src] = 0
sptSet = [False] * self.V
        for count in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minDistance(dist, sptSet)
# Put the minimum distance vertex in the
            # shortest path tree
sptSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
            # the vertex is not in the shortest path tree
for v in range(self.V):
if self.graph[u][v] > 0 and sptSet[v] == False and \
dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
# self.printSolution(dist)
# Driver program
g = Graph(25)
g.graph = [ [0, 156, 0, 0, 246, 0, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 171, 0, 157, 0, 363],
[156, 0, 323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 323, 0, 151, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 151, 0, 0, 545, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[246, 0, 0, 0, 0, 174, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 545, 174, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[184, 0, 0, 0, 0, 0, 0, 83, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0],
[0, 0, 0, 0, 100, 0, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 224, 0, 0, 209, 0, 0, 0, 0, 217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 209, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 0, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 0, 157, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 342, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 251, 342, 0, 111, 208, 0, 0, 0, 0, 0, 382, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 217, 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, 335, 462, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 335, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[462, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 212, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 212, 0, 135, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135, 0, 174, 0, 0, 0, 0],
[171, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 174, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 382, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0, 0],
[363, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ];
g.dijkstra(0)
# This code is contributed by Divyanshu Mehta
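
# A heap-based variant (illustrative sketch, not part of the original file):
# for sparse graphs this runs in O(E log V) rather than the O(V^2) scan above.
# It assumes the same adjacency-matrix convention, where 0 means "no edge".
import heapq

def dijkstra_heap(graph, src):
    n = len(graph)
    dist = [sys.maxsize] * n
    dist[src] = 0
    pq = [(0, src)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue  # stale queue entry
        for v in range(n):
            w = graph[u][v]
            if w > 0 and d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(pq, (d + w, v))
    return dist

# Example: dijkstra_heap(g.graph, 0) computes the same distance array that
# g.dijkstra(0) builds internally.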
| [
"[email protected]"
]
| |
e8e564dd8a81a7204c2c1219c8828de5d75a5b39 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsexpression.py | 10aca71722b9813074d199da83ce3d260fed8d3b | [
"MIT"
]
| permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSExpression(TestCase):
def testConstants(self):
self.assertEqual(NSConstantValueExpressionType, 0)
self.assertEqual(NSEvaluatedObjectExpressionType, 1)
self.assertEqual(NSVariableExpressionType, 2)
self.assertEqual(NSKeyPathExpressionType, 3)
self.assertEqual(NSFunctionExpressionType, 4)
self.assertEqual(NSUnionSetExpressionType, 5)
self.assertEqual(NSIntersectSetExpressionType, 6)
self.assertEqual(NSMinusSetExpressionType, 7)
self.assertEqual(NSSubqueryExpressionType, 13)
self.assertEqual(NSAggregateExpressionType, 14)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(NSBlockExpressionType, 19)
@min_os_level("10.9")
def testConstants10_9(self):
self.assertEqual(NSAnyKeyExpressionType, 15)
@min_os_level("10.11")
def testConstants10_11(self):
self.assertEqual(NSConditionalExpressionType, 20)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgIsBlock(NSExpression.expressionForBlock_arguments_, 0, b"@@@@")
self.assertResultIsBlock(NSExpression.expressionBlock, b"@@@@")
@min_os_level("10.6")
def testMethod10_6_unsupported(self):
self.assertArgIsPrintf(NSExpression.expressionWithFormat_, 0)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
9102058651fbf91cbac1b616a121c35f0eb0973e | 8ab173ee437170afd5e4179f4e44d46b829f3ab0 | /Validation/RecoTrack/python/plotting/html.py | 04c09289f18ce2745bf5d1c2b56af89db89b9cc2 | [
"Apache-2.0"
]
| permissive | suchandradutta/cmssw | 6b085313fe15868bd3f7dfddfb850debe111410e | ed3aa96ca24548294076d466db17b0bca44d1359 | refs/heads/Phase2Digitizer_91X_170420 | 2023-06-25T09:47:56.111691 | 2017-04-20T09:59:31 | 2017-04-20T09:59:31 | 12,500,444 | 1 | 1 | null | 2018-11-06T10:34:46 | 2013-08-31T04:15:48 | C++ | UTF-8 | Python | false | false | 25,818 | py | import os
import collections
def _lowerFirst(s):
return s[0].lower()+s[1:]
_sampleName = {
"RelValMinBias": "Min Bias",
"RelValTTbar": "TTbar",
"RelValQCD_Pt_600_800": "QCD Pt 600 to 800",
"RelValQCD_Pt_3000_3500": "QCD Pt 3000 to 3500",
"RelValQCD_FlatPt_15_3000": "QCD Flat Pt 15 to 3000",
"RelValZMM": "ZMuMu",
"RelValWjet_Pt_3000_3500": "Wjet Pt 3000 to 3500",
"RelValH125GGgluonfusion": "Higgs to gamma gamma",
"RelValSingleElectronPt35": "Single Electron Pt 35",
"RelValSingleElectronPt35Extended": "Single Electron Pt 35 (extended eta)",
"RelValSingleElectronPt10": "Single Electron Pt 10",
"RelValSingleMuPt10": "Single Muon Pt 10",
"RelValSingleMuPt10Extended": "Single Muon Pt 10 (extended eta)",
"RelValSingleMuPt100": "Single Muon Pt 100",
"RelValTenMuE_0_200": "Ten muon Pt 0-200",
}
_sampleFileName = {
"RelValMinBias": "minbias",
"RelValTTbar": "ttbar",
"RelValQCD_Pt_600_800": "qcd600",
"RelValQCD_Pt_3000_3500": "qcd3000",
"RelValQCD_FlatPt_15_3000": "qcdflat",
"RelValZMM": "zmm",
"RelValWjet_Pt_3000_3500": "wjet3000",
"RelValH125GGgluonfusion": "hgg",
"RelValSingleElectronPt35": "ele35",
"RelValSingleElectronPt35Extended": "ele35ext",
"RelValSingleElectronPt10": "ele10",
"RelValSingleMuPt10": "mu10",
"RelValSingleMuPt10Extended": "mu10ext",
"RelValSingleMuPt100": "mu100",
"RelValTenMuE_0_200": "tenmu200",
}
_allTPEfficName = "All tracks (all TPs)"
_fromPVName = "Tracks from PV"
_fromPVAllTPName = "Tracks from PV (all TPs)"
_conversionName = "Tracks for conversions"
_gsfName = "Electron GSF tracks"
def _toHP(s):
return "High purity "+_lowerFirst(s)
def _allToHP(s):
return s.replace("All", "High purity")
def _ptCut(s):
return s.replace("Tracks", "Tracks pT > 0.9 GeV").replace("tracks", "tracks pT > 0.9 GeV")
_trackQualityNameOrder = collections.OrderedDict([
("seeding_seeds", "Seeds"),
("seeding_seedsa", "Seeds A"),
("seeding_seedsb", "Seeds B"),
("seeding_seedstripl", "Seeds triplets"),
("seeding_seedspair", "Seeds pairs"),
("building_", "Built tracks"),
("", "All tracks"),
("highPurity", "High purity tracks"),
("Pt09", "Tracks pT > 0.9 GeV"),
("highPurityPt09", "High purity tracks pT > 0.9 GeV"),
("ByOriginalAlgo", "All tracks by originalAlgo"),
("highPurityByOriginalAlgo", "High purity tracks by originalAlgo"),
("ByAlgoMask", "All tracks by algoMask"),
("highPurityByAlgoMask", "High purity tracks by algoMask"),
("btvLike", "BTV-like"),
("ak4PFJets", "AK4 PF jets"),
("allTPEffic_", _allTPEfficName),
("allTPEffic_highPurity", _allToHP(_allTPEfficName)),
("fromPV_", _fromPVName),
("fromPV_highPurity", _toHP(_fromPVName)),
("fromPV_Pt09", _ptCut(_fromPVName)),
("fromPV_highPurityPt09", _toHP(_ptCut(_fromPVName))),
("fromPVAllTP_", _fromPVAllTPName),
("fromPVAllTP_highPurity", _toHP(_fromPVAllTPName)),
("fromPVAllTP_Pt09", _ptCut(_fromPVAllTPName)),
("fromPVAllTP_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName))),
("fromPVAllTP2_", _fromPVAllTPName.replace("PV", "PV v2")),
("fromPVAllTP2_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName).replace("PV", "PV v2")),
("fromPVAllTP2_Pt09", _fromPVAllTPName.replace("Tracks", "Tracks pT > 0.9 GeV").replace("PV", "PV v2")),
("fromPVAllTP2_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName)).replace("PV", "PV v2")),
("conversion_", _conversionName),
("gsf_", _gsfName),
])
_trackAlgoName = {
"ootb": "Out of the box",
"iter0" : "Iterative Step 0",
"iter1" : "Iterative Step 1",
"iter2" : "Iterative Step 2",
"iter3" : "Iterative Step 3",
"iter4" : "Iterative Step 4",
"iter5" : "Iterative Step 5",
"iter6" : "Iterative Step 6",
"iter7" : "Iterative Step 7",
"iter9" : "Iterative Step 9",
"iter10": "Iterative Step 10",
}
_trackAlgoOrder = [
'ootb',
'initialStepPreSplitting',
'initialStep',
'highPtTripletStep',
'detachedQuadStep',
'detachedTripletStep',
'lowPtQuadStep',
'lowPtTripletStep',
'pixelPairStep',
'mixedTripletStep',
'pixelLessStep',
'tobTecStep',
'jetCoreRegionalStep',
'muonSeededStepInOut',
'muonSeededStepOutIn',
'duplicateMerge',
'convStep',
'conversionStep',
'ckfInOutFromConversions',
'ckfOutInFromConversions',
'electronGsf',
'iter0',
'iter1',
'iter2',
'iter3',
'iter4',
'iter5',
'iter6',
'iter7',
'iter9',
'iter10',
]
_pageNameMap = {
"summary": "Summary",
"vertex": "Vertex",
"v0": "V0",
"miniaod": "MiniAOD",
"timing": "Timing",
"hlt": "HLT",
}
_sectionNameMapOrder = collections.OrderedDict([
# These are for the summary page
("seeding_seeds", "Seeds"),
("building", "Built tracks"),
("", "All tracks"),
("highPurity", "High purity tracks"),
("btvLike", "BTV-like"),
("ak4PFJets", "AK4 PF jets"),
("allTPEffic", _allTPEfficName),
("allTPEffic_highPurity", _allTPEfficName.replace("All", "High purity")),
("fromPV", _fromPVName),
("fromPV_highPurity", "High purity "+_lowerFirst(_fromPVName)),
("fromPVAllTP", _fromPVAllTPName),
("fromPVAllTP_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName)),
("conversion", _conversionName),
("gsf", _gsfName),
# These are for vertices
("genvertex", "Gen vertices"),
("pixelVertices", "Pixel vertices"),
("selectedPixelVertices", "Selected pixel vertices"),
("firstStepPrimaryVerticesPreSplitting", "firstStepPrimaryVerticesPreSplitting"),
("firstStepPrimaryVertices", "firstStepPrimaryVertices"),
("offlinePrimaryVertices", "All vertices (offlinePrimaryVertices)"),
("selectedOfflinePrimaryVertices", "Selected vertices (selectedOfflinePrimaryVertices)"),
("offlinePrimaryVerticesWithBS", "All vertices with BS constraint"),
("selectedOfflinePrimaryVerticesWithBS", "Selected vertices with BS constraint"),
# These are for V0
("k0", "K0"),
("lambda", "Lambda"),
])
_allTPEfficLegend = "All tracks, efficiency denominator contains all TrackingParticles"
_fromPVLegend = "Tracks from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVPtLegend = "Tracks (pT > 0.9 GeV) from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVAllTPLegend = "Tracks from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPtLegend = "Tracks (pT > 0.9 GeV) from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTP2Legend = "Tracks from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPt2Legend = "Tracks (pT > 0.9 GeV) from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
def _sectionNameLegend():
return {
"btvLike": "BTV-like selected tracks",
"ak4PFJets": "Tracks from AK4 PF jets (jet corrected pT > 10 GeV)",
"allTPEffic": _allTPEfficLegend,
"allTPEffic_": _allTPEfficLegend,
"allTPEffic_highPurity": _allToHP(_allTPEfficLegend),
"fromPV": _fromPVLegend,
"fromPV_": _fromPVLegend,
"fromPV_highPurity": _toHP(_fromPVLegend),
"fromPV_Pt09": _fromPVPtLegend,
"fromPV_highPurity_Pt09": _toHP(_fromPVPtLegend),
"fromPVAllTP": _fromPVAllTPLegend,
"fromPVAllTP_": _fromPVAllTPLegend,
"fromPVAllTP_highPurity": _toHP(_fromPVAllTPLegend),
"fromPVAllTP_Pt09": _fromPVAllTPPtLegend,
"fromPVAllTP_highPurityPt09": _toHP(_fromPVAllTPPtLegend),
"fromPVAllTP2_": _fromPVAllTP2Legend,
"fromPVAllTP2_highPurity": _toHP(_fromPVAllTP2Legend),
"fromPVAllTP2_Pt09": _fromPVAllTPPt2Legend,
"fromPVAllTP2_highPurityPt09": _toHP(_fromPVAllTPPt2Legend),
}
class Table:
# table [column][row]
def __init__(self, columnHeaders, rowHeaders, table, purpose, page, section):
if len(columnHeaders) != len(table):
raise Exception("Got %d columnHeaders for table with %d columns for page %s, section %s" % (len(columnHeaders), len(table), page, section))
lenRow = len(table[0])
for icol, column in enumerate(table):
if len(column) != lenRow:
raise Exception("Got non-square table, first column has %d rows, column %d has %d rows" % (lenRow, icol, len(column)))
if len(rowHeaders) != lenRow:
raise Exception("Got %d rowHeaders for table with %d rows" % (len(rowHeaders), lenRow))
self._columnHeaders = columnHeaders
self._rowHeaders = rowHeaders
self._table = table
self._purpose = purpose
self._page = page
self._section = section
def getPurpose(self):
return self._purpose
def getPage(self):
return self._page
def getSection(self):
return self._section
def ncolumns(self):
return len(self._table)
def nrows(self):
return len(self._table[0])
def columnHeaders(self):
return self._columnHeaders
def rowHeaders(self):
return self._rowHeaders
def tableAsColumnRow(self):
return self._table
def tableAsRowColumn(self):
return map(list, zip(*self._table))
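
# Example (illustrative values): Table stores its data column-major, so
# tableAsRowColumn() transposes it for row-wise HTML rendering.
#
#   t = Table(["New", "Reference"], ["Efficiency", "Fake rate"],
#             [[0.91, 0.05], [0.90, 0.06]],
#             PlotPurpose.TrackingSummary, "summary", "highPurity")
#   t.tableAsRowColumn()  # -> [[0.91, 0.90], [0.05, 0.06]]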
class PlotPurpose:
class TrackingIteration: pass
class TrackingSummary: pass
class Vertexing: pass
class MiniAOD: pass
class Timing: pass
class HLT: pass
class Page(object):
def __init__(self, title, sampleName):
self._content = [
'<html>',
' <head>',
' <title>%s</title>' % title,
' </head>',
' <body>',
' '+sampleName,
' <br/>',
' <br/>',
]
self._plotSets = {}
self._tables = {}
def addPlotSet(self, section, plotSet):
if section in self._plotSets:
self._plotSets[section].extend(plotSet)
else:
self._plotSets[section] = plotSet
def addTable(self, section, table):
self._tables[section] = table
def isEmpty(self):
for plotSet in self._plotSets.itervalues():
if len(plotSet) > 0:
return False
if len(self._tables) > 0:
return False
return True
def write(self, fileName):
self._legends = []
self._sectionLegendIndex = {}
self._columnHeaders = []
self._columnHeadersIndex = {}
self._formatPlotSets()
self._formatTables()
self._formatLegend()
self._content.extend([
' </body>',
'</html>',
])
#print "Writing HTML report page", fileName
f = open(fileName, "w")
for line in self._content:
f.write(line)
f.write("\n")
f.close()
def _appendLegend(self, section):
leg = ""
legends = _sectionNameLegend()
if section in legends:
if section in self._sectionLegendIndex:
leg = self._sectionLegendIndex[section]
else:
legnum = len(self._legends)+1
leg = "<sup>%d</sup>" % legnum
leg2 = "<sup>%d)</sup>" % legnum
self._legends.append("%s %s" % (leg2, legends[section]))
self._sectionLegendIndex[section] = leg
return leg
def _formatPlotSets(self):
self._content.extend([
' <table>'
' <tr>',
])
fileTable = []
sections = self._orderSets(self._plotSets.keys())
for isec, section in enumerate(sections):
leg = self._appendLegend(section)
self._content.extend([
' <td>%s%s</td>' % (self._mapSectionName(section), leg),
])
files = [(os.path.basename(f), f) for f in self._plotSets[section]]
for row in fileTable:
found = False
for i, (bsf, f) in enumerate(files):
if bsf == row[0]:
row.append(f)
found = True
del files[i]
break
if not found:
row.append(None)
for bsf, f in files:
fileTable.append( [bsf] + [None]*isec + [f] )
self._content.extend([
' </tr>',
])
for row in fileTable:
self._content.append(' <tr>')
bs = row[0]
for elem in row[1:]:
if elem is not None:
self._content.append(' <td><a href="%s">%s</a></td>' % (elem, bs))
else:
self._content.append(' <td></td>')
self._content.append(' </tr>')
self._content.extend([
' </table>',
])
def _appendColumnHeader(self, header):
leg = ""
if header in self._columnHeadersIndex:
leg = self._columnHeadersIndex[header]
else:
leg = str(chr(ord('A')+len(self._columnHeaders)))
self._columnHeaders.append("%s: %s" % (leg, header))
self._columnHeadersIndex[header] = leg
return leg
def _formatTables(self):
def _allNone(row):
for item in row:
if item is not None:
return False
return True
sections = self._orderSets(self._tables.keys())
for isec, section in enumerate(sections):
leg = self._appendLegend(section)
table = self._tables[section]
self._content.extend([
' <br/>',
' %s%s' % (self._mapSectionName(section), leg),
' <table border="1">'
])
# table is stored in column-row, need to transpose
data = table.tableAsRowColumn()
self._content.extend([
' <tr>'
' <td></td>'
])
heads = table.columnHeaders()
if max(map(lambda h: len(h), heads)) > 20:
heads = [self._appendColumnHeader(h) for h in heads]
for head in heads:
self._content.append(' <td>%s</td>' % head)
self._content.append(' </tr>')
for irow, row in enumerate(data):
# Skip row if all values are non-existent
if _allNone(row):
continue
self._content.extend([
' <tr>'
' <td>%s</td>' % table.rowHeaders()[irow]
])
# align the number columns to right
for icol, item in enumerate(row):
formatted = str(item) if item is not None else ""
self._content.append(' <td align="right">%s</td>' % formatted)
self._content.append(' </tr>')
self._content.append(' </table>')
for shortenedColumnHeader in self._columnHeaders:
self._content.append(' %s<br/>' % shortenedColumnHeader)
self._columnHeaders = []
self._columnHeadersIndex = {}
def _formatLegend(self):
if len(self._legends) > 0:
self._content.extend([
                '  <br/>'
                '  Details:<br/>',
])
for leg in self._legends:
self._content.append(' %s<br/>' % leg)
def _mapSectionName(self, section):
return _sectionNameMapOrder.get(section, section)
def _orderSets(self, keys):
keys_sorted = sorted(keys)
ret = []
for section in _sectionNameMapOrder.keys():
if section in keys_sorted:
ret.append(section)
keys.remove(section)
ret.extend(keys_sorted)
return ret
class PageSet(object):
def __init__(self, title, sampleName, sample, fastVsFull, pileupComparison, dqmSubFolderTranslatedToSectionName=None):
self._title = title
self._sampleName = sampleName
self._pages = collections.OrderedDict()
self._dqmSubFolderTranslatedToSectionName = dqmSubFolderTranslatedToSectionName
self._prefix = ""
if sample.fastsim():
self._prefix += "fast_"
if fastVsFull:
self._prefix += "full_"
self._prefix += _sampleFileName.get(sample.label(), sample.label())+"_"
if hasattr(sample, "hasScenario") and sample.hasScenario():
self._prefix += sample.scenario()+"_"
if hasattr(sample, "hasPileup"):
if sample.hasPileup():
self._prefix += "pu"+str(sample.pileupNumber())+"_"+sample.pileupType()+"_"
else:
self._prefix += "nopu_"
if pileupComparison:
self._prefix += "vspu_"
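        # Illustrative result: a FullSim TTbar sample with pileup 35 (25ns) and
        # no comparison flags would leave self._prefix == "ttbar_pu35_25ns_".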
def _getPage(self, key, pageClass):
if key not in self._pages:
page = pageClass(self._title, self._sampleName)
self._pages[key] = page
else:
page = self._pages[key]
return page
def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
pageKey = plotterFolder.getPage()
if pageKey is None:
if dqmSubFolder is not None:
pageKey = dqmSubFolder.translated
else:
pageKey = plotterFolder.getName()
page = self._getPage(pageKey, Page)
sectionName = plotterFolder.getSection()
if sectionName is None:
if plotterFolder.getPage() is not None and dqmSubFolder is not None:
if self._dqmSubFolderTranslatedToSectionName is not None:
sectionName = self._dqmSubFolderTranslatedToSectionName(dqmSubFolder.translated)
else:
sectionName = dqmSubFolder.translated
else:
sectionName = ""
page.addPlotSet(sectionName, plotFiles)
def addTable(self, table):
if table is None:
return
page = self._getPage(table.getPage(), Page)
page.addTable(table.getSection(), table)
def write(self, baseDir):
#print "TrackingPageSet.write"
ret = []
keys = self._orderPages(self._pages.keys())
for key in keys:
page = self._pages[key]
if page.isEmpty():
continue
fileName = "%s%s.html" % (self._prefix, key)
page.write(os.path.join(baseDir, fileName))
ret.append( (self._mapPagesName(key), fileName) )
return ret
def _mapPagesName(self, name):
return _pageNameMap.get(name, name)
def _orderPages(self, keys):
return keys
class TrackingIterPage(Page):
def __init__(self, *args, **kwargs):
super(TrackingIterPage, self).__init__(*args, **kwargs)
def _mapSectionName(self, quality):
return _trackQualityNameOrder.get(quality, quality)
def _orderSets(self, qualities):
ret = []
for qual in _trackQualityNameOrder.keys():
if qual in qualities:
ret.append(qual)
qualities.remove(qual)
ret.extend(qualities)
return ret
class TrackingPageSet(PageSet):
def __init__(self, *args, **kwargs):
super(TrackingPageSet, self).__init__(*args, **kwargs)
def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
(algo, quality) = dqmSubFolder.translated
pageName = algo
sectionName = quality
# put all non-iterative stuff under OOTB
#
        # it is a bit of a hack to access trackingPlots.TrackingPlotFolder this way,
# but it was simple and it works
if algo != "ootb" and not plotterFolder._plotFolder.isAlgoIterative(algo):
pageName = "ootb"
sectionName = algo
folderName = plotterFolder.getName()
if folderName != "":
sectionName = folderName+"_"+sectionName
page = self._getPage(pageName, TrackingIterPage)
page.addPlotSet(sectionName, plotFiles)
def _mapPagesName(self, algo): # algo = pageName
return _trackAlgoName.get(algo, algo)
def _orderPages(self, algos):
ret = []
for algo in _trackAlgoOrder:
if algo in algos:
ret.append(algo)
algos.remove(algo)
ret.extend(algos)
return ret
class IndexSection:
def __init__(self, sample, title, fastVsFull, pileupComparison):
self._sample = sample
self._sampleName = ""
if sample.fastsim():
self._sampleName += "FastSim "
if fastVsFull:
self._sampleName += "vs FullSim "
pileup = ""
if hasattr(sample, "hasPileup"):
pileup = "with no pileup"
if sample.hasPileup():
pileup = "with %d pileup (%s)" % (sample.pileupNumber(), sample.pileupType())
if pileupComparison is not None:
pileup += " "+pileupComparison
if hasattr(sample, "customPileupLabel"):
pileup = sample.customPileupLabel()
scenario = ""
if hasattr(sample, "hasScenario") and sample.hasScenario():
scenario = " (\"%s\")" % sample.scenario()
self._sampleName += "%s sample%s %s" % (_sampleName.get(sample.name(), sample.name()), scenario, pileup)
params = [title, self._sampleName, sample, fastVsFull, pileupComparison is not None]
self._summaryPage = PageSet(*params)
self._iterationPages = TrackingPageSet(*params)
self._vertexPage = PageSet(*params)
self._miniaodPage = PageSet(*params)
self._timingPage = PageSet(*params)
self._hltPages = PageSet(*params, dqmSubFolderTranslatedToSectionName=lambda algoQuality: algoQuality[0])
self._otherPages = PageSet(*params)
self._purposePageMap = {
PlotPurpose.TrackingIteration: self._iterationPages,
PlotPurpose.TrackingSummary: self._summaryPage,
PlotPurpose.Vertexing: self._vertexPage,
PlotPurpose.MiniAOD: self._miniaodPage,
PlotPurpose.Timing: self._timingPage,
PlotPurpose.HLT: self._hltPages,
}
def addPlots(self, plotterFolder, dqmSubFolder, plotFiles):
page = self._purposePageMap.get(plotterFolder.getPurpose(), self._otherPages)
page.addPlotSet(plotterFolder, dqmSubFolder, plotFiles)
def addTable(self, table):
if table is None:
return
page = self._purposePageMap.get(table.getPurpose(), self._otherPages)
page.addTable(table)
params = []
def write(self, baseDir):
ret = [
" "+self._sampleName,
" <br/>",
" <ul>",
]
for pages in [self._summaryPage, self._iterationPages, self._vertexPage, self._miniaodPage, self._timingPage, self._hltPages, self._otherPages]:
labelFiles = pages.write(baseDir)
for label, fname in labelFiles:
ret.append(' <li><a href="%s">%s</a></li>' % (fname, label))
ret.extend([
' </ul>',
' <br/>',
])
return ret
class HtmlReport:
def __init__(self, validationName, newBaseDir):
self._title = "Tracking validation "+validationName
self._newBaseDir = newBaseDir
self._index = [
'<html>',
' <head>',
' <title>%s</title>' % self._title,
' </head>',
' <body>',
]
self._sections = collections.OrderedDict()
def addNote(self, note):
self._index.append(' <p>%s</p>'%note)
def beginSample(self, sample, fastVsFull=False, pileupComparison=None):
# Fast vs. Full becomes just after the corresponding Fast
# Same for PU
rightAfterRefSample = fastVsFull or (pileupComparison is not None)
key = (sample.digest(), rightAfterRefSample)
if key in self._sections:
self._currentSection = self._sections[key]
else:
self._currentSection = IndexSection(sample, self._title, fastVsFull, pileupComparison)
self._sections[key] = self._currentSection
def addPlots(self, *args, **kwargs):
self._currentSection.addPlots(*args, **kwargs)
def addTable(self, *args, **kwargs):
self._currentSection.addTable(*args, **kwargs)
def write(self):
# Reorder sections such that Fast vs. Full becomes just after the corresponding Fast
keys = self._sections.iterkeys()
newkeys = []
for key in keys:
if not key[1]:
newkeys.append(key)
continue
# is fast vs full
ind_fast = newkeys.index( (key[0], False) )
newkeys.insert(ind_fast+1, key)
for key in newkeys:
section = self._sections[key]
self._index.extend(section.write(self._newBaseDir))
self._index.extend([
" </body>",
"</html>",
])
f = open(os.path.join(self._newBaseDir, "index.html"), "w")
for line in self._index:
f.write(line)
f.write("\n")
f.close()
class HtmlReportDummy:
def __init__(self):
pass
def beginSample(self, *args, **kwargs):
pass
def addPlots(self, *args, **kwargs):
pass
def addTable(self, *args, **kwargs):
pass
| [
"[email protected]"
]
| |
a9812104f466c0374fbccf71d0cd2b8edbf21fb8 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20200601/route_filter.py | 91eecb201ea5a51babd94a74b8238698682e23f2 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RouteFilter resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if route_filter_name is not None:
pulumi.set(__self__, "route_filter_name", route_filter_name)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="routeFilterName")
def route_filter_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route filter.
"""
return pulumi.get(self, "route_filter_name")
@route_filter_name.setter
def route_filter_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_filter_name", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteFilterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param RouteFilterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteFilterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteFilterArgs.__new__(RouteFilterArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_filter_name"] = route_filter_name
__props__.__dict__["rules"] = rules
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["ipv6_peerings"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210501:RouteFilter")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilter, __self__).__init__(
'azure-native:network/v20200601:RouteFilter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
"""
Get an existing RouteFilter resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteFilterArgs.__new__(RouteFilterArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["ipv6_peerings"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["rules"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteFilter(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipv6Peerings")
def ipv6_peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
"""
A collection of references to express route circuit ipv6 peerings.
"""
return pulumi.get(self, "ipv6_peerings")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
"""
A collection of references to express route circuit peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the route filter resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
24b6a392193af3ed499ed5481be0d574615aa635 | fa0f12a6d63be22b588133bfb9c130f1eeecab3d | /myvenv/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py | 1295e23141c110930d3bf02637af4990d0143b8e | []
| no_license | 8th-caulion/high-hat | 6b2c455be14b5e617bf993cfb67c68975df3aa65 | fc1f9793747892b7b58f066c45ab95d3f0269db9 | refs/heads/master | 2023-08-02T12:07:36.540488 | 2020-06-03T17:36:32 | 2020-06-03T17:36:32 | 267,542,957 | 0 | 6 | null | 2021-09-22T19:09:26 | 2020-05-28T09:04:29 | Python | UTF-8 | Python | false | false | 8,237 | py | """Logic that powers autocompletion installed by ``pip completion``.
"""
import optparse
import os
import sys
from itertools import chain
from pip._internal.cli.main_parser import create_main_parser
from pip._internal.commands import commands_dict, create_command
from pip._internal.utils.misc import get_installed_distributions
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
    from typing import Any, Iterable, List, Optional
def autocomplete():
    # type: () -> None
"""Entry Point for completion of main and subcommand options.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
    parser = create_main_parser()
    subcommands = list(commands_dict)
    options = []
    # subcommand
    subcommand_name = None  # type: Optional[str]
    for word in cwords:
        if word in subcommands:
            subcommand_name = word
            break
    # subcommand options
    if subcommand_name is not None:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for show and uninstall
should_list_installed = (
subcommand_name in ['show', 'uninstall'] and
not current.startswith('-')
)
if should_list_installed:
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
        subcommand = create_command(subcommand_name)
for opt in subcommand.parser.option_list_all:
if opt.help != optparse.SUPPRESS_HELP:
for opt_str in opt._long_opts + opt._short_opts:
options.append((opt_str, opt.nargs))
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
# get completion type given cwords and available subcommand options
completion_type = get_path_completion_type(
cwords, cword, subcommand.parser.option_list_all,
)
# get completion files and directories if ``completion_type`` is
# ``<file>``, ``<dir>`` or ``<path>``
if completion_type:
        paths = auto_complete_paths(current, completion_type)
        options = [(path, 0) for path in paths]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1] and option[0][:2] == "--":
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
        flattened_opts = chain.from_iterable(opts)
        if current.startswith('-'):
            for opt in flattened_opts:
if opt.help != optparse.SUPPRESS_HELP:
subcommands += opt._long_opts + opt._short_opts
else:
# get completion type given cwords and all available options
            completion_type = get_path_completion_type(cwords, cword,
                                                       flattened_opts)
            if completion_type:
                subcommands = list(auto_complete_paths(current,
                                                       completion_type))
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
def get_path_completion_type(cwords, cword, opts):
    # type: (List[str], int, Iterable[Any]) -> Optional[str]
"""Get the type of path completion (``file``, ``dir``, ``path`` or None)
:param cwords: same as the environmental variable ``COMP_WORDS``
:param cword: same as the environmental variable ``COMP_CWORD``
:param opts: The available options to check
:return: path completion type (``file``, ``dir``, ``path`` or None)
"""
if cword < 2 or not cwords[cword - 2].startswith('-'):
        return None
for opt in opts:
if opt.help == optparse.SUPPRESS_HELP:
continue
for o in str(opt).split('/'):
if cwords[cword - 2].split('=')[0] == o:
if not opt.metavar or any(
x in ('path', 'file', 'dir')
for x in opt.metavar.split('/')):
return opt.metavar
    return None
def auto_complete_paths(current, completion_type):
    # type: (str, str) -> Iterable[str]
"""If ``completion_type`` is ``file`` or ``path``, list all regular files
and directories starting with ``current``; otherwise only list directories
starting with ``current``.
:param current: The word to be completed
    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
:return: A generator of regular files and/or directories
"""
directory, filename = os.path.split(current)
current_path = os.path.abspath(directory)
# Don't complete paths if they can't be accessed
if not os.access(current_path, os.R_OK):
return
filename = os.path.normcase(filename)
# list all files that start with ``filename``
file_list = (x for x in os.listdir(current_path)
if os.path.normcase(x).startswith(filename))
for f in file_list:
opt = os.path.join(current_path, f)
comp_file = os.path.normcase(os.path.join(directory, f))
# complete regular files when there is not ``<dir>`` after option
# complete directories when there is ``<file>``, ``<path>`` or
        # ``<dir>`` after option
if completion_type != 'dir' and os.path.isfile(opt):
yield comp_file
elif os.path.isdir(opt):
yield os.path.join(comp_file, '')
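if __name__ == "__main__":
    # Hedged demo (not part of the original pip module; requires pip to be
    # importable): simulate the environment variables that pip's bash
    # completion script exports, then run autocomplete(). With the values
    # below it prints the subcommands starting with "inst" (e.g. "install")
    # and exits with status 1.
    os.environ["PIP_AUTO_COMPLETE"] = "1"
    os.environ["COMP_WORDS"] = "pip inst"
    os.environ["COMP_CWORD"] = "1"
    autocomplete()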
| [
"[email protected]"
]
| |
b9132f16bfc5b5e0cc2704d85af65a089cffd7cb | eee647635af1583d9b1150b7cd3195336291e1d2 | /ABC133/c.py | eb49ffdc05d6db403c85c8227196668dd8d288ac | []
| no_license | lilium513/competition_programing | 42f69222290b09b491477b8a2b9c2d4513ebe301 | 45082bf542224b667e753ad357cf145f683fde54 | refs/heads/master | 2020-06-22T03:16:34.510906 | 2019-07-31T18:22:31 | 2019-07-31T18:22:31 | 197,619,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
def do():
L, R = list(map(int, input().split(" ")))
ans = 10 ** 15
    if R - L < 5000:  # if the gap is small, brute-force every pair (i, j)
for i in range(L,R + 1):
for j in range(i+1,R + 1):
if (i*j) % 2019 < ans:
ans = (i*j) % 2019
    else:  # R - L >= 5000 > 2019, so [L, R] surely contains a multiple of 2019
        ans = 0
    print(ans)
do()
| [
"[email protected]"
]
| |
58bb65a58ddad2e7ba4755e15c3698f3ff9b3301 | cb33113c4063867fa41cb74943d0a056a383b6a1 | /codexpert/Snake.py | bf0365b45c2712a8fdc2e057e76157dea480dae5 | []
| no_license | manuck/Algorithm | 9c6280095da6b88473460da52d07fb23ee6c3f9f | 4c15ff42f39224eb9b29728544c92dce9341fdfa | refs/heads/master | 2020-04-18T02:06:53.437576 | 2019-06-26T08:59:16 | 2019-06-26T08:59:16 | 167,148,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | import sys
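# Redirect stdin to the bundled sample input so the solution reads from a file
# when run locally.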
sys.stdin = open("Snake_input.txt")
| [
"[email protected]"
]
| |
e798b57fa3a276c7acb65be428cc91e5a58aca43 | e3f2ab2999a851121897c02ee81bd85c2543bb96 | /ketan/codes/ee18btech11030/ee18btech11030_1.py | 7034225e0dcac1c1afe24ced57259387f4318dfb | []
| no_license | yashwanthguguloth24/control | ee38822c00d709ab63a35a9ebf7be886abae7eb7 | cff91230294686a4ee9432b04aea4333198512c1 | refs/heads/master | 2022-09-16T14:49:10.111030 | 2020-06-01T03:21:08 | 2020-06-01T03:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | ###################################################################
# This is python code for Bode plots.
# By Moparthi Varun Sankar
# April 28, 2020
# Released under GNU GPL
###################################################################
from scipy import signal
import matplotlib.pyplot as plt
from pylab import *
#if using termux
import subprocess
import shlex
#end if
#Defining the transfer function
s1 = signal.lti([16200,21*16200,110*16200], [11, 18*11 ,99*11,162*11,0]) #G(s)
s2 = signal.lti([1,0.121], [754.223*1,754.223*0.0001604]) #Gc(s)
s3 = signal.lti([16200,342160.2,1823164.2,215622],[8296.2,149333,821522,1344116.2,215.6,0]) #G(s)*Gc(s)
#signal.bode takes transfer function as input and returns frequency,magnitude and phase arrays
w1,mag1,phase1 = signal.bode(s1,n=1000)
w2,mag2,phase2 = signal.bode(s2,n=1000)
w3,mag3,phase3 = signal.bode(s3,n=1000)
plt.figure()
plt.subplot(2,1,1)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Magnitude(db)')
plt.semilogx(w1, mag1,label='Uncompensated') # Magnitude plot for G(s)
plt.semilogx(w2, mag2,label='Compensator') # Magnitude plot for Gc(s)
plt.semilogx(w3, mag3,label='Compensated') # Magnitude plot for G(s)*Gc(s)
plt.plot(38.95,0,'o')
plt.text(38.95,0, '({}, {})'.format(38.95,0))
plt.plot(0.0001604,0,'o')
plt.text(0.0001604,0, '({}, {})'.format(0.0001604,0))
plt.plot(0.121,-57.55,'o')
plt.text(0.121,-57.55, '({}, {})'.format(0.121,-57.55))
plt.plot(1.21,0,'o')
plt.text(1.21,0, '({}, {})'.format(1.21,0))
plt.legend()
plt.subplot(2,1,2)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Phase(degree)')
plt.semilogx(w1, phase1,label='Uncompensated') # Phase plot for G(s)
plt.semilogx(w2, phase2,label='Compensator') # Phase plot for Gc(s)
plt.semilogx(w3, phase3,label='Compensated') # Phase plot for G(s)*Gc(s)
plt.annotate('', (1.21,-117), (1.21,-127), arrowprops=dict(facecolor='red',arrowstyle='<|-|>',mutation_scale=15))
plt.annotate("Lag in Phase",(1.21,-117))
plt.plot(38.95,-184,'o')
plt.text(38.95,-184, '({}, {})'.format(38.95,-184))
plt.legend()
#if using termux
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.pdf')
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.eps')
subprocess.run(shlex.split("termux-open ./figs/ee18btech11030/ee18btech11030_2.pdf"))
#else
#plt.show()
| [
"[email protected]"
]
| |
c9f81bef1f3181735e2d92ff5e734356f7d6e16f | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/SQLAlchemy/sqlalchemy/cimmutabledict.pyi | 1a1a3006afc360bf3f13c4a33677a997d14fb729 | [
"Apache-2.0",
"MIT"
]
| permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 737 | pyi | from _typeshed import SupportsKeysAndGetItem
from collections.abc import Iterable
from typing import Generic, TypeVar, overload
from typing_extensions import final
_KT = TypeVar("_KT")
_KT2 = TypeVar("_KT2")
_VT = TypeVar("_VT")
_VT2 = TypeVar("_VT2")
@final
class immutabledict(dict[_KT, _VT], Generic[_KT, _VT]):
@overload
def union(self, __dict: dict[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
@overload
def union(self, __dict: None = None, **kw: SupportsKeysAndGetItem[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
def merge_with(
self, *args: SupportsKeysAndGetItem[_KT | _KT2, _VT2] | Iterable[tuple[_KT2, _VT2]] | None
) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
| [
"[email protected]"
]
| |
20a079bd1af4db6c499e81e182bb3635f71069b9 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop256_pad20_jit15/pyr_3s/L8/step09_3side_L8.py | 492f375600b24d9111789d8a77bc4776a8444e6d | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82,606 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
use_what_gen_op = I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale= 0) )
use_what_train_step = Train_step_I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15) )
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
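### Note (added for readability): each list below holds 17 per-level conv-block
### counts for the depth_level=8 U-Net used later (8 encoder levels + bottleneck
### + 8 decoder levels, mirrored). See the hedged build_pyramid() helper after
### the literals for the pattern the names appear to encode.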
# 3
pyramid_1side_1__2side_1__3side_1 = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]
# 6
pyramid_1side_2__2side_1__3side_1 = [3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3]
pyramid_1side_2__2side_2__3side_1 = [3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3]
pyramid_1side_2__2side_2__3side_2 = [3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3]
# 10
pyramid_1side_3__2side_1__3side_1 = [3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3]
pyramid_1side_3__2side_2__3side_1 = [3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3]
pyramid_1side_3__2side_2__3side_2 = [3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3]
pyramid_1side_3__2side_3__3side_1 = [3, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3]
pyramid_1side_3__2side_3__3side_2 = [3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3]
pyramid_1side_3__2side_3__3side_3 = [3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3]
# 15
pyramid_1side_4__2side_1__3side_1 = [3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3]
pyramid_1side_4__2side_2__3side_1 = [3, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3]
pyramid_1side_4__2side_2__3side_2 = [3, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3, 3]
pyramid_1side_4__2side_3__3side_1 = [3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 3]
pyramid_1side_4__2side_3__3side_2 = [3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 3]
pyramid_1side_4__2side_3__3side_3 = [3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3, 3]
pyramid_1side_4__2side_4__3side_1 = [3, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3]
pyramid_1side_4__2side_4__3side_2 = [3, 3, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 3]
pyramid_1side_4__2side_4__3side_3 = [3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3]
pyramid_1side_4__2side_4__3side_4 = [3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3]
# 21
pyramid_1side_5__2side_1__3side_1 = [3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3]
pyramid_1side_5__2side_2__3side_1 = [3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3]
pyramid_1side_5__2side_2__3side_2 = [3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 3]
pyramid_1side_5__2side_3__3side_1 = [3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3]
pyramid_1side_5__2side_3__3side_2 = [3, 3, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3, 3]
pyramid_1side_5__2side_3__3side_3 = [3, 3, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3, 3, 3]
pyramid_1side_5__2side_4__3side_1 = [3, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3]
pyramid_1side_5__2side_4__3side_2 = [3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 3, 3]
pyramid_1side_5__2side_4__3side_3 = [3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 3]
pyramid_1side_5__2side_4__3side_4 = [3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3, 3, 3]
pyramid_1side_5__2side_5__3side_1 = [3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3]
pyramid_1side_5__2side_5__3side_2 = [3, 3, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3]
pyramid_1side_5__2side_5__3side_3 = [3, 3, 3, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 3, 3]
pyramid_1side_5__2side_5__3side_4 = [3, 3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3]
pyramid_1side_5__2side_5__3side_5 = [3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3]
# 28
pyramid_1side_6__2side_1__3side_1 = [3, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 3]
pyramid_1side_6__2side_2__3side_1 = [3, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3]
pyramid_1side_6__2side_2__3side_2 = [3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3]
pyramid_1side_6__2side_3__3side_1 = [3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3]
pyramid_1side_6__2side_3__3side_2 = [3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3]
pyramid_1side_6__2side_3__3side_3 = [3, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 3, 3, 3]
pyramid_1side_6__2side_4__3side_1 = [3, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3]
pyramid_1side_6__2side_4__3side_2 = [3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3]
pyramid_1side_6__2side_4__3side_3 = [3, 3, 3, 2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2, 3, 3, 3]
pyramid_1side_6__2side_4__3side_4 = [3, 3, 3, 3, 1, 1, 0, 0, 0, 0, 0, 1, 1, 3, 3, 3, 3]
pyramid_1side_6__2side_5__3side_1 = [3, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3]
pyramid_1side_6__2side_5__3side_2 = [3, 3, 2, 2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3]
pyramid_1side_6__2side_5__3side_3 = [3, 3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3]
pyramid_1side_6__2side_5__3side_4 = [3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 1, 2, 3, 3, 3, 3]
pyramid_1side_6__2side_5__3side_5 = [3, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 1, 3, 3, 3, 3, 3]
pyramid_1side_6__2side_6__3side_1 = [3, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3]
pyramid_1side_6__2side_6__3side_2 = [3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3]
pyramid_1side_6__2side_6__3side_3 = [3, 3, 3, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3, 3]
pyramid_1side_6__2side_6__3side_4 = [3, 3, 3, 3, 2, 2, 0, 0, 0, 0, 0, 2, 2, 3, 3, 3, 3]
pyramid_1side_6__2side_6__3side_5 = [3, 3, 3, 3, 3, 2, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 3]
pyramid_1side_6__2side_6__3side_6 = [3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3]
# 36
pyramid_1side_7__2side_1__3side_1 = [3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 3]
pyramid_1side_7__2side_2__3side_1 = [3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3]
pyramid_1side_7__2side_2__3side_2 = [3, 3, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 3, 3]
pyramid_1side_7__2side_3__3side_1 = [3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3]
pyramid_1side_7__2side_3__3side_2 = [3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3]
pyramid_1side_7__2side_3__3side_3 = [3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3]
pyramid_1side_7__2side_4__3side_1 = [3, 2, 2, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3]
pyramid_1side_7__2side_4__3side_2 = [3, 3, 2, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3]
pyramid_1side_7__2side_4__3side_3 = [3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3]
pyramid_1side_7__2side_4__3side_4 = [3, 3, 3, 3, 1, 1, 1, 0, 0, 0, 1, 1, 1, 3, 3, 3, 3]
pyramid_1side_7__2side_5__3side_1 = [3, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3]
pyramid_1side_7__2side_5__3side_2 = [3, 3, 2, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3]
pyramid_1side_7__2side_5__3side_3 = [3, 3, 3, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3]
pyramid_1side_7__2side_5__3side_4 = [3, 3, 3, 3, 2, 1, 1, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3]
pyramid_1side_7__2side_5__3side_5 = [3, 3, 3, 3, 3, 1, 1, 0, 0, 0, 1, 1, 3, 3, 3, 3, 3]
pyramid_1side_7__2side_6__3side_1 = [3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 2, 2, 3]
pyramid_1side_7__2side_6__3side_2 = [3, 3, 2, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 2, 3, 3]
pyramid_1side_7__2side_6__3side_3 = [3, 3, 3, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3]
pyramid_1side_7__2side_6__3side_4 = [3, 3, 3, 3, 2, 2, 1, 0, 0, 0, 1, 2, 2, 3, 3, 3, 3]
pyramid_1side_7__2side_6__3side_5 = [3, 3, 3, 3, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 3, 3]
pyramid_1side_7__2side_6__3side_6 = [3, 3, 3, 3, 3, 3, 1, 0, 0, 0, 1, 3, 3, 3, 3, 3, 3]
pyramid_1side_7__2side_7__3side_1 = [3, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_7__2side_7__3side_2 = [3, 3, 2, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_7__2side_7__3side_3 = [3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_7__2side_7__3side_4 = [3, 3, 3, 3, 2, 2, 2, 0, 0, 0, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_7__2side_7__3side_5 = [3, 3, 3, 3, 3, 2, 2, 0, 0, 0, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_7__2side_7__3side_6 = [3, 3, 3, 3, 3, 3, 2, 0, 0, 0, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_7__2side_7__3side_7 = [3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3]
# 45
pyramid_1side_8__2side_1__3side_1 = [3, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 3]
pyramid_1side_8__2side_2__3side_1 = [3, 2, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 2, 3]
pyramid_1side_8__2side_2__3side_2 = [3, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 3, 3]
pyramid_1side_8__2side_3__3side_1 = [3, 2, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 2, 3]
pyramid_1side_8__2side_3__3side_2 = [3, 3, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 3, 3]
pyramid_1side_8__2side_3__3side_3 = [3, 3, 3, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 3, 3, 3]
pyramid_1side_8__2side_4__3side_1 = [3, 2, 2, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 3]
pyramid_1side_8__2side_4__3side_2 = [3, 3, 2, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 3, 3]
pyramid_1side_8__2side_4__3side_3 = [3, 3, 3, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 3, 3, 3]
pyramid_1side_8__2side_4__3side_4 = [3, 3, 3, 3, 1, 1, 1, 1, 0, 1, 1, 1, 1, 3, 3, 3, 3]
pyramid_1side_8__2side_5__3side_1 = [3, 2, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2, 2, 2, 3]
pyramid_1side_8__2side_5__3side_2 = [3, 3, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2, 2, 3, 3]
pyramid_1side_8__2side_5__3side_3 = [3, 3, 3, 2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2, 3, 3, 3]
pyramid_1side_8__2side_5__3side_4 = [3, 3, 3, 3, 2, 1, 1, 1, 0, 1, 1, 1, 2, 3, 3, 3, 3]
pyramid_1side_8__2side_5__3side_5 = [3, 3, 3, 3, 3, 1, 1, 1, 0, 1, 1, 1, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_6__3side_1 = [3, 2, 2, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 2, 2, 3]
pyramid_1side_8__2side_6__3side_2 = [3, 3, 2, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 2, 3, 3]
pyramid_1side_8__2side_6__3side_3 = [3, 3, 3, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 3, 3, 3]
pyramid_1side_8__2side_6__3side_4 = [3, 3, 3, 3, 2, 2, 1, 1, 0, 1, 1, 2, 2, 3, 3, 3, 3]
pyramid_1side_8__2side_6__3side_5 = [3, 3, 3, 3, 3, 2, 1, 1, 0, 1, 1, 2, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_6__3side_6 = [3, 3, 3, 3, 3, 3, 1, 1, 0, 1, 1, 3, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_7__3side_1 = [3, 2, 2, 2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_8__2side_7__3side_2 = [3, 3, 2, 2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_8__2side_7__3side_3 = [3, 3, 3, 2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_8__2side_7__3side_4 = [3, 3, 3, 3, 2, 2, 2, 1, 0, 1, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_8__2side_7__3side_5 = [3, 3, 3, 3, 3, 2, 2, 1, 0, 1, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_7__3side_6 = [3, 3, 3, 3, 3, 3, 2, 1, 0, 1, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_7__3side_7 = [3, 3, 3, 3, 3, 3, 3, 1, 0, 1, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_8__3side_1 = [3, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_8__2side_8__3side_2 = [3, 3, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_8__2side_8__3side_3 = [3, 3, 3, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_8__2side_8__3side_4 = [3, 3, 3, 3, 2, 2, 2, 2, 0, 2, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_8__2side_8__3side_5 = [3, 3, 3, 3, 3, 2, 2, 2, 0, 2, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_8__3side_6 = [3, 3, 3, 3, 3, 3, 2, 2, 0, 2, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_8__3side_7 = [3, 3, 3, 3, 3, 3, 3, 2, 0, 2, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_8__2side_8__3side_8 = [3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3]
# 55
pyramid_1side_9__2side_1__3side_1 = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3]
pyramid_1side_9__2side_2__3side_1 = [3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3]
pyramid_1side_9__2side_2__3side_2 = [3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3]
pyramid_1side_9__2side_3__3side_1 = [3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3]
pyramid_1side_9__2side_3__3side_2 = [3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3]
pyramid_1side_9__2side_3__3side_3 = [3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3]
pyramid_1side_9__2side_4__3side_1 = [3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3]
pyramid_1side_9__2side_4__3side_2 = [3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3]
pyramid_1side_9__2side_4__3side_3 = [3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3]
pyramid_1side_9__2side_4__3side_4 = [3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3]
pyramid_1side_9__2side_5__3side_1 = [3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3]
pyramid_1side_9__2side_5__3side_2 = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3]
pyramid_1side_9__2side_5__3side_3 = [3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3]
pyramid_1side_9__2side_5__3side_4 = [3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3]
pyramid_1side_9__2side_5__3side_5 = [3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_6__3side_1 = [3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3]
pyramid_1side_9__2side_6__3side_2 = [3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3]
pyramid_1side_9__2side_6__3side_3 = [3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3]
pyramid_1side_9__2side_6__3side_4 = [3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3]
pyramid_1side_9__2side_6__3side_5 = [3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_6__3side_6 = [3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_7__3side_1 = [3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_9__2side_7__3side_2 = [3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_9__2side_7__3side_3 = [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_9__2side_7__3side_4 = [3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_9__2side_7__3side_5 = [3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_7__3side_6 = [3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_7__3side_7 = [3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_8__3side_1 = [3, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_9__2side_8__3side_2 = [3, 3, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_9__2side_8__3side_3 = [3, 3, 3, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_9__2side_8__3side_4 = [3, 3, 3, 3, 2, 2, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_9__2side_8__3side_5 = [3, 3, 3, 3, 3, 2, 2, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_8__3side_6 = [3, 3, 3, 3, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_8__3side_7 = [3, 3, 3, 3, 3, 3, 3, 2, 1, 2, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_8__3side_8 = [3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_1 = [3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3]
pyramid_1side_9__2side_9__3side_2 = [3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3]
pyramid_1side_9__2side_9__3side_3 = [3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3]
pyramid_1side_9__2side_9__3side_4 = [3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_5 = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_6 = [3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_7 = [3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_8 = [3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3]
pyramid_1side_9__2side_9__3side_9 = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
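### Hedged helper (added for illustration; the hand-written literals above stay
### the source of truth): the pyramid lists follow a regular mirrored pattern.
### half=8 matches the depth_level=8 used by the builders below.
def build_pyramid(s1, s2, s3, half=8):
    def level(i):  # conv-block count at level i, counted from the outermost level
        return 3 if i < s3 else 2 if i < s2 else 1 if i < s1 else 0
    left = [level(i) for i in range(half)]
    return left + [level(half)] + left[::-1]
assert build_pyramid(5, 4, 2) == pyramid_1side_5__2side_4__3side_2
assert build_pyramid(9, 9, 9) == pyramid_1side_9__2side_9__3side_9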
#########################################################################################
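### Each builder below shares the same U-Net3 configuration (hid_ch=32,
### depth_level=8, kernel_size=3 with "valid" padding, sigmoid output,
### ch_upper_bound=2**9); only conv_block_num, taken from the pyramid lists
### above, differs between variants.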
ch032_limit_pyramid_1side_1__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_4__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_5__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_9 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_9, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
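# Editorial sketch (hedged): the declarations above are hand-written and differ only in the
# pyramid_* config passed to conv_block_num. Assuming the existing pyramid_* lists, a
# hypothetical dict `pyramid_configs` mapping "1side_A__2side_B__3side_C" suffixes to those
# lists would let one loop build the same objects. This helper is illustrative only and is
# never called in this file.
def build_ch032_limit_models(pyramid_configs):
    models = {}
    for suffix, conv_block_num in pyramid_configs.items():
        models["ch032_limit_pyramid_" + suffix] = (
            KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_unet3(out_conv_block=True, concat_before_down=True,
                       kernel_size=3, padding="valid", hid_ch=32, depth_level=8,
                       out_ch=1, unet_acti="sigmoid",
                       conv_block_num=conv_block_num, ch_upper_bound=2 ** 9)
            .set_gen_op(use_what_gen_op)
            .set_train_step(use_what_train_step))
    return models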
#########################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
    import numpy as np
    print("build_model cost time:", time.time() - start_time)
    # Smoke test: push one dummy 512x512 single-channel image through one of the models above.
    data = np.zeros(shape=(1, 512, 512, 1))
    use_model = ch032_limit_pyramid_1side_1__2side_1__3side_1
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result.shape)
    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
    print(use_model.model_describe)
| [
"[email protected]"
]
| |
6e0e7be32af312f6e4e5c22864d619f58343b46b | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/qtconsole/usage.py | 9748f0e934f04e3c18259feed28ecd2d79a87874 | []
| no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4999bdffa49207a0fc3b0d1a32be17cab386bc93cb7e7f592a5154ee85dcc4e9
size 8349
| [
"[email protected]"
]
| |
96a1a69d636663d00ed646ff53f6c1fde2ee639b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9zsDKijmBffmnk9AP_4.py | f1bdc518936ccc7193328054c14e0aff9757174a | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py |
def programmers(one, two, three):
return max(one, two, three)-min(one, two, three)
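# Usage sketch: given three programmers' values (any comparable numbers), the function
# returns the spread between the largest and the smallest:
#   programmers(147, 33, 526)  # -> 493
#   programmers(1, 1, 1)       # -> 0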
| [
"[email protected]"
]
| |
7ab25735908dffad4ff145d77a16b3adf7334ef5 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/djcelery/tests/_compat.py | 4969b5c033405ba7bf924e2166b838b11922e304 | [
"Apache-2.0"
]
| permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 113 | py | # coding: utf-8
try:
from unittest.mock import patch
except ImportError:
from mock import patch # noqa
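# Minimal usage sketch (the target "djcelery.views.some_helper" is hypothetical): the
# try/except above resolves `patch` from the stdlib on Python 3 and from the external
# `mock` package on Python 2, so test code can use it uniformly:
#   with patch("djcelery.views.some_helper", return_value=42):
#       ...  # code under test sees the patched helper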
| [
"[email protected]"
]
| |
047660a9b15f645d34c790dbd31c938415f1e740 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/models/__init__.py | 82c172aa1eb5e798e13af3d8f39e6216f291614d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 12,525 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccountSasParameters
from ._models_py3 import ActiveDirectoryProperties
from ._models_py3 import AzureEntityResource
from ._models_py3 import AzureFilesIdentityBasedAuthentication
from ._models_py3 import BlobContainer
from ._models_py3 import BlobInventoryPolicy
from ._models_py3 import BlobInventoryPolicyDefinition
from ._models_py3 import BlobInventoryPolicyFilter
from ._models_py3 import BlobInventoryPolicyRule
from ._models_py3 import BlobInventoryPolicySchema
from ._models_py3 import BlobRestoreParameters
from ._models_py3 import BlobRestoreRange
from ._models_py3 import BlobRestoreStatus
from ._models_py3 import BlobServiceItems
from ._models_py3 import BlobServiceProperties
from ._models_py3 import ChangeFeed
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import CorsRule
from ._models_py3 import CorsRules
from ._models_py3 import CustomDomain
from ._models_py3 import DateAfterCreation
from ._models_py3 import DateAfterModification
from ._models_py3 import DeleteRetentionPolicy
from ._models_py3 import DeletedAccount
from ._models_py3 import DeletedAccountListResult
from ._models_py3 import DeletedShare
from ._models_py3 import Dimension
from ._models_py3 import Encryption
from ._models_py3 import EncryptionIdentity
from ._models_py3 import EncryptionScope
from ._models_py3 import EncryptionScopeKeyVaultProperties
from ._models_py3 import EncryptionScopeListResult
from ._models_py3 import EncryptionService
from ._models_py3 import EncryptionServices
from ._models_py3 import Endpoints
from ._models_py3 import ErrorResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import ExtendedLocation
from ._models_py3 import FileServiceItems
from ._models_py3 import FileServiceProperties
from ._models_py3 import FileShare
from ._models_py3 import FileShareItem
from ._models_py3 import FileShareItems
from ._models_py3 import GeoReplicationStats
from ._models_py3 import IPRule
from ._models_py3 import Identity
from ._models_py3 import ImmutabilityPolicy
from ._models_py3 import ImmutabilityPolicyProperties
from ._models_py3 import KeyCreationTime
from ._models_py3 import KeyPolicy
from ._models_py3 import KeyVaultProperties
from ._models_py3 import LastAccessTimeTrackingPolicy
from ._models_py3 import LeaseContainerRequest
from ._models_py3 import LeaseContainerResponse
from ._models_py3 import LegalHold
from ._models_py3 import LegalHoldProperties
from ._models_py3 import ListAccountSasResponse
from ._models_py3 import ListBlobInventoryPolicy
from ._models_py3 import ListContainerItem
from ._models_py3 import ListContainerItems
from ._models_py3 import ListQueue
from ._models_py3 import ListQueueResource
from ._models_py3 import ListQueueServices
from ._models_py3 import ListServiceSasResponse
from ._models_py3 import ListTableResource
from ._models_py3 import ListTableServices
from ._models_py3 import ManagementPolicy
from ._models_py3 import ManagementPolicyAction
from ._models_py3 import ManagementPolicyBaseBlob
from ._models_py3 import ManagementPolicyDefinition
from ._models_py3 import ManagementPolicyFilter
from ._models_py3 import ManagementPolicyRule
from ._models_py3 import ManagementPolicySchema
from ._models_py3 import ManagementPolicySnapShot
from ._models_py3 import ManagementPolicyVersion
from ._models_py3 import MetricSpecification
from ._models_py3 import Multichannel
from ._models_py3 import NetworkRuleSet
from ._models_py3 import ObjectReplicationPolicies
from ._models_py3 import ObjectReplicationPolicy
from ._models_py3 import ObjectReplicationPolicyFilter
from ._models_py3 import ObjectReplicationPolicyRule
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProtocolSettings
from ._models_py3 import ProxyResource
from ._models_py3 import QueueServiceProperties
from ._models_py3 import Resource
from ._models_py3 import ResourceAccessRule
from ._models_py3 import RestorePolicyProperties
from ._models_py3 import Restriction
from ._models_py3 import RoutingPreference
from ._models_py3 import SKUCapability
from ._models_py3 import SasPolicy
from ._models_py3 import ServiceSasParameters
from ._models_py3 import ServiceSpecification
from ._models_py3 import Sku
from ._models_py3 import SkuInformation
from ._models_py3 import SmbSetting
from ._models_py3 import StorageAccount
from ._models_py3 import StorageAccountCheckNameAvailabilityParameters
from ._models_py3 import StorageAccountCreateParameters
from ._models_py3 import StorageAccountInternetEndpoints
from ._models_py3 import StorageAccountKey
from ._models_py3 import StorageAccountListKeysResult
from ._models_py3 import StorageAccountListResult
from ._models_py3 import StorageAccountMicrosoftEndpoints
from ._models_py3 import StorageAccountRegenerateKeyParameters
from ._models_py3 import StorageAccountUpdateParameters
from ._models_py3 import StorageQueue
from ._models_py3 import StorageSkuListResult
from ._models_py3 import SystemData
from ._models_py3 import Table
from ._models_py3 import TableServiceProperties
from ._models_py3 import TagFilter
from ._models_py3 import TagProperty
from ._models_py3 import TrackedResource
from ._models_py3 import UpdateHistoryProperty
from ._models_py3 import Usage
from ._models_py3 import UsageListResult
from ._models_py3 import UsageName
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import VirtualNetworkRule
from ._storage_management_client_enums import (
AccessTier,
AccountStatus,
BlobInventoryPolicyName,
BlobRestoreProgressStatus,
Bypass,
CorsRuleAllowedMethodsItem,
CreatedByType,
DefaultAction,
DirectoryServiceOptions,
EnabledProtocols,
EncryptionScopeSource,
EncryptionScopeState,
ExpirationAction,
ExtendedLocationTypes,
GeoReplicationStatus,
HttpProtocol,
IdentityType,
ImmutabilityPolicyState,
ImmutabilityPolicyUpdateType,
InventoryRuleType,
KeyPermission,
KeySource,
KeyType,
Kind,
LargeFileSharesState,
LeaseContainerRequestAction,
LeaseDuration,
LeaseState,
LeaseStatus,
ListContainersInclude,
ListSharesExpand,
ManagementPolicyName,
MinimumTlsVersion,
Name,
Permissions,
PrivateEndpointConnectionProvisioningState,
PrivateEndpointServiceConnectionStatus,
ProvisioningState,
PublicAccess,
PutSharesExpand,
Reason,
ReasonCode,
RootSquashType,
RoutingChoice,
RuleType,
Services,
ShareAccessTier,
SignedResource,
SignedResourceTypes,
SkuName,
SkuTier,
State,
StorageAccountExpand,
UsageUnit,
)
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'AccountSasParameters',
'ActiveDirectoryProperties',
'AzureEntityResource',
'AzureFilesIdentityBasedAuthentication',
'BlobContainer',
'BlobInventoryPolicy',
'BlobInventoryPolicyDefinition',
'BlobInventoryPolicyFilter',
'BlobInventoryPolicyRule',
'BlobInventoryPolicySchema',
'BlobRestoreParameters',
'BlobRestoreRange',
'BlobRestoreStatus',
'BlobServiceItems',
'BlobServiceProperties',
'ChangeFeed',
'CheckNameAvailabilityResult',
'CloudErrorBody',
'CorsRule',
'CorsRules',
'CustomDomain',
'DateAfterCreation',
'DateAfterModification',
'DeleteRetentionPolicy',
'DeletedAccount',
'DeletedAccountListResult',
'DeletedShare',
'Dimension',
'Encryption',
'EncryptionIdentity',
'EncryptionScope',
'EncryptionScopeKeyVaultProperties',
'EncryptionScopeListResult',
'EncryptionService',
'EncryptionServices',
'Endpoints',
'ErrorResponse',
'ErrorResponseBody',
'ExtendedLocation',
'FileServiceItems',
'FileServiceProperties',
'FileShare',
'FileShareItem',
'FileShareItems',
'GeoReplicationStats',
'IPRule',
'Identity',
'ImmutabilityPolicy',
'ImmutabilityPolicyProperties',
'KeyCreationTime',
'KeyPolicy',
'KeyVaultProperties',
'LastAccessTimeTrackingPolicy',
'LeaseContainerRequest',
'LeaseContainerResponse',
'LegalHold',
'LegalHoldProperties',
'ListAccountSasResponse',
'ListBlobInventoryPolicy',
'ListContainerItem',
'ListContainerItems',
'ListQueue',
'ListQueueResource',
'ListQueueServices',
'ListServiceSasResponse',
'ListTableResource',
'ListTableServices',
'ManagementPolicy',
'ManagementPolicyAction',
'ManagementPolicyBaseBlob',
'ManagementPolicyDefinition',
'ManagementPolicyFilter',
'ManagementPolicyRule',
'ManagementPolicySchema',
'ManagementPolicySnapShot',
'ManagementPolicyVersion',
'MetricSpecification',
'Multichannel',
'NetworkRuleSet',
'ObjectReplicationPolicies',
'ObjectReplicationPolicy',
'ObjectReplicationPolicyFilter',
'ObjectReplicationPolicyRule',
'Operation',
'OperationDisplay',
'OperationListResult',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionListResult',
'PrivateLinkResource',
'PrivateLinkResourceListResult',
'PrivateLinkServiceConnectionState',
'ProtocolSettings',
'ProxyResource',
'QueueServiceProperties',
'Resource',
'ResourceAccessRule',
'RestorePolicyProperties',
'Restriction',
'RoutingPreference',
'SKUCapability',
'SasPolicy',
'ServiceSasParameters',
'ServiceSpecification',
'Sku',
'SkuInformation',
'SmbSetting',
'StorageAccount',
'StorageAccountCheckNameAvailabilityParameters',
'StorageAccountCreateParameters',
'StorageAccountInternetEndpoints',
'StorageAccountKey',
'StorageAccountListKeysResult',
'StorageAccountListResult',
'StorageAccountMicrosoftEndpoints',
'StorageAccountRegenerateKeyParameters',
'StorageAccountUpdateParameters',
'StorageQueue',
'StorageSkuListResult',
'SystemData',
'Table',
'TableServiceProperties',
'TagFilter',
'TagProperty',
'TrackedResource',
'UpdateHistoryProperty',
'Usage',
'UsageListResult',
'UsageName',
'UserAssignedIdentity',
'VirtualNetworkRule',
'AccessTier',
'AccountStatus',
'BlobInventoryPolicyName',
'BlobRestoreProgressStatus',
'Bypass',
'CorsRuleAllowedMethodsItem',
'CreatedByType',
'DefaultAction',
'DirectoryServiceOptions',
'EnabledProtocols',
'EncryptionScopeSource',
'EncryptionScopeState',
'ExpirationAction',
'ExtendedLocationTypes',
'GeoReplicationStatus',
'HttpProtocol',
'IdentityType',
'ImmutabilityPolicyState',
'ImmutabilityPolicyUpdateType',
'InventoryRuleType',
'KeyPermission',
'KeySource',
'KeyType',
'Kind',
'LargeFileSharesState',
'LeaseContainerRequestAction',
'LeaseDuration',
'LeaseState',
'LeaseStatus',
'ListContainersInclude',
'ListSharesExpand',
'ManagementPolicyName',
'MinimumTlsVersion',
'Name',
'Permissions',
'PrivateEndpointConnectionProvisioningState',
'PrivateEndpointServiceConnectionStatus',
'ProvisioningState',
'PublicAccess',
'PutSharesExpand',
'Reason',
'ReasonCode',
'RootSquashType',
'RoutingChoice',
'RuleType',
'Services',
'ShareAccessTier',
'SignedResource',
'SignedResourceTypes',
'SkuName',
'SkuTier',
'State',
'StorageAccountExpand',
'UsageUnit',
]
__all__.extend([p for p in _patch_all if p not in __all__])
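# Usage sketch: downstream code typically imports these re-exported models and enums from
# this package rather than from the private modules, e.g.
#   from azure.mgmt.storage.v2021_02_01.models import Sku, SkuName, StorageAccountCreateParameters
# (illustrative only; the consuming code lives outside this file).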
_patch_sdk()
| [
"[email protected]"
]
| |
8cb2376ed52ba4138dc95464f109798211500d6a | 4d9b71dc822dd62cade383629ea8ef469d2e83ae | /planning/SpCoNavi0.1.py | d05de2b52e4530add0ef3afd16f9a86a6519b889 | [
"MIT"
]
| permissive | sunnySKYwhy/SpCoNavi | cb2eaded8de5c0d5ec254d415dcc3418783db7f1 | 88edac8b204ad58380a00685f7d5159d5d937271 | refs/heads/master | 2023-03-19T23:52:29.411030 | 2020-02-19T11:57:54 | 2020-02-19T11:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,383 | py | #coding:utf-8
###########################################################
# SpCoNavi: Spatial Concept-based Path-Planning Program (under development)
# Akira Taniguchi 2018/12/13-2019/3/10-
###########################################################
##########---pending tasks---##########
#Test runs and debugging
#Remove redundancy, further speed-up
##########---completed tasks---##########
##Kept the character encoding as SJIS
##Currently Xt is computed as 2D (x,y); the heading angle (theta) is not considered
##Be careful whether each array is a list or a numpy.array
##A large map may consume a lot of memory and slow down processing
##The state-transition probability (motion model) is approximated with a deterministic model
##range() -> xrange()
##Speed-up (?) and parallelization (?) with numba's jit
##Path stores both the ROS coordinate frame and the indices on the 2D array
##Use log in the ViterbiPath computation: PathWeightMap is computed/stored as probabilities, Transition is computed/stored in log
##Made it possible to load precomputable quantities from files as much as possible
###(word dictionary generation, word recognition results (N-best), precomputable probability values, Transition (kept for each T_horizon), ...)
##Changed the Viterbi computation so that Transition is not used as-is (it was wasteful and a cause of memory consumption and long processing time)
##Partially numpy-fied the Viterbi update function (speed-up)
#sum_i_GaussMulti was not an np.array (?) => converted it to np.array, which should not change the result (2019/02/17) => reverted because numba throws an error with np.array.
###Unverified / unused
#pi_2_pi
#Prob_Triangular_distribution_pdf
#Motion_Model_Odometry
#Motion_Model_Odometry_No_theta
###Verified
#ReadParameters
#ReadSpeech
#SpeechRecognition
#WordDictionaryUpdate2
#SavePath
#SaveProbMap
#ReadMap
#ReadCostMap
#PathPlanner
#ViterbiPath
##########---on hold---##########
#Implementation that computes the state-transition probability (motion model) with a probabilistic model
#Implementation of approximation methods to reduce the number of states
#Parallel processing
#SendPath
#SendProbMap
#PathDistance
#PostProbXt
##############################################
import os
import sys
import glob
import time
import random
import numpy as np
import scipy as sp
#from numpy.random import multinomial #,uniform #,dirichlet
from scipy.stats import multivariate_normal,multinomial #,t,invwishart,rv_discrete
#from numpy.linalg import inv, cholesky
from math import pi as PI
from math import cos,sin,sqrt,exp,log,degrees,radians,atan2 #,gamma,lgamma,fabs,fsum
from __init__ import *
from JuliusNbest_dec import *
from submodules import *
from numba import jit, njit, prange
from scipy.io import mmwrite, mmread
from scipy.sparse import lil_matrix, csr_matrix
from itertools import izip
#Read the map -> convert to probability values -> store in a 2D array
def ReadMap(outputfile):
#outputfolder + trialname + navigation_folder + map.csv
gridmap = np.loadtxt(outputfile + "map.csv", delimiter=",")
print "Read map: " + outputfile + "map.csv"
return gridmap
#Read the costmap -> convert to probability values -> store in a 2D array
def ReadCostMap(outputfile):
#outputfolder + trialname + navigation_folder + costmap.csv
costmap = np.loadtxt(outputfile + "costmap.csv", delimiter=",")
print "Read costmap: " + outputfile + "costmap.csv"
return costmap
#Read the learned parameters of the spatial concepts
def ReadParameters(particle_num, filename):
#THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
r = particle_num
i = 0
for line in open(filename + 'index' + str(r) + '.csv', 'r'): ##read
itemList = line[:-1].split(',')
#print itemList
if (i == 0):
L = len(itemList) -1
elif (i == 1):
K = len(itemList) -1
i += 1
print "L:",L,"K:",K
W_index = []
i = 0
#Read the text file
for line in open(filename + 'W_list' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
if(i == 0):
for j in xrange(len(itemList)):
if (itemList[j] != ""):
W_index = W_index + [itemList[j]]
i = i + 1
#####Load the parameters W, μ, Σ, φ, π#####
Mu = [ np.array([ 0.0, 0.0 ]) for i in xrange(K) ] #[ np.array([[ 0.0 ],[ 0.0 ]]) for i in xrange(K) ] #mean of each position distribution (x,y) [K]
Sig = [ np.array([ [0.0, 0.0],[0.0, 0.0] ]) for i in xrange(K) ] #covariance of each position distribution (2x2) [K]
W = [ [0.0 for j in xrange(len(W_index))] for c in xrange(L) ] #place names (multinomial: W_index dimensions) [L]
#theta = [ [0.0 for j in xrange(DimImg)] for c in xrange(L) ]
Pi = [ 0.0 for c in xrange(L)] #multinomial over the spatial concept indices (L dimensions)
Phi_l = [ [0.0 for i in xrange(K)] for c in xrange(L) ] #multinomial over the position distribution indices (K dimensions) [L]
i = 0
##Read Mu
for line in open(filename + 'mu' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
Mu[i] = np.array([ float(itemList[0]) , float(itemList[1]) ])
#Mu[i] = np.array([[ float(itemList[0]) ],[ float(itemList[1]) ]])
i = i + 1
i = 0
##Read Sig
for line in open(filename + 'sig' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
Sig[i] = np.array([[ float(itemList[0]), float(itemList[1]) ], [ float(itemList[2]), float(itemList[3]) ]])
i = i + 1
##Read phi
c = 0
#Read the text file
for line in open(filename + 'phi' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != "":
Phi_l[c][i] = float(itemList[i])
c = c + 1
##Read Pi
for line in open(filename + 'pi' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
Pi[i] = float(itemList[i])
##Read W
c = 0
#Read the text file
for line in open(filename + 'W' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
#print c,i,itemList[i]
W[c][i] = float(itemList[i])
c = c + 1
"""
##Read theta
c = 0
#Read the text file
for line in open(filename + 'theta' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
#print c,i,itemList[i]
theta[c][i] = float(itemList[i])
c = c + 1
"""
THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
return THETA
#Read the speech file
def ReadSpeech(num):
# specify the wav file
files = glob.glob(speech_folder_go)
files.sort()
speech_file = files[num]
return speech_file
#Receive the speech data and perform speech recognition -> return and save the string array
def SpeechRecognition(speech_file, W_index, step, trialname, outputfile):
##Recognize the speech with the learned word dictionary and obtain a BoW
St = RecogNbest( speech_file, step, trialname )
#print St
Otb_B = [0 for i in xrange(len(W_index))] #[[] for j in xrange(len(St))]
for j in xrange(len(St)):
for i in xrange(5):
St[j] = St[j].replace("<s>", "")
St[j] = St[j].replace("</s>", "")
St[j] = St[j].replace(" <s> ", "")
St[j] = St[j].replace("<sp>", "")
St[j] = St[j].replace(" </s>", "")
St[j] = St[j].replace(" ", " ")
St[j] = St[j].replace("\n", "")
print j,St[j]
Otb = St[j].split(" ")
for j2 in xrange(len(Otb)):
#print n,j,len(Otb_Samp[r][n])
for i in xrange(len(W_index)):
#print W_index[i].decode('sjis'),Otb[j]
if (W_index[i].decode('sjis') == Otb[j2] ): #'utf8'
Otb_B[i] = Otb_B[i] + 1
#print W_index[i].decode('sjis'),Otb[j]
print Otb_B
# save the recognition results to a file
f = open( outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_St.csv" , "w") # , "sjis" )
for i in xrange(len(St)):
f.write(St[i].encode('sjis'))
f.write('\n')
f.close()
return Otb_B
#Convert an angle to [-π,π] (reference: https://github.com/AtsushiSakai/PythonRobotics)
def pi_2_pi(angle):
return (angle + PI) % (2 * PI) - PI
#Probability density function of the triangular distribution
def Prob_Triangular_distribution_pdf(a,b):
prob = max( 0, ( 1 / (sqrt(6)*b) ) - ( abs(a) / (6*(b**2)) ) )
return prob
#Select the probability distribution
def Motion_Model_Prob(a,b):
if (MotionModelDist == "Gauss"):
p = multivariate_normal.pdf(a, 0, b)
elif (MotionModelDist == "Triangular"):
p = Prob_Triangular_distribution_pdf(a, b)
return p
#Odometry motion model (Probabilistic Robotics p.122) #currently unused
def Motion_Model_Odometry(xt,ut,xt_1):
#ut = (xt_1_bar, xt_bar), xt_1_bar = (x_bar, y_bar, theta_bar), xt_bar = (x_dash_bar, y_dash_bar, theta_dash_bar)
x_dash, y_dash, theta_dash = xt
x, y, theta = xt_1
xt_1_bar, xt_bar = ut
x_dash_bar, y_dash_bar, theta_dash_bar = xt_bar
x_bar, y_bar, theta_bar = xt_1_bar
delta_rot1 = atan2(y_dash_bar - y_bar, x_dash_bar - x_bar) - theta_bar
delta_trans = sqrt( (x_dash_bar - x_bar)**2 + (y_dash_bar - y_bar)**2 )
delta_rot2 = theta_dash_bar - theta_bar - delta_rot1
delta_rot1_hat = atan2(y_dash - y, x_dash - x) - theta
delta_trans_hat = sqrt( (x_dash - x)**2 + (y_dash - y)**2 )
delta_rot2_hat = theta_dash - theta - delta_rot1_hat
p1 = Motion_Model_Prob(pi_2_pi(delta_rot1 - delta_rot1_hat), odom_alpha1*(delta_rot1_hat**2) + odom_alpha2*(delta_trans_hat**2))
p2 = Motion_Model_Prob(delta_trans - delta_trans_hat, odom_alpha3*(delta_trans_hat**2) + odom_alpha4*(delta_rot1_hat**2+delta_rot2_hat**2))
p3 = Motion_Model_Prob(pi_2_pi(delta_rot2 - delta_rot2_hat), odom_alpha1*(delta_rot2_hat**2) + odom_alpha2*(delta_trans_hat**2))
return p1*p2*p3
#Odometry motion model (simplified) #the angle is ignored; the probability depends on the travel distance (donut-shaped distribution)
def Motion_Model_Odometry_No_theta(xt,ut,xt_1):
#ut = (xt_1_bar, xt_bar), xt_1_bar = (x_bar, y_bar), xt_bar = (x_dash_bar, y_dash_bar)
#ut only needs to give the relative positions
x_dash, y_dash = xt
x, y = xt_1
delta_trans = cmd_vel #sqrt( (x_dash_bar - x_bar)**2 + (y_dash_bar - y_bar)**2 )
delta_trans_hat = sqrt( (x_dash - x)**2 + (y_dash - y)**2 )
p2 = Motion_Model_Prob( delta_trans - delta_trans_hat, odom_alpha3*(delta_trans_hat**2) )
return p2 #p1*p2*p3
#Motion model (original) #the angle is ignored; the probability depends on the destination position (Gaussian distribution)
def Motion_Model_Original(xt,ut,xt_1):
xt = np.array(xt)
#ut = np.array(ut)
xt_1 = np.array(xt_1)
dist = np.sum((xt-xt_1)**2)
px = Motion_Model_Prob( xt[0] - (xt_1[0]+ut[0]), odom_alpha3*dist )
py = Motion_Model_Prob( xt[1] - (xt_1[1]+ut[1]), odom_alpha3*dist )
return px*py
#Map the ROS map coordinate system to 2D array indices in Python
def Map_coordinates_To_Array_index(X):
X = np.array(X)
Index = np.round( (X - origin) / resolution ).astype(int) #round to the nearest integer and cast to int
return Index
#Convert 2D array indices in Python back to the ROS map coordinate system
def Array_index_To_Map_coordinates(Index):
Index = np.array(Index)
X = np.array( (Index * resolution) + origin )
return X
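# Illustrative note (added): with e.g. resolution = 0.05 and origin = np.array([-10.0, -10.0])
# (the actual values come from __init__), Map_coordinates_To_Array_index([0.0, 0.0]) -> [200, 200]
# and Array_index_To_Map_coordinates([200, 200]) -> [0.0, 0.0]; the two functions are inverses
# of each other up to the rounding done by np.round.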
#Precompute CostMapProb in probability form from the gridmap and costmap
@jit(parallel=True)
def CostMapProb_jit(gridmap, costmap):
CostMapProb = (100.0 - costmap) / 100.0 #convert the costmap to probabilities
#mask so that only cells whose grid value is 0 (unoccupied) keep a value
GridMapProb = 1*(gridmap == 0) #gridmap * (gridmap != 100) * (gridmap != -1) #set the probability to 0 where gridmap[][] is an obstacle (100) or unexplored (-1)
return CostMapProb * GridMapProb
#@jit(nopython=True, parallel=True)
@jit(parallel=True) #not parallelized? only one CPU is used
def PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
PostProbMap = np.zeros((map_length,map_width))
#naive implementation (heavy use of for loops)
#memo: would np.vectorize or np.frompyfunc be faster?
for length in prange(map_length):
for width in prange(map_width):
if (CostMapProb[length][width] != 0.0): #(gridmap[length][width] != -1) and (gridmap[length][width] != 100): #skip the computation where gridmap[][] is an obstacle (100) or unexplored (-1)
X_temp = Array_index_To_Map_coordinates([width, length]) #need to check that the map axes match the length/width axes
#print X_temp,Mu
sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ]
#sum_c_ProbCtsum_i = np.sum( LookupTable_ProbCt * sum_i_GaussMulti )
PostProbMap[length][width] = np.sum( LookupTable_ProbCt * sum_i_GaussMulti ) #sum_c_ProbCtsum_i
return CostMapProb * PostProbMap
@jit(parallel=True)
def PostProb_ij(Index_temp,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0):
X_temp = Array_index_To_Map_coordinates(Index_temp) #need to check that the map axes match the length/width axes
#print X_temp,Mu
sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ] ##########np.array( ) !!! numba raises an error if this is made an np.array
PostProb = np.sum( LookupTable_ProbCt * sum_i_GaussMulti ) #sum_c_ProbCtsum_i
else:
PostProb = 0.0
return PostProb
#@jit(parallel=True) #not parallelized? only one CPU is used
def PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K): #,IndexMap):
PostProbMap = np.array([ [ PostProb_ij([width, length],Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) for width in xrange(map_width) ] for length in xrange(map_length) ])
return CostMapProb * PostProbMap
#@jit(nopython=True, parallel=True)
#@jit #(parallel=True) #raises an error for some reason
def Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
#Transition = np.ones((state_num,state_num)) * approx_log_zero
Transition = [[approx_log_zero for j in range(state_num)] for i in range(state_num)]
print "Memory OK"
#print IndexMap_one_NOzero
#It suffices to consider transitions only between the currently assumed cell and its 8 neighboring cells
for n in prange(state_num):
#Index_2D = IndexMap_one_NOzero[n] #.tolist()
MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #.tolist() #Index_2D #convert to absolute coordinates
MoveIndex_list_n_list = MoveIndex_list_n.tolist()
for c in prange(len(MoveIndex_list_n_list)):
#print c
if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c]) #c is not necessarily a movable state (cell)
Transition[n][m] = 0.0 #1 #this index is the state-to-state transition probability (not the map x,y)
# print n,m,c
return Transition
def Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
Transition = lil_matrix((state_num,state_num)) #[[0 for j in range(state_num)] for i in range(state_num)])
print "Memory OK"
#It suffices to consider transitions only between the currently assumed cell and its 8 neighboring cells
for n in xrange(state_num):
#Index_2D = IndexMap_one_NOzero[n] #.tolist()
MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #.tolist() #Index_2D #convert to absolute coordinates
MoveIndex_list_n_list = MoveIndex_list_n.tolist()
for c in xrange(len(MoveIndex_list_n_list)):
if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero): #try:
m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c]) #c is not necessarily a movable state (cell)
Transition[n,m] = 1 #this index is the state-to-state transition probability (not the map x,y)
# print n,m,c
#Transition_csr = Transition.tocsr()
#print "Transformed sparse csr format OK"
return Transition.tocsr() #Transition_csr
#Global path estimation by dynamic programming (the SpCoNavi computation)
def PathPlanner(S_Nbest, X_init, THETA, CostMapProb): #gridmap, costmap):
print "[RUN] PathPlanner"
#unpack THETA
W, W_index, Mu, Sig, Pi, Phi_l, K, L = THETA
#convert the current position in the ROS coordinate system to 2D array indices
X_init_index = X_init ###TEST #Map_coordinates_To_Array_index(X_init)
print "Initial Xt:",X_init_index
#measure the map size (length and width) in cells
map_length = len(CostMapProb) #len(costmap)
map_width = len(CostMapProb[0]) #len(costmap[0])
print "MAP[length][width]:",map_length,map_width
#precompute whatever can be precomputed
LookupTable_ProbCt = np.array([multinomial.pmf(S_Nbest, sum(S_Nbest), W[c])*Pi[c] for c in xrange(L)]) #probability values p(St|W_Ct) x p(Ct|Pi) for each Ct
###SaveLookupTable(LookupTable_ProbCt, outputfile)
###LookupTable_ProbCt = ReadLookupTable(outputfile) #load the precomputed results from a file (may make little difference vs. recomputing)
print "Please wait for PostProbMap"
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
if (os.path.isfile(output) == False) or (UPDATE_PostProbMap == 1): #do not create the file if it already exists
#PathWeightMap = PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #could perhaps be sped up with multiple CPUs #CostMapProb * PostProbMap #do not take the log at this point, for later processing
PathWeightMap = PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #,IndexMap)
#[TEST] save the computed result first
SaveProbMap(PathWeightMap, outputfile)
else:
PathWeightMap = ReadProbMap(outputfile)
#print "already exists:", output
print "[Done] PathWeightMap."
#[memory/compute reduction] remove from the 2D array all cells farther than T_horizon from the initial cell (the array becomes [(2*T_horizon)+1][(2*T_horizon)+1])
Bug_removal_savior = 0 #flag to avoid introducing a bug during the coordinate conversion
x_min = X_init_index[0] - T_horizon
x_max = X_init_index[0] + T_horizon
y_min = X_init_index[1] - T_horizon
y_max = X_init_index[1] + T_horizon
if (x_min>=0 and x_max<=map_width and y_min>=0 and y_max<=map_length):
PathWeightMap = PathWeightMap[x_min:x_max+1, y_min:y_max+1] # X[-T+I[0]:T+I[0],-T+I[1]:T+I[1]]
X_init_index = [T_horizon, T_horizon]
#measure the map size (length and width) in cells again
map_length = len(PathWeightMap)
map_width = len(PathWeightMap[0])
else:
print "[WARNING] The initial position (or init_pos +/- T_horizon) is outside the map."
Bug_removal_savior = 1 #avoid the bug (1)
#print X_init, X_init_index
#reduce the number of states to cut computation (flatten the state space to a 1D array -> drop the zero elements)
#PathWeight = np.ravel(PathWeightMap)
PathWeight_one_NOzero = PathWeightMap[PathWeightMap!=0.0]
state_num = len(PathWeight_one_NOzero)
print "PathWeight_one_NOzero state_num:", state_num
#keep the correspondence between the 2D map array indices and the 1D array
IndexMap = np.array([[(i,j) for j in xrange(map_width)] for i in xrange(map_length)])
IndexMap_one_NOzero = IndexMap[PathWeightMap!=0.0].tolist() #convert to a list first #implemented as a nested list rather than an np.array
print "IndexMap_one_NOzero"
#initial position in the 1D array
if (X_init_index in IndexMap_one_NOzero):
X_init_index_one = IndexMap_one_NOzero.index(X_init_index)
else:
print "[ERROR] The initial position is not a movable position on the map."
#print X_init, X_init_index
X_init_index_one = 0
print "Initial index", X_init_index_one
#list of candidate destination index coordinates (relative coordinates)
MoveIndex_list = MovePosition_2D([0,0]) #.tolist()
#MoveIndex_list = np.round(MovePosition(X_init_index)).astype(int)
print "MoveIndex_list"
"""
#Compute the state transition probabilities (motion model)
print "Please wait for Transition"
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx" # + "_Transition_log.csv"
if (os.path.isfile(output_transition) == False): #do not create the file if it already exists
#only the elements matching the 2D array indices in IndexMap_one_NOzero get probability 1
#Transition = Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
Transition = Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
#[TEST] save the computed result first
#SaveTransition(Transition, outputfile)
SaveTransition_sparse(Transition, outputfile)
else:
Transition = ReadTransition_sparse(state_num, outputfile) #ReadTransition(state_num, outputfile)
#print "already exists:", output_transition
Transition_one_NOzero = Transition #[PathWeightMap!=0.0]
print "[Done] Transition distribution."
"""
#Run the Viterbi algorithm
Path_one = ViterbiPath(X_init_index_one, np.log(PathWeight_one_NOzero), state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init, Bug_removal_savior) #, Transition_one_NOzero)
#convert the 1D array indices to 2D array indices -> then to the ROS coordinate system
Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
if ( Bug_removal_savior == 0):
Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
else:
Path_2D_index_original = Path_2D_index
Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original) #even better if converted to the ROS path format
#Path = Path_2D_index_original #Path_ROS #return whichever is needed as Path
print "Init:", X_init
print "Path:\n", Path_2D_index_original
return Path_2D_index_original, Path_ROS, PathWeightMap
#Candidate move positions: the 8 cells neighboring the current position (2D array index) plus the current cell itself
def MovePosition_2D(Xt):
PostPosition_list = np.array([ [-1,-1],[-1,0],[-1,1], [0,-1],[0,0], [0,1], [1,-1],[1,0],[1,1] ])*cmd_vel + np.array(Xt)
return PostPosition_list
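# Example (added, assuming cmd_vel = 1 as set in __init__): this is the Moore neighborhood, e.g.
# MovePosition_2D([5,5]) -> [[4,4],[4,5],[4,6],[5,4],[5,5],[5,6],[6,4],[6,5],[6,6]].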
#Function for the Viterbi path computation (reference: https://qiita.com/kkdd/items/6cbd949d03bc56e33e8e)
#@jit(parallel=True)
def update(cost, trans, emiss):
COST = 0 #COST, INDEX = range(2) #0,1
arr = [c[COST]+t for c, t in zip(cost, trans)]
max_arr = max(arr)
#print max_arr + emiss, arr.index(max_arr)
return max_arr + emiss, arr.index(max_arr)
#too heavy for some reason and T does not advance (not adopted)
def update_sparse(cost, trans, emiss):
COST = 0 #COST, INDEX = range(2) #0,1
trans_log = [(trans[0,i]==0)*approx_log_zero for i in xrange(trans.get_shape()[1])] #trans.toarray()
arr = [c[COST]+t for c, t in zip(cost, trans_log)]
#index = [i for i in xrange(trans.get_shape()[1])]
#arr = [c[COST]+np.log(trans[0,t]) for c, t in zip(cost, index)]
max_arr = max(arr)
#print max_arr + emiss, arr.index(max_arr)
return max_arr + emiss, arr.index(max_arr)
@jit #note: jit may raise errors depending on the code
def update_lite(cost, n, emiss, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition):
#Transition = np.array([approx_log_zero for j in prange(state_num)]) #build this according to the index number of emiss
for i in prange(len(Transition)):
Transition[i] = approx_log_zero
#It suffices to consider transitions only between the currently assumed cell and its 8 neighboring cells
#Index_2D = IndexMap_one_NOzero[n] #.tolist()
MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #Index_2D #convert to absolute coordinates
MoveIndex_list_n_list = MoveIndex_list_n.tolist()
count_t = 0
for c in prange(len(MoveIndex_list_n_list)): #prange is faster than xrange
if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c]) #c is not necessarily a movable state (cell)
Transition[m] = 0.0 #1 #this index is the state-to-state transition probability (not the map x,y)
count_t += 1
#print an error message if the computation looks wrong.
if (count_t == 0): #all transition probabilities are zero, which means no move is possible.
print "[ERROR] All transition is approx_log_zero."
elif (count_t == 1): #only one transition probability exists; only one movable coordinate.
print "[WARNING] One transition is zero."
#trans = Transition #np.array(Transition)
arr = cost + Transition #trans
#max_arr = np.max(arr)
max_arr_index = np.argmax(arr)
#return max_arr + emiss, np.where(arr == max_arr)[0][0] #np.argmax(arr)#arr.index(max_arr)
return arr[max_arr_index] + emiss, max_arr_index
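# Note (added): update_lite performs one Viterbi step in log space:
#   cost_t(m) = max_n [ cost_{t-1}(n) + log trans(n,m) ] + log emiss(m),
# where log trans is 0.0 for the 8-neighborhood (and staying in place)
# and approx_log_zero everywhere else.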
#def transition(m, n):
# return [[1.0 for i in xrange(m)] for j in xrange(n)]
#def emission(n):
# return [random.random() for j in xrange(n)]
#Compute the Viterbi path and return the Path (trajectory)
#@jit(parallel=True) #temporarily disabled because a print-related (?) error occurred
def ViterbiPath(X_init, PathWeight, state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init_original, Bug_removal_savior): #, Transition):
#Path = [[0,0] for t in xrange(T_horizon)] #cell indices [x,y] at each t
print "Start Viterbi Algorithm"
INDEX = 1 #COST, INDEX = range(2) #0,1
INITIAL = (approx_log_zero, X_init) # (cost, index) #put the 1D array index of the initial position into index
#print "Initial:",X_init
cost = [INITIAL for i in prange(len(PathWeight))]
cost[X_init] = (0.0, X_init) #the initial position is given uniquely (probability log(1.0))
trellis = []
e = PathWeight #emission(nstates[i])
m = [i for i in prange(len(PathWeight))] #Transition #transition(nstates[i-1], nstates[i]) #transition from the previous step to the current one
Transition = np.array([approx_log_zero for j in prange(state_num)]) #ends up being passed by reference
temp = 1
#Forward
print "Forward"
for i in prange(T_horizon): #len(nstates)): #move one cell at a time up to the planning horizon +1+1
#do not use i as a separate indicator inside this for loop
print "T:",i+1
if (i+1 == T_restart):
outputname_restart = outputfile + "T"+str(T_restart)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
trellis = ReadTrellis(outputname_restart, i+1)
cost = trellis[-1]
if (i+1 >= T_restart):
#cost = [update(cost, t, f) for t, f in zip(m, e)]
#cost = [update_sparse(cost, Transition[t], f) for t, f in zip(m, e)] #slow for some reason
cost_np = np.array([cost[c][0] for c in prange(len(cost))])
#Transition = np.array([approx_log_zero for j in prange(state_num)]) #ends up being passed by reference
#cost = [update_lite(cost_np, t, e[t], state_num,IndexMap_one_NOzero,MoveIndex_list) for t in prange(len(e))]
cost = [update_lite(cost_np, t, f, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition) for t, f in izip(m, e)] #izip is more memory-efficient, but the speed is the same as with zip
trellis.append(cost)
#print "i", i, [(c[COST], c[INDEX]) for c in cost] #records which node came before (i.e., where each state came from)
if (SAVE_T_temp == temp):
#Backward temp
last = [trellis[-1][j][0] for j in xrange(len(trellis[-1]))]
path_one = [last.index(max(last))] #not needed in the end but required for the computation -> could also just keep the argmax index of the last node
#print "last",last,"max",path
for x in reversed(trellis):
path_one = [x[path_one[0]][INDEX]] + path_one
#print "x", len(x), x
path_one = path_one[1:len(path_one)] #drop the initial position and the final transition added for the computation
SavePathTemp(X_init_original, path_one, i+1, outputname, IndexMap_one_NOzero, Bug_removal_savior)
if (SAVE_Trellis == 1):
SaveTrellis(trellis, outputname, i+1)
temp = 0
temp += 1
#the final transition probability can simply be uniform
e_last = [0.0]
m_last = [[0.0 for i in range(len(PathWeight))]]
cost = [update(cost, t, f) for t, f in zip(m_last, e_last)]
trellis.append(cost)
#Backward
print "Backward"
#last = [trellis[-1][i][0] for i in xrange(len(trellis[-1]))]
path = [0] #[last.index(max(last))] #not needed in the end but required for the computation -> could also just keep the argmax index of the last node
#print "last",last,"max",path
for x in reversed(trellis):
path = [x[path[0]][INDEX]] + path
#print "x", len(x), x
path = path[1:len(path)-1] #drop the initial position and the final transition added for the computation
print 'Maximum prob path:', path
return path
#Send the estimated path (via a topic or a service)
#def SendPath(Path):
#Save the path to a file (format TBD)
def SavePath(X_init, Path, Path_ROS, outputname):
print "PathSave"
if (SAVE_X_init == 1):
# save the robot's initial position to a file (index)
np.savetxt(outputname + "_X_init.csv", X_init, delimiter=",")
# save the robot's initial position to a file (ROS)
np.savetxt(outputname + "_X_init_ROS.csv", Array_index_To_Map_coordinates(X_init), delimiter=",")
# save the result to a file (index)
np.savetxt(outputname + "_Path.csv", Path, delimiter=",")
# save the result to a file (ROS)
np.savetxt(outputname + "_Path_ROS.csv", Path_ROS, delimiter=",")
print "Save Path: " + outputname + "_Path.csv and _Path_ROS.csv"
#Save the path to a file (format TBD)
def SavePathTemp(X_init, Path_one, temp, outputname, IndexMap_one_NOzero, Bug_removal_savior):
print "PathSaveTemp"
#convert the 1D array indices to 2D array indices -> then to the ROS coordinate system
Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
if ( Bug_removal_savior == 0):
Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
else:
Path_2D_index_original = Path_2D_index
Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original) #
#Path = Path_2D_index_original #Path_ROS #return whichever is needed as Path
# save the result to a file (index)
np.savetxt(outputname + "_Path" + str(temp) + ".csv", Path_2D_index_original, delimiter=",")
# save the result to a file (ROS)
np.savetxt(outputname + "_Path_ROS" + str(temp) + ".csv", Path_ROS, delimiter=",")
print "Save Path: " + outputname + "_Path" + str(temp) + ".csv and _Path_ROS" + str(temp) + ".csv"
def SaveTrellis(trellis, outputname, temp):
print "SaveTrellis"
# save the result to a file
np.save(outputname + "_trellis" + str(temp) + ".npy", trellis) #, delimiter=",")
print "Save trellis: " + outputname + "_trellis" + str(temp) + ".npy"
def ReadTrellis(outputname, temp):
print "ReadTrellis"
# read the result from a file
trellis = np.load(outputname + "_trellis" + str(temp) + ".npy") #, delimiter=",")
print "Read trellis: " + outputname + "_trellis" + str(temp) + ".npy"
return trellis
#Save the LookupTable_ProbCt used for the path computation to a file
def SaveLookupTable(LookupTable_ProbCt, outputfile):
# save the result to a file
output = outputfile + "LookupTable_ProbCt.csv"
np.savetxt( output, LookupTable_ProbCt, delimiter=",")
print "Save LookupTable_ProbCt: " + output
#Read the LookupTable_ProbCt used for the path computation from a file
def ReadLookupTable(outputfile):
# read the result from a file
output = outputfile + "LookupTable_ProbCt.csv"
LookupTable_ProbCt = np.loadtxt(output, delimiter=",")
print "Read LookupTable_ProbCt: " + output
return LookupTable_ProbCt
#Save the probability-form costmap used for the path computation to a file
def SaveCostMapProb(CostMapProb, outputfile):
# save the result to a file
output = outputfile + "CostMapProb.csv"
np.savetxt( output, CostMapProb, delimiter=",")
print "Save CostMapProb: " + output
#Read the probability-form costmap used for the path computation from a file
def ReadCostMapProb(outputfile):
# read the result from a file
output = outputfile + "CostMapProb.csv"
CostMapProb = np.loadtxt(output, delimiter=",")
print "Read CostMapProb: " + output
return CostMapProb
#Send the probability map used for the path computation (via a topic or a service)
#def SendProbMap(PathWeightMap):
#Save the probability map used for the path computation to a file
def SaveProbMap(PathWeightMap, outputfile):
# save the result to a file
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
np.savetxt( output, PathWeightMap, delimiter=",")
print "Save PathWeightMap: " + output
#Read the probability map used for the path computation from a file
def ReadProbMap(outputfile):
# read the result from a file
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
PathWeightMap = np.loadtxt(output, delimiter=",")
print "Read PathWeightMap: " + output
return PathWeightMap
def SaveTransition(Transition, outputfile):
# save the result to a file
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
#np.savetxt(outputfile + "_Transition_log.csv", Transition, delimiter=",")
f = open( output_transition , "w")
for i in xrange(len(Transition)):
for j in xrange(len(Transition[i])):
f.write(str(Transition[i][j]) + ",")
f.write('\n')
f.close()
print "Save Transition: " + output_transition
def ReadTransition(state_num, outputfile):
Transition = [[approx_log_zero for j in xrange(state_num)] for i in xrange(state_num)]
# read the result from a file
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
#Transition = np.loadtxt(outputfile + "_Transition_log.csv", delimiter=",")
i = 0
#read the text file
for line in open(output_transition, 'r'):
itemList = line[:-1].split(',')
for j in xrange(len(itemList)):
if itemList[j] != '':
Transition[i][j] = float(itemList[j])
i = i + 1
print "Read Transition: " + output_transition
return Transition
def SaveTransition_sparse(Transition, outputfile):
# save the result to a file (.mtx format)
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse"
mmwrite(output_transition, Transition)
print "Save Transition: " + output_transition
def ReadTransition_sparse(state_num, outputfile):
#Transition = [[0 for j in xrange(state_num)] for i in xrange(state_num)]
# read the result from a file
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx"
Transition = mmread(output_transition).tocsr() #.todense()
print "Read Transition: " + output_transition
return Transition
##Read the word dictionary, then write it back with additions
def WordDictionaryUpdate2(step, filename, W_list):
LIST = []
LIST_plus = []
i_best = len(W_list)
hatsuon = [ "" for i in xrange(i_best) ]
TANGO = []
##read the word dictionary
for line in open('./lang_m/' + lang_init, 'r'):
itemList = line[:-1].split(' ')
LIST = LIST + [line]
for j in xrange(len(itemList)):
itemList[j] = itemList[j].replace("[", "")
itemList[j] = itemList[j].replace("]", "")
TANGO = TANGO + [[itemList[1],itemList[2]]]
#print TANGO
if (1):
##process the words in W_list one by one
for c in xrange(i_best): # i_best = len(W_list)
#W_list_sj = unicode(MI_best[c][i], encoding='shift_jis')
W_list_sj = unicode(W_list[c], encoding='shift_jis')
if len(W_list_sj) != 1: ##exclude single characters
#for moji in xrange(len(W_list_sj)):
moji = 0
while (moji < len(W_list_sj)):
flag_moji = 0
#print len(W_list_sj),str(W_list_sj),moji,W_list_sj[moji]#,len(unicode(W_list[i], encoding='shift_jis'))
for j in xrange(len(TANGO)):
if (len(W_list_sj)-2 > moji) and (flag_moji == 0):
#print TANGO[j],j
#print moji
if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+"_"+W_list_sj[moji+2]) and (W_list_sj[moji+1] == "_"):
###print moji,j,TANGO[j][0]
hatsuon[c] = hatsuon[c] + TANGO[j][1]
moji = moji + 3
flag_moji = 1
for j in xrange(len(TANGO)):
if (len(W_list_sj)-1 > moji) and (flag_moji == 0):
#print TANGO[j],j
#print moji
if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+W_list_sj[moji+1]):
###print moji,j,TANGO[j][0]
hatsuon[c] = hatsuon[c] + TANGO[j][1]
moji = moji + 2
flag_moji = 1
#print len(W_list_sj),moji
for j in xrange(len(TANGO)):
if (len(W_list_sj) > moji) and (flag_moji == 0):
#else:
if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]):
###print moji,j,TANGO[j][0]
hatsuon[c] = hatsuon[c] + TANGO[j][1]
moji = moji + 1
flag_moji = 1
print W_list_sj,hatsuon[c]
else:
print W_list_sj, "(one name)" #W_list[c]
print JuliusVer,HMMtype
if (JuliusVer == "v4.4" and HMMtype == "DNN"):
#change the phoneme notation of every word in hatsuon to "*_I"
for i in xrange(len(hatsuon)):
hatsuon[i] = hatsuon[i].replace("_S","_I")
hatsuon[i] = hatsuon[i].replace("_B","_I")
hatsuon[i] = hatsuon[i].replace("_E","_I")
#change the first phoneme of each word in hatsuon to "*_B"
for i in xrange(len(hatsuon)):
#onsohyoki_index = onsohyoki.find(target)
hatsuon[i] = hatsuon[i].replace("_I","_B", 1)
#change the last phoneme of each word in hatsuon to "*_E"
hatsuon[i] = hatsuon[i][0:-2] + "E "
#exception handling for the phonemes of hatsuon words (N,q)
hatsuon[i] = hatsuon[i].replace("q_S","q_I")
hatsuon[i] = hatsuon[i].replace("q_B","q_I")
hatsuon[i] = hatsuon[i].replace("N_S","N_I")
#print type(hatsuon),hatsuon,type("N_S"),"N_S"
##for each word of each place name
meishi = u'名詞'
meishi = meishi.encode('shift-jis')
##generate the word dictionary file
fp = open( filename + '/WDnavi.htkdic', 'w')
for list in xrange(len(LIST)):
if (list < 3):
fp.write(LIST[list])
#if (UseLM == 1):
if (1):
##add the new words
c = 0
for mi in xrange(i_best): # i_best = len(W_list)
if hatsuon[mi] != "":
if ((W_list[mi] in LIST_plus) == False): #exclude duplicate words
flag_tango = 0
for j in xrange(len(TANGO)):
if(W_list[mi] == TANGO[j][0]):
flag_tango = -1
if flag_tango == 0:
LIST_plus = LIST_plus + [W_list[mi]]
fp.write(LIST_plus[c] + "+" + meishi +" [" + LIST_plus[c] + "] " + hatsuon[mi])
fp.write('\n')
c = c+1
fp.close()
########################################
if __name__ == '__main__':
print "[START] SpCoNavi."
#require the folder name of the learned parameters
trialname = sys.argv[1]
#print trialname
#trialname = raw_input("trialname?(folder) >")
#require the particle number to read
particle_num = sys.argv[2] #0
#require the candidate number of the robot's initial position
init_position_num = sys.argv[3] #0
#require the file number of the speech command
speech_num = sys.argv[4] #0
i = 0
#read the weights file
for line in open(datafolder + trialname + '/'+ str(step) + '/weights.csv', 'r'): ##read
if (i == 0):
MAX_Samp = int(line)
i += 1
#keep the particle number with the maximum likelihood
particle_num = MAX_Samp
if (SAVE_time == 1):
#record the start time
start_time = time.time()
##FullPath of folder
filename = datafolder + trialname + "/" + str(step) +"/"
print filename, particle_num
outputfile = outputfolder + trialname + navigation_folder
outputname = outputfile + "T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
#Makedir( outputfolder + trialname )
Makedir( outputfile )
#Makedir( outputname )
#read the learned parameters #THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
THETA = ReadParameters(particle_num, filename)
W_index = THETA[1]
##register the word dictionary
if (os.path.isfile(filename + '/WDnavi.htkdic') == False): #do not create the word dictionary file if it already exists
WordDictionaryUpdate2(step, filename, W_index)
else:
print "Word dictionary already exists:", filename + '/WDnavi.htkdic'
if (os.path.isfile(outputfile + "CostMapProb.csv") == False): #skip the computation if the file already exists
##read the map
gridmap = ReadMap(outputfile)
##read the costmap
costmap = ReadCostMap(outputfile)
#convert the costmap to probabilities
CostMapProb = CostMapProb_jit(gridmap, costmap)
#write the probability-form costmap
SaveCostMapProb(CostMapProb, outputfile)
else:
#read the probability-form costmap
CostMapProb = ReadCostMapProb(outputfile)
##read the speech file
speech_file = ReadSpeech(int(speech_num))
if (SAVE_time == 1):
#record the speech recognition start time (= the end of initialization and loading)
start_recog_time = time.time()
time_init = start_recog_time - start_time
fp = open( outputname + "_time_init.txt", 'w')
fp.write(str(time_init)+"\n")
fp.close()
#speech recognition
S_Nbest = SpeechRecognition(speech_file, W_index, step, trialname, outputfile)
if (SAVE_time == 1):
#record the speech recognition end time (path-planning start time)
end_recog_time = time.time()
time_recog = end_recog_time - start_recog_time
fp = open( outputname + "_time_recog.txt", 'w')
fp.write(str(time_recog)+"\n")
fp.close()
#path planning
Path, Path_ROS, PathWeightMap = PathPlanner(S_Nbest, X_candidates[int(init_position_num)], THETA, CostMapProb) #gridmap, costmap)
if (SAVE_time == 1):
#record the path-planning end time
end_pp_time = time.time()
time_pp = end_pp_time - end_recog_time
fp = open( outputname + "_time_pp.txt", 'w')
fp.write(str(time_pp)+"\n")
fp.close()
#travel distance of the path
#Distance = PathDistance(Path)
#send the path
#SendPath(Path)
#save the path
SavePath(X_candidates[int(init_position_num)], Path, Path_ROS, outputname)
#send the probability map
#SendProbMap(PathWeightMap)
#save the probability map (done inside PathPlanner)
#####SaveProbMap(PathWeightMap, outputname)
print "[END] SpCoNavi."
########################################
| [
"[email protected]"
]
| |
ce0c8512a2373bffac1635858e730b38b204d9dd | 37bc60b070be22a5e22321655c8490df2285b07c | /translate.py | 5f414fdbd164ef00cfcaa2c3eddd47a0378d4518 | []
| no_license | TheWover/DidierStevensSuite | 2ab56d33472a242a5d49359d643c4e669c7a7e04 | 17f08aee76b98f95fc94b4e9c6131786d62b4716 | refs/heads/master | 2020-07-30T01:00:00.497949 | 2019-09-17T18:46:00 | 2019-09-17T18:46:00 | 210,027,232 | 1 | 0 | null | 2019-09-21T17:32:54 | 2019-09-21T17:32:53 | null | UTF-8 | Python | false | false | 27,454 | py | #!/usr/bin/env python
__description__ = 'Translate bytes according to a Python expression'
__author__ = 'Didier Stevens'
__version__ = '2.5.6'
__date__ = '2019/02/26'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
No input or output validation is performed by this program: it contains injection vulnerabilities
Developed with Python 2.7, tested with 2.7 and 3.3
History:
2007/08/20: start
2014/02/24: rewrite
2014/02/27: manual
2015/11/04: added option -f
2015/11/05: continue
2016/02/20: added option -r
2016/04/25: 2.3.0 added StdoutWriteChunked() and option -R
2016/09/07: 2.3.1 added option -e
2016/09/09: continue
2016/09/13: man
2017/02/10: 2.4.0 added input filename # support
2017/02/26: fixed Python 3 str vs bytes bug
2017/06/04: 2.5.0 added #e# support
2017/06/16: continued #e# support
2017/07/29: added -2 option
2017/08/09: 2.5.1 #e# chr can take a second argument
2017/09/09: added functions Sani1 and Sani2 to help with input/output sanitization
2018/01/29: 2.5.2 added functions GzipD and ZlibD; and fixed stdin/stdout for Python 3
2018/02/12: 2.5.3 when the Python expression returns None (in stead of a byte value), no byte is written to output.
2018/03/05: 2.5.4 updated #e# expressions
2018/04/27: added option literalfilenames
2019/02/20: 2.5.5 added ZlibRawD
2019/02/26: 2.5.6 updated help
Todo:
"""
import optparse
import sys
import os
import textwrap
import re
import math
import binascii
import random
import zlib
import gzip
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
def PrintManual():
manual = '''
Manual:
Translate.py is a Python script to perform bitwise operations on files (like XOR, ROL/ROR, ...). You specify the bitwise operation to perform as a Python expression, and pass it as a command-line argument.
translate.py malware -o malware.decoded "byte ^ 0x10"
This will read file malware, perform XOR 0x10 on each byte (this is, expressed in Python: byte ^ 0x10), and write the result to file malware.decoded.
byte is a variable containing the current byte from the input file. Your expression has to evaluate to the modified byte. When your expression evaluates to None, no byte will be written to output. This can be used to delete bytes from the input.
For complex manipulation, you can define your own functions in a script file and load this with translate.py, like this:
translate.py malware -o malware.decoded "Process(byte)" process.py
process.py must contain the definition of function Process. Function Process must return the modified byte.
Another variable is also available: position. This variable contains the position of the current byte in the input file, starting from 0.
If only part of the file has to be manipulated, while leaving the rest unchanged, you can do it like this:
def Process(byte):
if position >= 0x10 and position < 0x20:
return byte ^ 0x10
else:
return byte
This example will perform an XOR 0x10 operation from the 17th byte till the 32nd byte included. All other bytes remain unchanged.
Because Python has built-in shift operators (<< and >>) but no rotate operators, I've defined 2 rotate functions that operate on a byte: rol (rotate left) and ror (rotate right). They accept 2 arguments: the byte to rotate and the number of bit positions to rotate. For example, rol(0x01, 2) gives 0x04.
translate.py malware -o malware.decoded "rol(byte, 2)"
Another function I defined is IFF (the IF Function): IFF(expression, valueTrue, valueFalse). This function allows you to write conditional code without an if statement. When expression evaluates to True, IFF returns valueTrue, otherwise it returns valueFalse.
And yet 2 other functions I defined are Sani1 and Sani2. They can help you with input/output sanitization: Sani1 accepts a byte as input and returns the same byte, except if it is a control character. All control characters (except HT, LF and CR) are replaced by a space character (0x20). Sani2 is like Sani1, but sanitizes even more bytes: it sanitizes control characters like Sani1, and also all bytes equal to 0x80 and higher.
translate.py malware -o malware.decoded "IFF(position >= 0x10 and position < 0x20, byte ^ 0x10, byte)"
By default this program translates individual bytes via the provided Python expression. With option -f (fullread), translate.py reads the input file as one byte sequence and passes it to the function specified by the expression. This function needs to take one string as an argument and return one string (the translated file).
Option -r (regex) uses a regular expression to search through the file and then calls the provided function with a match argument for each matched string. The return value of the function (a string) is used to replace the matched string.
Option -R (filterregex) is similar to option -r (regex), except that it does not operate on the complete file, but on the file filtered for the regex.
Here are 2 examples with a regex. The input file (test-ah.txt) contains the following: 1234&H41&H42&H43&H444321
The first command will search for strings &Hxx and replace them with the character represented in ASCII by hexadecimal number xx:
translate.py -r "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: 1234ABCD4321
The second command is exactly the same as the first command, except that it uses option -R in stead or -r:
translate.py -R "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: ABCD
Option -e (execute) is used to execute Python commands before the command is executed. This can, for example, be used to import modules.
Here is an example to decompress a Flash file (.swf):
translate.py -f -e "import zlib" sample.swf "lambda b: zlib.decompress(b[8:])"
You can use build in function ZlibD too, and ZlibRawD for inflating without header, and GzipD for gzip decompression.
A second file can be used as input with option -2. The value of the current byte of the second input file is stored in variable byte2 (this too advances byte per byte together with the primary input file).
Example:
translate.py -2 #021230 #Scbpbt "byte + byte2 - 0x30"
Output:
Secret
Instead of using an input filename, the content can also be passed in the argument. To achieve this, prefix the text with character #.
If the text to pass via the argument contains control characters or non-printable characters, hexadecimal (#h#) or base64 (#b#) can be used.
Example:
translate.py #h#89B5B4AEFDB4AEFDBCFDAEB8BEAFB8A9FC "byte ^0xDD"
Output:
This is a secret!
File arguments that start with #e# are a notational convention to use expressions to generate data. An expression is a single function/string or the concatenation of several functions/strings (using character + as concatenation operator).
Strings can be characters enclosed by single quotes ('example') or hexadecimal strings prefixed by 0x (0xBEEF).
4 functions are available: random, loremipsum, repeat and chr.
Function random takes exactly one argument: an integer (with value 1 or more). Integers can be specified using decimal notation or hexadecimal notation (prefix 0x).
The random function generates a sequence of bytes with a random value (between 0 and 255), the argument specifies how many bytes need to be generated. Remark that the random number generator that is used is just the Python random number generator, not a cryptographic random number generator.
Example:
tool.py #e#random(100)
will make the tool process data consisting of a sequence of 100 random bytes.
Function loremipsum takes exactly one argument: an integer (with value 1 or more).
The loremipsum function generates "lorem ipsum" text (fake latin), the argument specifies the number of sentences to generate.
Example: #e#loremipsum(2) generates this text:
Ipsum commodo proin pulvinar hac vel nunc dignissim neque eget odio erat magna lorem urna cursus fusce facilisis porttitor congue eleifend taciti. Turpis duis suscipit facilisi tristique dictum praesent natoque sem mi egestas venenatis per dui sit sodales est condimentum habitasse ipsum phasellus non bibendum hendrerit.
Function chr takes one argument or two arguments.
chr with one argument takes an integer between 0 and 255, and generates a single byte with the value specified by the integer.
chr with two arguments takes two integers between 0 and 255, and generates a byte sequence with the values specified by the integers.
For example #e#chr(0x41,0x45) generates data ABCDE.
Function repeat takes two arguments: an integer (with value 1 or more) and a byte sequence. This byte sequence can be a quoted string of characters (single quotes), like 'ABCDE' or an hexadecimal string prefixed with 0x, like 0x4142434445.
The repeat function will create a sequence of bytes consisting of the provided byte sequence (the second argument) repeated as many times as specified by the first argument.
For example, #e#repeat(3, 'AB') generates byte sequence ABABAB.
When more than one function needs to be used, the byte sequences generated by the functions can be concatenated with the + operator.
For example, #e#repeat(10,0xFF)+random(100) will generate a byte sequence of 10 FF bytes followed by 100 random bytes.
To prevent the tool from processing file arguments with wildcard characters or special initial characters (@ and #) differently, but to process them as normal files, use option --literalfilenames.
'''
for line in manual.split('\n'):
print(textwrap.fill(line))
def rol(byte, count):
return (byte << count | byte >> (8 - count)) & 0xFF
def ror(byte, count):
return (byte >> count | byte << (8 - count)) & 0xFF
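# Examples (added): rol(0x01, 2) == 0x04 and ror(0x04, 2) == 0x01;
# both rotate a single byte, so the result is masked to 8 bits.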
#Sanitize 1: Sanitize input: return space (0x20) for all control characters, except HT, LF and CR
def Sani1(byte):
if byte in [0x09, 0x0A, 0x0D]:
return byte
if byte < 0x20:
return 0x20
return byte
#Sanitize 2: Sanitize input: return space (0x20) for all bytes equal to 0x80 and higher, and all control characters, except HT, LF and CR
def Sani2(byte):
if byte in [0x09, 0x0A, 0x0D]:
return byte
if byte < 0x20:
return 0x20
if byte >= 0x80:
return 0x20
return byte
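# Examples (added): Sani1(0x07) == 0x20, Sani1(0x0A) == 0x0A, Sani1(0x80) == 0x80;
# Sani2 additionally maps 0x80 and higher to a space: Sani2(0x80) == 0x20.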
def GzipD(data):
return gzip.GzipFile('', 'r', fileobj=StringIO(data)).read()
def ZlibD(data):
return zlib.decompress(data)
def ZlibRawD(data):
return zlib.decompress(data, -8)
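# Note (added): ZlibRawD passes wbits=-8 so zlib inflates raw deflate data without
# expecting a zlib header/trailer; GzipD wraps the bytes in a file object so the
# gzip header is handled for us.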
# CIC: Call If Callable
def CIC(expression):
if callable(expression):
return expression()
else:
return expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
if expression:
return CIC(valueTrue)
else:
return CIC(valueFalse)
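# Example (added): IFF(1 > 0, 'yes', 'no') == 'yes'; thanks to CIC, the value
# arguments may also be callables, and only the chosen branch is invoked.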
#Convert String To Bytes If Python 3
def CS2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
def Output(fOut, data):
if fOut != sys.stdout:
fOut.write(data)
else:
StdoutWriteChunked(data)
def LoremIpsumSentence(minimum, maximum):
words = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipiscing', 'elit', 'etiam', 'tortor', 'metus', 'cursus', 'sed', 'sollicitudin', 'ac', 'sagittis', 'eget', 'massa', 'praesent', 'sem', 'fermentum', 'dignissim', 'in', 'vel', 'augue', 'scelerisque', 'auctor', 'libero', 'nam', 'a', 'gravida', 'odio', 'duis', 'vestibulum', 'vulputate', 'quam', 'nec', 'cras', 'nibh', 'feugiat', 'ut', 'vitae', 'ornare', 'justo', 'orci', 'varius', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes', 'nascetur', 'ridiculus', 'mus', 'curabitur', 'nisl', 'egestas', 'urna', 'iaculis', 'lectus', 'maecenas', 'ultrices', 'velit', 'eu', 'porta', 'hac', 'habitasse', 'platea', 'dictumst', 'integer', 'id', 'commodo', 'mauris', 'interdum', 'malesuada', 'fames', 'ante', 'primis', 'faucibus', 'accumsan', 'pharetra', 'aliquam', 'nunc', 'at', 'est', 'non', 'leo', 'nulla', 'sodales', 'porttitor', 'facilisis', 'aenean', 'condimentum', 'rutrum', 'facilisi', 'tincidunt', 'laoreet', 'ultricies', 'neque', 'diam', 'euismod', 'consequat', 'tempor', 'elementum', 'lobortis', 'erat', 'ligula', 'risus', 'donec', 'phasellus', 'quisque', 'vivamus', 'pellentesque', 'tristique', 'venenatis', 'purus', 'mi', 'dictum', 'posuere', 'fringilla', 'quis', 'magna', 'pretium', 'felis', 'pulvinar', 'lacinia', 'proin', 'viverra', 'lacus', 'suscipit', 'aliquet', 'dui', 'molestie', 'dapibus', 'mollis', 'suspendisse', 'sapien', 'blandit', 'morbi', 'tellus', 'enim', 'maximus', 'semper', 'arcu', 'bibendum', 'convallis', 'hendrerit', 'imperdiet', 'finibus', 'fusce', 'congue', 'ullamcorper', 'placerat', 'nullam', 'eros', 'habitant', 'senectus', 'netus', 'turpis', 'luctus', 'volutpat', 'rhoncus', 'mattis', 'nisi', 'ex', 'tempus', 'eleifend', 'vehicula', 'class', 'aptent', 'taciti', 'sociosqu', 'ad', 'litora', 'torquent', 'per', 'conubia', 'nostra', 'inceptos', 'himenaeos']
sample = random.sample(words, random.randint(minimum, maximum))
sample[0] = sample[0].capitalize()
return ' '.join(sample) + '.'
def LoremIpsum(sentences):
return ' '.join([LoremIpsumSentence(15, 30) for i in range(sentences)])
STATE_START = 0
STATE_IDENTIFIER = 1
STATE_STRING = 2
STATE_SPECIAL_CHAR = 3
STATE_ERROR = 4
FUNCTIONNAME_REPEAT = 'repeat'
FUNCTIONNAME_RANDOM = 'random'
FUNCTIONNAME_CHR = 'chr'
FUNCTIONNAME_LOREMIPSUM = 'loremipsum'
def Tokenize(expression):
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or char.lower() >= 'a' and char.lower() <= 'z':
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
else:
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
return result
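# Example (added): Tokenize("repeat(2,'AB')") yields
# [[STATE_IDENTIFIER,'repeat'], [STATE_SPECIAL_CHAR,'('], [STATE_IDENTIFIER,'2'],
#  [STATE_SPECIAL_CHAR,','], [STATE_STRING,'AB'], [STATE_SPECIAL_CHAR,')']].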
def ParseFunction(tokens):
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_STRING or tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x'):
return [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
return None, tokens
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
return None, tokens
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
return None, tokens
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
return [[function, arguments], tokens]
def Parse(expression):
tokens = Tokenize(expression)
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
functioncall, tokens = ParseFunction(tokens)
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
def InterpretInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
try:
return int(token[1])
except:
return None
def Hex2Bytes(hexadecimal):
if len(hexadecimal) % 2 == 1:
hexadecimal = '0' + hexadecimal
try:
return binascii.a2b_hex(hexadecimal)
except:
return None
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
bytes = Hex2Bytes(token[1][2:])
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 0x100 + ord(byte)
return integer
def InterpretNumber(token):
number = InterpretInteger(token)
if number == None:
return InterpretHexInteger(token)
else:
return number
def InterpretBytes(token):
if token[0] == STATE_STRING:
return token[1]
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
return Hex2Bytes(token[1][2:])
def CheckFunction(functionname, arguments, countarguments, maxcountarguments=None):
if maxcountarguments == None:
if countarguments == 0 and len(arguments) != 0:
print('Error: function %s takes no arguments, %d are given' % (functionname, len(arguments)))
return True
if countarguments == 1 and len(arguments) != 1:
print('Error: function %s takes 1 argument, %d are given' % (functionname, len(arguments)))
return True
if countarguments != len(arguments):
print('Error: function %s takes %d arguments, %d are given' % (functionname, countarguments, len(arguments)))
return True
else:
if len(arguments) < countarguments or len(arguments) > maxcountarguments:
print('Error: function %s takes between %d and %d arguments, %d are given' % (functionname, countarguments, maxcountarguments, len(arguments)))
return True
return False
def CheckNumber(argument, minimum=None, maximum=None):
number = InterpretNumber(argument)
if number == None:
print('Error: argument should be a number: %s' % argument[1])
return None
if minimum != None and number < minimum:
print('Error: argument should be minimum %d: %d' % (minimum, number))
return None
if maximum != None and number > maximum:
print('Error: argument should be maximum %d: %d' % (maximum, number))
return None
return number
def Interpret(expression):
functioncalls = Parse(expression)
if functioncalls == None:
return None
decoded = ''
for functioncall in functioncalls:
functionname, arguments = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
return None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
return None
number = CheckNumber(arguments[0], minimum=1, maximum=255)
if number == None:
return None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=1, maximum=255)
if number2 == None:
return None
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
print('Error: unknown function: %s' % functionname)
return None
return decoded
def FilenameCheckHash(filename):
if filename.startswith('#h#'):
return Hex2Bytes(filename[3:])
elif filename.startswith('#b#'):
try:
return binascii.a2b_base64(filename[3:])
except:
return None
elif filename.startswith('#e#'):
return Interpret(filename[3:])
elif filename.startswith('#'):
return filename[1:]
else:
return ''
def Transform(fIn, fIn2, fOut, commandPython):
position = 0
while True:
inbyte = fIn.read(1)
if not inbyte:
break
byte = ord(inbyte)
if fIn2 != None:
inbyte2 = fIn2.read(1)
byte2 = ord(inbyte2)
outbyte = eval(commandPython)
if outbyte != None:
fOut.write(chr(outbyte))
position += 1
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
if sys.version_info[0] > 2:
sys.stdout.buffer.write(data)
else:
while data != '':
sys.stdout.write(data[0:10000])
try:
sys.stdout.flush()
except IOError:
return
data = data[10000:]
def Translate(filenameInput, commandPython, options):
if filenameInput == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
try:
fIn = sys.stdin.buffer
except:
fIn = sys.stdin
else:
decoded = FilenameCheckHash(filenameInput)
if options.literalfilenames or decoded == '':
fIn = open(filenameInput, 'rb')
elif decoded == None:
print('Error parsing filename: ' + filenameInput)
return
else:
fIn = StringIO(decoded)
if options.secondbytestream != '':
decoded = FilenameCheckHash(options.secondbytestream)
if options.literalfilenames or decoded == '':
fIn2 = open(options.secondbytestream, 'rb')
elif decoded == None:
print('Error parsing filename: ' + options.secondbytestream)
return
else:
fIn2 = StringIO(decoded)
else:
fIn2 = None
if options.output == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
fOut = sys.stdout
else:
fOut = open(options.output, 'wb')
if options.script != '':
execfile(options.script, globals())
if options.execute != '':
exec(options.execute, globals())
if options.fullread:
Output(fOut, eval(commandPython)(fIn.read()))
elif options.regex != '' or options.filterregex != '':
content = fIn.read()
if options.regex != '':
Output(fOut, re.sub(options.regex, eval(commandPython), content))
else:
Output(fOut, re.sub(options.filterregex, eval(commandPython), ''.join([x.group() for x in re.finditer(options.filterregex, content)])))
else:
Transform(fIn, fIn2, fOut, commandPython)
if fIn != sys.stdin:
fIn.close()
if fIn2 != None:
fIn2.close()
if fOut != sys.stdout:
fOut.close()
def Main():
moredesc = '''
Example: translate.py -o svchost.exe.dec svchost.exe 'byte ^ 0x10'
"byte" is the current byte in the file, 'byte ^ 0x10' does an X0R 0x10
Extra functions:
rol(byte, count)
ror(byte, count)
IFF(expression, valueTrue, valueFalse)
Sani1(byte)
Sani2(byte)
ZlibD(bytes)
ZlibRawD(bytes)
GzipD(bytes)
Variable "position" is an index into the input file, starting at 0
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
oParser = optparse.OptionParser(usage='usage: %prog [options] [file-in] [file-out] command [script]\n' + __description__ + moredesc, version='%prog ' + __version__)
oParser.add_option('-o', '--output', default='', help='Output file (default is stdout)')
oParser.add_option('-s', '--script', default='', help='Script with definitions to include')
oParser.add_option('-f', '--fullread', action='store_true', default=False, help='Full read of the file')
oParser.add_option('-r', '--regex', default='', help='Regex to search input file for and apply function to')
oParser.add_option('-R', '--filterregex', default='', help='Regex to filter input file for and apply function to')
oParser.add_option('-e', '--execute', default='', help='Commands to execute')
oParser.add_option('-2', '--secondbytestream', default='', help='Second bytestream')
oParser.add_option('-l', '--literalfilenames', action='store_true', default=False, help='Do not interpret filenames')
oParser.add_option('-m', '--man', action='store_true', default=False, help='print manual')
(options, args) = oParser.parse_args()
if options.man:
oParser.print_help()
PrintManual()
return
if len(args) == 0 or len(args) > 4:
oParser.print_help()
elif len(args) == 1:
Translate('', args[0], options)
elif len(args) == 2:
Translate(args[0], args[1], options)
elif len(args) == 3:
options.output = args[1]
Translate(args[0], args[2], options)
elif len(args) == 4:
options.output = args[1]
options.script = args[3]
Translate(args[0], args[2], options)
if __name__ == '__main__':
Main()
| [
"[email protected]"
]
| |
c58f1c2970ecc1f52452603ec752fee605c737c0 | 053221e1d90b365f68701dbd5b6466f30d1f6fd7 | /Day2/vd9.py | fd7cce53fa7b1ae816f5b6dbeb603d15b41e478e | []
| no_license | pytutorial/py2011E | eceb4d563cc807294b08b818edadd521ed8da488 | 306437369b0bfe55a2fa827b098283856242e731 | refs/heads/main | 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # vd9.py
# Weather forecast program
# Given T (deg C), w (km/h), p (atm)
# Print: will it rain?
T = float(input('Temperature (C):'))
w = float(input('Wind speed (km/h):'))
p = float(input('Atmospheric pressure (atm):'))
rain = False # default
if T >= 21:
if w >= 3 and p > 0.87:
rain = True
else:
if w >= 7 or p > 1.04:
rain = True
print(rain)
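# Worked example: T=25, w=5, p=1.0 takes the T >= 21 branch; since w >= 3
# and p > 0.87, rain becomes True and the program prints True.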
| [
"[email protected]"
]
| |
c830596b2f898d2ead4f94528ad2f3100de2be7b | 7786de317489fa258c7504b2fc96341e970e45db | /tests/unit/test_cf_storage_object.py | 40cecc402ed6e56b9c96465a85a7524220df10d6 | [
"MIT"
]
| permissive | tvaught/pyrax | 7207158d832721ca6ccde2e9c328855155a60915 | 8a310435239c536921490e04a984ff8a82b18eb8 | refs/heads/master | 2020-12-25T10:10:54.714401 | 2013-05-30T19:56:21 | 2013-05-30T19:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.cf_wrapper.storage_object import StorageObject
import pyrax.exceptions as exc
from tests.unit.fakes import FakeContainer
from tests.unit.fakes import FakeIdentity
from tests.unit.fakes import FakeResponse
class CF_StorageObjectTest(unittest.TestCase):
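    # These tests swap the real identity service and Cloud Files connection
    # for fakes/mocks (FakeIdentity, Mock), so no network requests are made.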
def __init__(self, *args, **kwargs):
reload(pyrax)
self.orig_connect_to_cloudservers = pyrax.connect_to_cloudservers
self.orig_connect_to_cloudfiles = pyrax.connect_to_cloudfiles
self.orig_connect_to_cloud_databases = pyrax.connect_to_cloud_databases
ctclb = pyrax.connect_to_cloud_loadbalancers
self.orig_connect_to_cloud_loadbalancers = ctclb
ctcbs = pyrax.connect_to_cloud_blockstorage
self.orig_connect_to_cloud_blockstorage = ctcbs
super(CF_StorageObjectTest, self).__init__(*args, **kwargs)
self.obj_name = "testobj"
self.container_name = "testcont"
pyrax.connect_to_cloudservers = Mock()
pyrax.connect_to_cloud_loadbalancers = Mock()
pyrax.connect_to_cloud_databases = Mock()
pyrax.connect_to_cloud_blockstorage = Mock()
@patch('pyrax.cf_wrapper.client.Container', new=FakeContainer)
def setUp(self):
pyrax.connect_to_cloudservers = Mock()
pyrax.connect_to_cloud_loadbalancers = Mock()
pyrax.connect_to_cloud_databases = Mock()
pyrax.connect_to_cloud_blockstorage = Mock()
pyrax.clear_credentials()
pyrax.identity = FakeIdentity()
pyrax.set_credentials("fakeuser", "fakeapikey")
pyrax.connect_to_cloudfiles()
self.client = pyrax.cloudfiles
self.container = FakeContainer(self.client, self.container_name, 0, 0)
self.container.name = self.container_name
self.client.get_container = Mock(return_value=self.container)
self.client.connection.get_container = Mock()
self.client.connection.head_object = Mock()
objs = [{"name": self.obj_name, "content_type": "test/test",
"bytes": 444, "hash": "abcdef0123456789"}]
self.client.connection.head_object.return_value = ({}, objs)
self.client.connection.get_container.return_value = ({}, objs)
self.storage_object = self.client.get_object(self.container, "testobj")
self.client._container_cache = {}
self.container.object_cache = {}
def tearDown(self):
self.client = None
self.container = None
self.storage_object = None
pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
octclb = self.orig_connect_to_cloud_loadbalancers
pyrax.connect_to_cloud_loadbalancers = octclb
octcbs = self.orig_connect_to_cloud_blockstorage
pyrax.connect_to_cloud_blockstorage = octcbs
def test_read_attdict(self):
tname = "something"
ttype = "foo/bar"
tbytes = 12345
tlastmodified = "2222-02-22T22:22:22.222222"
tetag = "123123123"
dct = {"name": tname, "content_type": ttype, "bytes": tbytes,
"last_modified": tlastmodified, "hash": tetag}
obj = self.storage_object
obj._read_attdict(dct)
self.assertEqual(obj.name, tname)
self.assertEqual(obj.content_type, ttype)
self.assertEqual(obj.total_bytes, tbytes)
self.assertEqual(obj.last_modified, tlastmodified)
self.assertEqual(obj.etag, tetag)
def test_subdir(self):
tname = "something"
dct = {"subdir": tname}
obj = self.storage_object
obj._read_attdict(dct)
self.assertEqual(obj.name, tname)
def test_get(self):
obj = self.storage_object
obj.client.connection.get_object = Mock()
meta = {"a": "b"}
data = "This is the contents of the file"
obj.client.connection.get_object.return_value = (meta, data)
ret = obj.get()
self.assertEqual(ret, data)
ret = obj.get(include_meta=True)
self.assertEqual(ret, (meta, data))
def test_delete(self):
obj = self.storage_object
obj.client.connection.delete_object = Mock()
obj.delete()
obj.client.connection.delete_object.assert_called_with(
obj.container.name, obj.name)
def test_purge(self):
obj = self.storage_object
cont = obj.container
cont.cdn_uri = None
self.assertRaises(exc.NotCDNEnabled, obj.purge)
cont.cdn_uri = "http://example.com"
obj.client.connection.cdn_request = Mock()
obj.purge()
obj.client.connection.cdn_request.assert_called_with("DELETE",
cont.name, obj.name, hdrs={})
def test_get_metadata(self):
obj = self.storage_object
obj.client.connection.head_object = Mock()
obj.client.connection.head_object.return_value = {
"X-Object-Meta-Foo": "yes",
"Some-Other-Key": "no"}
meta = obj.get_metadata()
self.assertEqual(meta, {"X-Object-Meta-Foo": "yes"})
def test_set_metadata(self):
obj = self.storage_object
obj.client.connection.post_object = Mock()
obj.client.connection.head_object = Mock(return_value={})
obj.set_metadata({"newkey": "newval"})
obj.client.connection.post_object.assert_called_with(obj.container.name,
obj.name, {"x-object-meta-newkey": "newval"})
def test_remove_metadata_key(self):
obj = self.storage_object
obj.client.connection.post_object = Mock()
obj.client.connection.head_object = Mock(return_value={})
obj.remove_metadata_key("newkey")
obj.client.connection.post_object.assert_called_with(obj.container.name,
obj.name, {})
def test_change_content_type(self):
obj = self.storage_object
obj.client.change_object_content_type = Mock()
obj.change_content_type("foo")
obj.client.change_object_content_type.assert_called_once_with(
obj.container, obj, new_ctype="foo", guess=False)
def test_get_temp_url(self):
obj = self.storage_object
obj.client.get_temp_url = Mock()
secs = random.randint(1, 1000)
obj.get_temp_url(seconds=secs)
obj.client.get_temp_url.assert_called_with(obj.container, obj,
seconds=secs, method="GET")
def test_repr(self):
obj = self.storage_object
rep = obj.__repr__()
self.assert_("<Object " in rep)
self.assert_(obj.name in rep)
self.assert_(obj.content_type in rep)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
a5a11cfef9f4349cd1bbbda6164070d5f154324b | ad682d2145f440c078a431a40d2153a204771026 | /method/DepBased/WM_OLPDM.py | 7889685fa719f8816d1f5051b2aece6f7cb45c2f | []
| no_license | barry800414/NewsCrawler | d81f1ee4b0e0c4a997dda1efd24d1430e222d318 | 18c10f10508558600f734d659e724d4e27f071a3 | refs/heads/master | 2021-05-03T13:11:29.696108 | 2015-07-01T16:38:05 | 2015-07-01T16:38:05 | 26,075,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,697 | py | #!/usr/bin/env python3
import sys
import json
import math
from collections import defaultdict
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.grid_search import ParameterGrid
import WordModelImproved as WM
import OneLayerPhraseDepModel as OLPDM
from PhraseDepTree import loadPhraseFile
from sentiDictSum import readSentiDict
from RunExperiments import *
import ErrorAnalysis as EA
from misc import *
import dataTool
import Parameter
'''
This code implements the baseline (tf, tf-idf) features
for training and testing (supervised document-level learning)
Author: Wei-Ming Chen
Date: 2015/02/16
'''
# Deprecated
def mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=None, topicId=None):
oldms = dict()
for p in paramsIter:
# generate tfidf features
print('generating tfidf features...', file=sys.stderr)
(X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['columnSource'],
statCol=p['statementCol'], feature=p['feature'])
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
# saving model for speed up
if p['seedWordPOSType'] not in oldms:
allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
'tag', allowedFirstLayerWord, 'word', allowedRel)
oldms[p['seedWordPOSType']] = oldm
else:
oldm = oldms[p['seedWordPOSType']]
(X2, y2) = oldm.genXY()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
    # merge (horizontally align) the two matrices
X = DataTool.hstack(X1, X2)
print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)
if topicMap == None: #self train -> self test
prefix = "%d, %s, %s, %s" % (topicId, 'OLPDM+' + str(p['feature']),
toStr(p['columnSource']), p['statementCol'])
RunExp.selfTrainTest(X, y1, clfList, "MacroF1", testSize=0.2, prefix=prefix)
else: # all-train-and-test and leave-one-test
prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']),
toStr(p['columnSource']), p['statementCol'])
RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
# generate word model features and dependency model features, then merge them
def genXY(labelNewsList, olpdm, topicSet, sentiDict, params, volc=None):
# generate WM features
print('generating word features...', file=sys.stderr)
p = params['WM']['model settings']
allowedPOS = set(['VA', 'VV', 'NN', 'NR', 'AD', 'JJ', 'FW'])
wm = WM.WordModel(labelNewsList, newsCols=p['col'], statCol=p['stat'],
feature=p['feature'], allowedPOS=allowedPOS, volc=volc)
(X1, y1) = wm.genXY(p['minCnt'])
    volc1 = wm.getVolc()
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
p = params['OLPDM']['model settings']
allowedSeedWord = initAllowedSet(topicSet, p['seedWordType'])
allowedFirstLayerWord = initAllowedSet(topicSet, p['firstLayerType'], sentiDict)
allowedRel = { t: None for t in topicSet }
olpdm.setModel(allowedSeedWord, p['seedWordType']['type'],
allowedFirstLayerWord, p['firstLayerType']['type'],
allowedRel, p['minCnt'])
(X2, y2) = olpdm.genXY()
volc2 = olpdm.getVolc()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
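    # Both feature sets are built from the same labelNewsList, so their label
    # vectors must be identical before the matrices are merged.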
assert np.array_equal(y1, y2)
    # merge (horizontally align) the two matrices
X = DataTool.hstack(X1, X2)
volc3 = mergeVolc(volc1, volc2)
print('X: (%d, %d)' % (X.shape[0], X.shape[1]), file=sys.stderr)
return (X, y1, volc3)
if __name__ == '__main__':
if len(sys.argv) != 6:
print('Usage:', sys.argv[0], 'TagAndDepLabelNewsJson phraseJson sentiDict WMParamsJson OLPDMParamsJson', file=sys.stderr)
exit(-1)
# arguments
labelNewsJson = sys.argv[1]
phraseJson = sys.argv[2]
sentiDictFile = sys.argv[3]
WMParamsJson = sys.argv[4]
OLPDMParamsJson = sys.argv[5]
# load labels and news
with open(labelNewsJson, 'r') as f:
labelNewsList = json.load(f)
# ====== initialization ======
# load phrases
topicPhraseList = loadPhraseFile(phraseJson)
# load sentiment dictionary
sentiDict = readSentiDict(sentiDictFile)
# get the set of all possible topic
topicSet = set([labelNews['statement_id'] for labelNews in labelNewsList])
# contruct in the process of constructing phrase dependency tree
allowedFirstLayerWord = { topicId: set(sentiDict.keys()) for topicId in topicSet }
allowedRel = { topicId: None for topicId in topicSet }
topicMap = [ labelNewsList[i]['statement_id'] for i in range(0, len(labelNewsList)) ]
# ====== initalizing parameters ======
clfList = ['NaiveBayes', 'MaxEnt', 'SVM']
randSeedList = [1, 2, 3, 4, 5]
# print result of first Line
ResultPrinter.printFirstLine()
# ==================================================================== #
# Run experiments on given list of parameters #
# ==================================================================== #
# read best parameters of two model
WMParams = Parameter.loadFrameworkTopicParams(WMParamsJson)
OLPDMParams = Parameter.loadFrameworkTopicParams(OLPDMParamsJson)
# ============= Run for self-train-test ===============
print('Self-Train-Test...', file=sys.stderr)
labelNewsInTopic = dataTool.divideLabel(labelNewsList)
for t in topicSet:
bestR = None
olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsInTopic[t], topicPhraseList)
paramsIter = Parameter.getParamsIter(WMParams['SelfTrainTest'][t], 'WM',
OLPDMParams['SelfTrainTest'][t], 'OLPDM')
for p in paramsIter:
(X, y, volc) = genXY(labelNewsInTopic[t], olpdm, topicSet, sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'SelfTrainTest', p,
clfList, topicId=t, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs, 'MacroF1')
with open('WM_OLPDM_SelfTrainTest_topic%d.pickle' % t, 'w+b') as f:
pickle.dump(bestR, f)
olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList)
# ============= Run for all-train-test ================
print('All-Train-Test...', file=sys.stderr)
paramsIter = Parameter.getParamsIter(WMParams['AllTrainTest'], 'WM',
OLPDMParams['AllTrainTest'], 'OLPDM')
bestR = None
for p in paramsIter:
(X, y, volc) = genXY(labelNewsList, olpdm, topicSet,
sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'AllTrainTest', p, clfList,
topicMap=topicMap, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs, 'MacroF1')
with open('WM_OLPDM_AllTrainTest.pickle', 'w+b') as f:
pickle.dump(bestR, f)
# ============= Run for leave-one-test ================
print('Leave-One-Test...', file=sys.stderr)
for t in topicSet:
bestR = None
        paramsIter = Parameter.getParamsIter(WMParams['LeaveOneTest'][t], 'WM',
OLPDMParams['LeaveOneTest'][t], 'OLPDM')
for p in paramsIter:
(X, y, volc) = genXY(labelNewsList, olpdm, topicSet, sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'LeaveOneTest', p, clfList,
topicMap=topicMap, topicId=t, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs[t], 'MacroF1')
with open('WM_OLPDM_LeaveOneTest_topic%d.pickle' % t, 'w+b') as f:
pickle.dump(bestR, f)
'''
# run all combination
params = { 'feature': ['0/1', 'tf', 'tfidf'],
'column': [['content'], ['title'], ['title', 'content']],
'statement': [False, True],
'seedWordPOSType': [('NP',), ('NP', 'NR'), ('NP', 'NN', 'NR')]
}
paramsIter = ParameterGrid(params)
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=topicMap, topicId=None)
topicLabelNewsList = dataTool.divideLabel(labelNewsList)
for topicId, labelNewsList in topicLabelNewsList.items():
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=None, topicId=topicId)
'''
'''
oldms = dict()
# all topic are mixed to train and predict/ leave-one-test
for p in paramsIter:
# generate tfidf features
print('generating tfidf features...', file=sys.stderr)
(X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['column'],
statementCol=p['statement'], feature=p['feature'])
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
# saving model for speed up
if p['seedWordPOSType'] not in oldms:
allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
print(allowedSeedWord)
oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
'tag', allowedFirstLayerWord, 'word', allowedRel)
oldms[p['seedWordPOSType']] = oldm
else:
oldm = oldms[p['seedWordPOSType']]
(X2, y2) = oldm.genXY()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
# merge (horozontally align) two matrix
X = DataTool.hstack(X1, X2)
print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)
# all train and test
prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']), list2Str(p['column']), p['statement'])
RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
# leave one test
RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
'''
| [
"[email protected]"
]
| |
fed740e3a86c5c0992ca482c58875e9b14269012 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/learnedmdtinfo/learnedmdtinfo.py | b27f8bb6f94a4485f17effd4ef1a42a2e0f065ba | [
"MIT"
]
| permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 4,210 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedMdtInfo(Base):
"""The LearnedMdtInfo class encapsulates a system managed learnedMdtInfo node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the LearnedMdtInfo property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'learnedMdtInfo'
def __init__(self, parent):
super(LearnedMdtInfo, self).__init__(parent)
@property
def Age(self):
"""The amount of time (in seconds) remaining before this TLV times out.
Returns:
number
"""
return self._get_attribute('age')
@property
def CeGroupAddress(self):
"""The CE group address contained in this data MDT TLV.
Returns:
str
"""
return self._get_attribute('ceGroupAddress')
@property
def CeSourceAddress(self):
"""The CE source address contained in this data MDT TLV.
Returns:
str
"""
return self._get_attribute('ceSourceAddress')
@property
def MdtGroupAddress(self):
"""The MDT (PE) group address contained in this data MDT TLV.
Returns:
str
"""
return self._get_attribute('mdtGroupAddress')
@property
def MdtSourceAddress(self):
"""The MDT (PE) source address contained in this data MDT TLV.
Returns:
str
"""
return self._get_attribute('mdtSourceAddress')
def find(self, Age=None, CeGroupAddress=None, CeSourceAddress=None, MdtGroupAddress=None, MdtSourceAddress=None):
"""Finds and retrieves learnedMdtInfo data from the server.
All named parameters support regex and can be used to selectively retrieve learnedMdtInfo data from the server.
By default the find method takes no parameters and will retrieve all learnedMdtInfo data from the server.
Args:
Age (number): The amount of time (in seconds) remaining before this TLV times out.
CeGroupAddress (str): The CE group address contained in this data MDT TLV.
CeSourceAddress (str): The CE source address contained in this data MDT TLV.
MdtGroupAddress (str): The MDT (PE) group address contained in this data MDT TLV.
MdtSourceAddress (str): The MDT (PE) source address contained in this data MDT TLV.
Returns:
self: This instance with matching learnedMdtInfo data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of learnedMdtInfo data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the learnedMdtInfo data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"[email protected]"
]
| |
f66d8eca2d435b8587e7ca130d23d12400ed0211 | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_Young_Physicist_20210607193741.py | 863458084f547b6a9bf662840ab4c6ff7880d758 | []
| no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | n= int(input())
x=[]
for i in range(n):
    p = list(map(int, input().split()))
x.append(p)
a=b=c=0
for i in x:
a+=i[0]
b+=i[1]
c+=i[2]
if a==b==c==0:
print("YES")
else:
print("NO")
| [
"[email protected]"
]
| |
20076d99682732c095519240df2c951bfe0aae37 | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/startup/tila_OP_SmartDelete.py | cc9ba649d4972b3487b5351419e9a875b4d2745a | [
"MIT"
]
| permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 2,449 | py | import bpy
bl_info = {
"name": "Tila : Smart Delete",
"author": "Tilapiatsu",
"version": (1, 0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D",
"category": "Object",
}
class TILA_SmartDeleteOperator(bpy.types.Operator):
bl_idname = "object.tila_smartdelete"
bl_label = "TILA: Smart Delete"
bl_options = {'REGISTER', 'UNDO'}
menu: bpy.props.BoolProperty(name='call_menu', default=False)
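    # When menu is True, execute() opens Blender's delete menu for the current
    # edit mode instead of deleting/dissolving the selection directly.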
def execute(self, context):
if context.space_data.type == 'VIEW_3D':
if self.menu:
if context.mode == 'EDIT_MESH':
bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_mesh_delete')
elif context.mode == 'EDIT_CURVE':
bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_curve_delete')
else:
if context.mode == 'EDIT_MESH':
current_mesh_mode = context.tool_settings.mesh_select_mode[:]
# if vertex mode on
if current_mesh_mode[0]:
bpy.ops.mesh.dissolve_verts()
# if edge mode on
if current_mesh_mode[1]:
bpy.ops.mesh.dissolve_edges(use_verts=True)
# if face mode on
if current_mesh_mode[2]:
bpy.ops.mesh.delete(type='FACE')
elif context.mode == 'EDIT_CURVE':
bpy.ops.curve.delete(type='VERT')
elif context.mode == 'EDIT_GPENCIL':
try:
bpy.ops.gpencil.delete(type='POINTS')
except Exception as e:
print("Warning: %r" % e)
elif context.mode == 'EDIT_METABALL':
bpy.ops.mball.delete_metaelems('EXEC_DEFAULT')
elif context.mode == 'OBJECT':
bpy.ops.object.delete(use_global=False, confirm=False)
elif context.space_data.type == 'OUTLINER':
bpy.ops.outliner.delete()
elif context.space_data.type == 'FILE_BROWSER':
bpy.ops.file.delete()
# elif context.space_data.type == 'IMAGE_EDITOR':
# layout.label("No Context! image editor")
return {'FINISHED'}
addon_keymaps = []
classes = (TILA_SmartDeleteOperator,)
register, unregister = bpy.utils.register_classes_factory(classes)
if __name__ == "__main__":
register()
| [
"[email protected]"
]
| |
3c3083f149d724f150c0f60864c4c9d6ed10495d | 27856ac3b3311728fe103911f7cbc0f20cbdfa8f | /bot/config.py | 549488d5ab4942dbe9d3762ea0d3e81b3afc860a | []
| no_license | 535521469/crawl_free_ip_proxy | 2c314f5037e45508071593bbcfa27e16751e4078 | 977c7fc422e8d49dd1d195cf8d7d1475da427e04 | refs/heads/master | 2016-09-06T13:25:25.738769 | 2013-05-01T07:28:25 | 2013-05-01T07:28:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # encoding=utf8
'''
Created on 2013-4-24
@author: corleone
'''
from bot.configutil import ConfigFile
import os
def read_config():
cfg_path = os.sep.join([os.getcwd(), os.curdir, 'fetchproxy.cfg'])
configdata = ConfigFile.readconfig(cfg_path).data
return configdata
configdata = read_config()
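# Read once at import time: every module importing this one shares the same
# configdata dict.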
| [
"[email protected]"
]
| |
ff224afdc46082bd19994708a0dc8289239eb5e4 | 9bc0d33e1c3454393ea74d85b531801d6aa28a55 | /baselines/duet/test_ranking.py | 20ddb3c6a7f5158fc67751c3eb22e468eb15f604 | [
"MIT"
]
| permissive | skallumadi/mnsrf_ranking_suggestion | 4c604ce5fc394c6d1d1efebb68af08bd2349c696 | 37cbf55d27e8595b990c0a66449e7bfe3027cc8c | refs/heads/master | 2021-01-25T14:03:23.465568 | 2017-10-09T06:40:10 | 2017-10-09T06:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | ###############################################################################
# Author: Wasi Ahmad
# Project: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/wwwfp0192-mitra.pdf
# Date Created: 7/23/2017
#
# File Description: This script evaluates test ranking performance.
###############################################################################
import torch, helper, util, data, os
from duet import DUET
from ranking_eval_functions import mean_average_precision, NDCG
args = util.get_args()
def compute_ranking_performance(model, test_batch, test_clicks, test_labels):
local_score = model.local_model(test_batch, test_clicks)
distributed_score = model.distributed_model(test_batch, test_clicks)
total_score = local_score + distributed_score
MAP = mean_average_precision(total_score, test_labels)
NDCG_at_1 = NDCG(total_score, test_labels, 1)
NDCG_at_3 = NDCG(total_score, test_labels, 3)
    NDCG_at_10 = NDCG(total_score, test_labels, 10)
return MAP, NDCG_at_1, NDCG_at_3, NDCG_at_10
def test_ranking(model, test_batches):
num_batches = len(test_batches)
map, ndcg_1, ndcg_3, ndcg_10 = 0, 0, 0, 0
for batch_no in range(1, num_batches + 1):
test_queries, test_docs, test_labels = helper.batch_to_tensor(test_batches[batch_no - 1], model.dictionary,
model.config.max_query_length,
model.config.max_doc_length)
if model.config.cuda:
test_queries = test_queries.cuda()
test_docs = test_docs.cuda()
test_labels = test_labels.cuda()
ret_val = compute_ranking_performance(model, test_queries, test_docs, test_labels)
map += ret_val[0]
ndcg_1 += ret_val[1]
ndcg_3 += ret_val[2]
ndcg_10 += ret_val[3]
map = map / num_batches
ndcg_1 = ndcg_1 / num_batches
ndcg_3 = ndcg_3 / num_batches
ndcg_10 = ndcg_10 / num_batches
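    # Note: metrics are computed per batch and then averaged over batches,
    # which matches the corpus-level figure only if all batches are the same size.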
print('MAP - ', map)
print('NDCG@1 - ', ndcg_1)
print('NDCG@3 - ', ndcg_3)
print('NDCG@10 - ', ndcg_10)
if __name__ == "__main__":
dictionary = data.Dictionary(5)
dictionary.load_dictionary(args.save_path, 'vocab.csv', 5000)
model = DUET(dictionary, args)
if 'CUDA_VISIBLE_DEVICES' in os.environ:
cuda_visible_devices = [int(x) for x in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
if len(cuda_visible_devices) > 1:
model = torch.nn.DataParallel(model, device_ids=cuda_visible_devices)
if args.cuda:
model = model.cuda()
helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict')
print('Model and dictionary loaded.')
model.eval()
test_corpus = data.Corpus(args.data, 'session_test.txt', dictionary)
print('Test set size = ', len(test_corpus.data))
test_batches = helper.batchify(test_corpus.data, args.batch_size)
print('Number of test batches = ', len(test_batches))
test_ranking(model, test_batches)
| [
"[email protected]"
]
| |
8dcc2947e1a739ffad867c6bf674d20d81008c49 | 0abd812a50ba3330734fcbb0088a74c5ad6735a2 | /python/utf8_for_emojis.py | 695f4f879e0986f5202ac4876ea2878fd0bf97aa | []
| no_license | scMarth/Learning | a914af6f6327454234e5f98dfc8cf95d6d4f8077 | ae696461c2c8edc9944879503cce01d525cf4ce0 | refs/heads/master | 2023-08-03T05:13:03.162533 | 2023-07-28T22:58:51 | 2023-07-28T22:58:51 | 120,689,926 | 2 | 0 | null | 2022-12-11T13:14:07 | 2018-02-08T00:33:42 | JavaScript | UTF-8 | Python | false | false | 3,984 | py | # convert json to csv
import arcpy, os, shutil, numpy, json, codecs
fields = {
'request' : [ \
'id', \
'master', \
'addDate', \
'addDateUnix', \
'lastAction', \
'lastActionUnix', \
'dept', \
'displayDate', \
'displayLastAction', \
'status', \
'streetId', \
'streetName', \
'streetNum', \
'crossStreetId', \
'crossStreetName', \
'cityId', \
'cityName', \
'district', \
'comments', \
'privateNotes', \
'submitter', \
'typeId', \
'typeName', \
'priorityValue', \
'latitude', \
'longitude', \
'aggregatorId', \
'aggregatorInfo', \
'origin', \
'priorityToDisplay' \
],
'activity' : [ \
'actDate', \
'actDateUnix', \
'attachments', \
'code', \
'codeDesc', \
'comments', \
'displayDate', \
'id', \
'notify', \
'requestId', \
'routeId', \
'user', \
'files', \
'isEditable' \
],
'attachment' : [ \
'createDateUnix', \
'createDate', \
'fileName', \
'id', \
'parent', \
'parentType', \
'size', \
'user' \
],
'submitter' : [ \
'id', \
'firstName', \
'lastName', \
'middleInitial', \
'address', \
'address2', \
'city', \
'state', \
'zip', \
'email', \
'phone', \
'phoneExt', \
'altPhone', \
'altPhoneExt', \
'password', \
'aggregatorId', \
'verified', \
'banned', \
'twitterId', \
'twitterScreenName', \
'notifyEmail', \
'notifyPhone', \
'notifyAltPhone', \
'notifyMail', \
'notifyPush', \
'notifyPhoneSms', \
'notifyAltPhoneSms' \
]
}
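# fields maps each record type in the JSON dump to the ordered list of CSV
# columns written for that type below.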
def escaped(inputStr):
# return inputStr
return inputStr.translate(str.maketrans({ \
# "]": r"\]", \
# "^": r"\^", \
# "$": r"\$", \
# "*": r"\*", \
# ".": r"\.", \
# "/": r"\/",\
# so far, I've seen carriage returns, line feeds, and double-quotes that can mess up records. '\'' is escaped just in case
"\r": r"\r", \
"\n": r"\n", \
"\\": r"\\", \
'\"': r'\"' \
}))
# reads a json file path then creates a fgdb for that json file in 'workspace'
# the json file contains json data that is returned from the requests/dump method
def write_json_file_to_csv(workspace, json_path):
with open(json_path) as json_file:
data = json.load(json_file)
for key in data:
if key == 'deleted':
continue
output_filepath = workspace + r'\\' + key.upper() + '.csv'
print('Writing' + output_filepath)
# delete file if it exists
if os.path.exists(output_filepath):
os.unlink(output_filepath)
with codecs.open(output_filepath, 'w', encoding='utf8') as file:
# write header
for i in range(len(fields[key]) - 1):
file.write(escaped(fields[key][i]) + ',')
file.write(escaped(fields[key][-1]) + '\n')
# write records
for i in range(len(data[key])):
record = data[key][i]
# print(record)
for j in range(len(fields[key]) - 1):
# print(j)
file.write('"' + escaped(str(record[fields[key][j]])) + '",')
file.write('"' + escaped(str(record[fields[key][-1]])) + '"\n')
print('{} records written.\n'.format(len(data[key])))
workspace = os.path.dirname(__file__) + r'\request_data'
write_json_file_to_csv(workspace, workspace + r'\response.json') | [
"[email protected]"
]
| |
1c9e3b879141282edd5569d79e16594bb83d4f29 | f51ac19ce4d1df15eba02c4b3481533087d5ef9e | /day03/xiaohuar/start.py | 06058cbe787a1bb3530230ff1fa09be09169f548 | []
| no_license | disenQF/xpy903_scrapy | c9e0818f4ad08614f933ec800d680439e3f22ea6 | 7fd1f89f2cbf046b59774071c48801dfc3c5b54d | refs/heads/master | 2022-08-09T13:53:10.104037 | 2019-09-27T09:06:15 | 2019-09-27T09:06:15 | 210,261,888 | 1 | 0 | null | 2022-07-29T22:35:50 | 2019-09-23T04:05:10 | Python | UTF-8 | Python | false | false | 156 | py | #!/usr/bin/python3
# coding: utf-8
from scrapy import cmdline
if __name__ == '__main__':
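    # Equivalent to running `scrapy crawl hua -o hua.json` from the shell.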
cmdline.execute(['scrapy', 'crawl', 'hua', '-o', 'hua.json']) | [
"[email protected]"
]
| |
17e914aac8110ab19e8448f67594dcc2b1be380c | cee96536d5115a20bd271d7ff5626da496197ac6 | /test_coco.py | ce245527e8ec25e646dbf982ae9dda955ca58fb4 | []
| no_license | YaojwDefgun/new-YOLOv1_PyTorch | 0855a8b0dcf8960057ccf82dcf341f480069a789 | f81b1b033fe2ad9a62bd61ad0bab0f47a4463f42 | refs/heads/master | 2023-01-03T21:28:34.243705 | 2020-10-22T12:21:31 | 2020-10-22T12:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,138 | py | import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from data.cocodataset import *
from data import config, BaseTransform, VOCAnnotationTransform, VOCDetection, VOC_ROOT, VOC_CLASSES
import numpy as np
import cv2
import time
from decimal import *
parser = argparse.ArgumentParser(description='YOLO Detection')
parser.add_argument('-v', '--version', default='yolo',
help='yolo.')
parser.add_argument('-d', '--dataset', default='COCO_val',
help='we use VOC, COCO_val, COCO_test-dev, to test.')
parser.add_argument('-bk', '--backbone', type=str, default='r18',
help='r18, r50, d19')
parser.add_argument('--trained_model', default='weights/coco/',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--visual_threshold', default=0.3, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to test model')
parser.add_argument('--dataset_root', default='/home/k303/object-detection/dataset/COCO/',
help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str,
help="Dummy arg so we can load in Jupyter Notebooks")
parser.add_argument('--debug', action='store_true', default=False,
help='debug mode where only one image is trained')
args = parser.parse_args()
coco_class_labels = ('background',
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella',
'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
coco_class_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,
70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def test_net(net, device, testset, transform, thresh, mode='voc'):
class_color = [(np.random.randint(255),np.random.randint(255),np.random.randint(255)) for _ in range(80)]
num_images = len(testset)
for index in range(num_images):
print('Testing image {:d}/{:d}....'.format(index+1, num_images))
if args.dataset == 'COCO_val' or args.dataset == 'COCO-test' or args.dataset == 'COCO_test-dev':
img, _ = testset.pull_image(index)
elif args.dataset == 'VOC':
img = testset.pull_image(index)
# img_id, annotation = testset.pull_anno(i)
x = torch.from_numpy(transform(img)[0][:, :, (2, 1, 0)]).permute(2, 0, 1)
x = x.unsqueeze(0).to(device)
        t0 = time.perf_counter()
y = net(x) # forward pass
detections = y
        print("detection time used ", Decimal(time.perf_counter()) - Decimal(t0), "s")
# scale each detection back up to the image
scale = np.array([[img.shape[1], img.shape[0],
img.shape[1], img.shape[0]]])
bbox_pred, scores, cls_inds = detections
# map the boxes to origin image scale
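        # (the predicted boxes appear to be normalized to [0, 1], hence the
        # width/height/width/height scaling to pixel coordinates)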
bbox_pred *= scale
for i, box in enumerate(bbox_pred):
cls_indx = cls_inds[i]
xmin, ymin, xmax, ymax = box
if scores[i] > thresh:
box_w = int(xmax - xmin)
cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), class_color[int(cls_indx)], 2)
cv2.rectangle(img, (int(xmin), int(abs(ymin)-15)), (int(xmin+box_w*0.55), int(ymin)), class_color[int(cls_indx)], -1)
cls_id = coco_class_index[int(cls_indx)]
cls_name = coco_class_labels[cls_id]
mess = '%s: %.3f' % (cls_name, scores[i])
cv2.putText(img, mess, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)
cv2.imshow('detection', img)
cv2.waitKey(0)
# print('Saving the' + str(index) + '-th image ...')
# cv2.imwrite('test_images/' + args.dataset+ '3/' + str(index).zfill(6) +'.jpg', img)
def test():
# get device
if args.cuda:
cudnn.benchmark = True
device = torch.device("cuda")
else:
device = torch.device("cpu")
# load net
num_classes = 80
if args.dataset == 'COCO_val':
cfg = config.coco_af
input_size = cfg['min_dim']
testset = COCODataset(
data_dir=args.dataset_root,
json_file='instances_val2017.json',
name='val2017',
img_size=cfg['min_dim'][0],
debug=args.debug)
elif args.dataset == 'COCO_test-dev':
cfg = config.coco_af
input_size = cfg['min_dim']
testset = COCODataset(
data_dir=args.dataset_root,
json_file='image_info_test-dev2017.json',
name='test2017',
img_size=cfg['min_dim'][0],
debug=args.debug)
elif args.dataset == 'VOC':
cfg = config.voc_af
input_size = cfg['min_dim']
testset = VOCDetection(VOC_ROOT, [('2007', 'test')], None, VOCAnnotationTransform())
# build model
if args.version == 'yolo':
from models.yolo import myYOLO
net = myYOLO(device, input_size=input_size, num_classes=num_classes, trainable=False)
print('Let us test YOLO on the %s dataset ......' % (args.dataset))
else:
print('Unknown Version !!!')
exit()
net.load_state_dict(torch.load(args.trained_model, map_location=device))
net.to(device).eval()
print('Finished loading model!')
# evaluation
test_net(net, device, testset,
BaseTransform(net.input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
thresh=args.visual_threshold)
if __name__ == '__main__':
test() | [
"[email protected]"
]
| |
93fe75d32ccb18339ef6ff1b37d1cfbe0b3c0c1e | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/dlymuh001/question2.py | 34d73fd549c0a400164a5301a2e7cc2b38ba5c3b | []
| no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | def cat():
lick = input("Did the cat lick it? (yes/no)\n")
if (lick == "yes"):
healthy = input("Is your cat healthy? (yes/no)\n")
if (healthy == "yes"):
return "Eat it"
elif (healthy == "no"):
return "Your call"
elif (lick == "no"):
return "Eat it"
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
decision = ""
seen = input("Did anyone see you? (yes/no)\n")
if (seen == "yes"):
person = input("Was it a boss/lover/parent? (yes/no)\n")
if (person == "yes"):
expensive = input("Was it expensive? (yes/no)\n")
if (expensive == "yes"):
cut_off = input("Can you cut off the part that touched the floor? (yes/no)\n")
if (cut_off == "yes"):
decision = "Eat it"
elif (cut_off == "no"):
decision = "Your call"
elif (expensive == "no"):
chocolate = input("Is it chocolate? (yes/no)\n")
if (chocolate == "yes"):
decision = "Eat it"
elif (chocolate == "no"):
decision = "Don\'t eat it"
elif (person == "no"):
decision = "Eat it"
elif (seen == "no"):
sticky = input("Was it sticky? (yes/no)\n")
if (sticky == "yes"):
raw_steak = input("Is it a raw steak? (yes/no)\n")
if (raw_steak == "yes"):
puma = input("Are you a puma? (yes/no)\n")
if (puma == "yes"):
decision = "Eat it"
elif (puma == "no"):
decision = "Don\'t eat it"
elif (raw_steak == "no"):
decision = cat()
elif (sticky == "no"):
emausaurus = input("Is it an Emausaurus? (yes/no)\n")
if (emausaurus == "yes"):
megalosaurus = input("Are you a Megalosaurus? (yes/no)\n")
if (megalosaurus == "yes"):
decision = "Eat it"
elif (megalosaurus == "no"):
decision = "Don\'t eat it"
elif (emausaurus == "no"):
decision = cat()
##output decision
print ("Decision:", decision, sep = " ", end = ".")
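# Example path: seen=no, sticky=no, emausaurus=no falls through to cat(); a
# licking, healthy cat yields "Eat it".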
| [
"[email protected]"
]
| |
9876a9af35eb3649f4f3c68253359af8c252f427 | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Front End/Kivy/project8/pro8.py | ca78447ed453ab88b83ef4fdd5468ca01be6e9f2 | []
| no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,288 | py | from kivy.uix.screenmanager import ScreenManager, Screen
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.textfield import MDTextField
from kivy.lang import Builder
from kivymd.uix.label import MDLabel, MDIcon
from helpers8 import screen_help
from kivymd.uix.button import MDRectangleFlatButton
from kivy.core.window import Window
from kivymd.uix.list import OneLineListItem
from kivy.uix.scrollview import ScrollView
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivymd.uix.button import MDFloatingActionButtonSpeedDial
from kivymd.theming import ThemableBehavior
from kivymd.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
import sqlite3 as sql
import re
from kivymd.uix.taptargetview import MDTapTargetView
KV = '''
Screen:
MDFloatingActionButton:
id: button
icon: "head-question"
pos: 10, 10
on_release: app.tap_target_start()
elevation_normal: 10
'''
Window.size = (350, 600)
class MenuScreen(Screen):
pass
class UserCustomerScreen(Screen):
pass
class ProfileScreen(Screen):
mobile: ObjectProperty()
user: ObjectProperty()
address: ObjectProperty()
def get_started(self):
print('here we go')
def add_user(self):
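        # Parameterized INSERT: sqlite3 binds the three values to the ?
        # placeholders, avoiding string formatting and SQL injection.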
con = sql.connect('user.db')
cur = con.cursor()
cur.execute(""" INSERT INTO id (mobile,user,address) VALUES (?,?,?)""", (self.mobile.text, self.user.text, self.address.text))
con.commit()
con.close()
screen = Screen()
mobile_no_string = self.mobile.text
print(self.mobile.text)
print(self.user.text)
print(self.address.text)
print(len(self.mobile.text))
        if re.match(r"^[0-9]\d{10}$", self.mobile.text) is None:
pass
else:
label = MDLabel(text='*You entered incorrect mobile number,', theme_text_color='Custom',
text_color=(0, 1, 0, 1), font_style='H6', pos_hint={'center_x': 0.5, 'center_y': 0.3})
screen.add_widget(label)
class AllItemsScreen(Screen):
pass
class RationScreen(Screen):
pass
class BellScreen(Screen):
pass
class FreshEggsScrren(Screen):
pass
class ContentNavigationDrawer(BoxLayout):
pass
class AboutScreen(Screen):
pass
class NotificationScreen(Screen):
pass
class AboutRationScreen(Screen):
pass
# Create the screen manager
sm = ScreenManager()
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(AllItemsScreen(name='usercustomer'))
sm.add_widget(ProfileScreen(name='profile'))
sm.add_widget(AllItemsScreen(name='allitems'))
sm.add_widget(AllItemsScreen(name='ration'))
sm.add_widget(AllItemsScreen(name='eggs'))
sm.add_widget(AllItemsScreen(name='aboutration'))
class DrawerList(ThemableBehavior, MDList):
pass
class DemoApp(MDApp):
data = {
'basket': 'Today Offers',
'offer': 'Discounts',
'cart': 'Cart Page',
}
try:
con = sql.connect('user.db')
cur = con.cursor()
cur.execute(""" CREATE TABLE id(
mobile text,
user text,
address text)
""")
con.commit()
con.close()
except:
pass
def build(self):
#self.theme_cls.theme_style = 'Dark'
#screen = Screen()
firstpage = Builder.load_string(screen_help)
screen = Builder.load_string(KV)
self.tap_target_view = MDTapTargetView(
widget=screen.ids.button,
title_text="VZM Store",
description_text='''Anyone can login as a user and
you can publish your products to customers''',
widget_position="left_bottom",
target_circle_color=(142/255.0, 172/255.0, 249/255.0),
)
screen.add_widget(firstpage)
return screen
def navigation_draw(self):
sm = ScreenManager()
sm.add_widget(AllItemsScreen(name='bell'))
def tap_target_start(self):
if self.tap_target_view.state == "close":
self.tap_target_view.start()
else:
self.tap_target_view.stop()
if __name__ == '__main__':
DemoApp().run()
| [
"[email protected]"
]
| |
18cb6da4a1dcaa779b3ef0b93d2dd0af8d8ec46b | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1651-shuffle-string/shuffle-string.py | 806acf60e20549daab09a587a9cd68b2470fb226 | []
| no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # Given a string s and an integer array indices of the same length.
#
# The string s will be shuffled such that the character at the ith position moves to indices[i] in the shuffled string.
#
# Return the shuffled string.
#
#
# Example 1:
#
#
# Input: s = "codeleet", indices = [4,5,6,7,0,2,1,3]
# Output: "leetcode"
# Explanation: As shown, "codeleet" becomes "leetcode" after shuffling.
#
#
# Example 2:
#
#
# Input: s = "abc", indices = [0,1,2]
# Output: "abc"
# Explanation: After shuffling, each character remains in its position.
#
#
# Example 3:
#
#
# Input: s = "aiohn", indices = [3,1,4,2,0]
# Output: "nihao"
#
#
# Example 4:
#
#
# Input: s = "aaiougrt", indices = [4,0,2,6,7,3,1,5]
# Output: "arigatou"
#
#
# Example 5:
#
#
# Input: s = "art", indices = [1,0,2]
# Output: "rat"
#
#
#
# Constraints:
#
#
# s.length == indices.length == n
# 1 <= n <= 100
# s contains only lower-case English letters.
# 0 <= indices[i] < n
# All values of indices are unique (i.e. indices is a permutation of the integers from 0 to n - 1).
#
class Solution:
def restoreString(self, s: str, indices: List[int]) -> str:
ans = ['']*len(s)
for i, j in enumerate(indices):
ans[j] = s[i]
return "".join(ans)
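    # O(n) time and O(n) extra space: each character is written directly to
    # its target index in a single pass.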
| [
"[email protected]"
]
| |
faf3b5ffc73b80f5cb5728f55014305a2b80da4e | 738aedb8035e49951f83ce3f4291eee149cad5fb | /OB Damage - Li-Hopfield Model/All the code/SLURM setup files/dir_setup_OI-flat_10_2D.py | 2e83d567ac9f3004eca045a3289376859981b1dd | []
| no_license | jkberry07/OB_PD_Model | fb453303bfa64c1a3a43c7d81d2b5373950e1f4d | 1ce30205354dc30cab4673e406988bfa76390238 | refs/heads/master | 2022-11-21T09:39:09.692654 | 2020-07-25T23:25:11 | 2020-07-25T23:25:11 | 282,358,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 16:09:06 2019
@author: wmmjk
"""
import os
f = open('dir_setup_OI-flat_10_2D.sh','w+')
here = os.path.dirname(os.path.realpath(__file__))
subdir1 = 'OI-flat_10_2D'
f.write('mkdir '+subdir1+'\n')
f.write('cp OI-flat_10_2D.py '\
        +'H0_10_2D_65Hz.npy W0_10_2D_65Hz.npy '+subdir1+'\n')
f.close()
| [
"[email protected]"
]
| |
8c28fb51601157fcd64fda227a3c3f719d5b5f4d | 69814c9c3881855e6a8981eb7fc3d869549d3cd0 | /sedfitter/sed/cube.py | 3eeab29c13a149243e1fb9efb4b41d7ba633b166 | [
"BSD-2-Clause"
]
| permissive | xueyingtianhua/sedfitter | 83c2f94ba0fdba0af56ccc8a4ad6fd92b62085ab | ec8722ec423ac684e4930fe23a98cd7b2d5b9f50 | refs/heads/master | 2021-01-13T03:29:23.032572 | 2016-07-31T21:27:58 | 2016-07-31T21:27:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,222 | py | import abc
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.extern import six
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
__all__ = ['SEDCube', 'PolarizationCube']
@six.add_metaclass(abc.ABCMeta)
class BaseCube(object):
"""
A cube to represent a cube of models.
This consists of values and uncertainties as a function of wavelength,
aperture, and models.
Parameters
----------
names : 1-d iterable, optional
The names of all the models in the cube
distance : `~astropy.units.Quantity`, optional
The distance assumed for the values
wav : 1-d `~astropy.units.Quantity`, optional
The wavelengths at which the SEDs are defined (cannot be used with ``nu``)
nu : 1-d `~astropy.units.Quantity`, optional
The frequencies at which the SEDs are defined (cannot be used with ``wav``)
apertures : 1-d `~astropy.units.Quantity`, optional
        The apertures for which the SEDs are defined
val : 3-d `~astropy.units.Quantity`, optional
The values of the fluxes or polarization
unc : 3-d `~astropy.units.Quantity`, optional
The uncertainties in the fluxes or polarization
"""
_physical_type = None
def __init__(self, valid=None, names=None, distance=None, wav=None,
nu=None, apertures=None, val=None, unc=None):
# Which models are valid
self.valid = valid
# The names of all the models
self.names = names
# The distance at which the fluxes are defined
self.distance = distance
        # The wavelengths and apertures
self.wav = wav
self.nu = nu
self.apertures = apertures
# The value and uncertainties
self.val = val
self.unc = unc
def __eq__(self, other):
try:
assert np.all(self.valid == other.valid)
assert np.all(self.names == other.names)
assert_allclose_quantity(self.distance, other.distance)
assert_allclose_quantity(self.wav, other.wav)
assert_allclose_quantity(self.nu, other.nu)
assert_allclose_quantity(self.apertures, other.apertures)
assert_allclose_quantity(self.val, other.val)
assert_allclose_quantity(self.unc, other.unc)
except AssertionError:
raise
return False
else:
return True
@property
def valid(self):
"""
Which models are valid
"""
if self.n_models is None or self._valid is not None:
return self._valid
else:
return np.ones(self.n_models)
@valid.setter
def valid(self, value):
if value is None:
self._valid = None
else:
self._valid = validate_array('valid', value, ndim=1,
shape=None if self.n_models is None else (self.n_models,))
@property
def names(self):
"""
The names of the models
"""
return self._names
@names.setter
def names(self, value):
if value is None:
self._names = None
else:
if not isinstance(value, np.ndarray):
value = np.array(value)
self._names = value
@property
def wav(self):
"""
The wavelengths at which the SEDs are defined.
"""
if self._wav is None and self._nu is not None:
return self._nu.to(u.micron, equivalencies=u.spectral())
else:
return self._wav
@wav.setter
def wav(self, value):
if value is None:
self._wav = None
else:
self._nu = None
self._wav = validate_array('wav', value, domain='positive', ndim=1,
shape=None if self.nu is None else (len(self.nu),),
physical_type='length')
@property
def nu(self):
"""
The frequencies at which the SEDs are defined.
"""
if self._nu is None and self._wav is not None:
return self._wav.to(u.Hz, equivalencies=u.spectral())
else:
return self._nu
@nu.setter
def nu(self, value):
if value is None:
self._nu = None
else:
self._wav = None
self._nu = validate_array('nu', value, domain='positive', ndim=1,
shape=None if self.wav is None else (len(self.wav),),
physical_type='frequency')
@property
def apertures(self):
"""
        The apertures at which the SEDs are defined.
"""
return self._apertures
@apertures.setter
def apertures(self, value):
if value is None:
self._apertures = None
else:
self._apertures = validate_array('apertures', value, domain='positive',
ndim=1, physical_type='length')
@property
def distance(self):
"""
The distance at which the SEDs are defined.
"""
return self._distance
@distance.setter
def distance(self, value):
if value is None:
self._distance = None
else:
self._distance = validate_scalar('distance', value, domain='positive',
physical_type='length')
@property
def val(self):
"""
The fluxes or polarization values.
"""
return self._val
@val.setter
def val(self, value):
if value is None:
self._val = value
else:
self._val = validate_array('val', value, ndim=3,
shape=(self.n_models, self.n_ap, self.n_wav),
physical_type=self._physical_type)
@property
def unc(self):
"""
The uncertainties in the fluxes or polarization.
"""
return self._unc
@unc.setter
def unc(self, value):
if value is None:
self._unc = value
else:
self._unc = validate_array('unc', value, ndim=3,
shape=(self.n_models, self.n_ap, self.n_wav),
physical_type=self._physical_type)
@property
def n_ap(self):
if self.apertures is None:
return 1
else:
return len(self.apertures)
@property
def n_wav(self):
if self.wav is None:
return None
else:
return len(self.wav)
@property
def n_models(self):
if self.names is None:
return None
else:
return len(self.names)
@classmethod
def read(cls, filename, order='nu', memmap=True):
"""
Read models from a FITS file.
Parameters
----------
filename: str
The name of the file to read the cube from.
order: str, optional
Whether to sort the SED by increasing wavelength (`wav`) or
frequency ('nu').
"""
# Create class instance
cube = cls()
        # Open FITS file
hdulist = fits.open(filename, memmap=memmap)
# Extract distance
cube.distance = hdulist[0].header['DISTANCE'] * u.cm
# Get validity
cube.valid = hdulist[0].data.astype(bool)
# Extract model names
cube.names = hdulist['MODEL_NAMES'].data['MODEL_NAME'].astype(str)
# Extract wavelengths
hdu_spectral = hdulist['SPECTRAL_INFO']
cube.wav = u.Quantity(hdu_spectral.data['WAVELENGTH'],
parse_unit_safe(hdu_spectral.columns[0].unit))
# Extract apertures
try:
hdu_apertures = hdulist['APERTURES']
except KeyError:
pass
else:
cube.apertures = u.Quantity(hdu_apertures.data['APERTURE'],
parse_unit_safe(hdu_apertures.columns[0].unit))
# Extract value
hdu_val = hdulist['VALUES']
cube.val = u.Quantity(hdu_val.data,
parse_unit_safe(hdu_val.header['BUNIT']),
copy=False)
# Extract uncertainty
try:
hdu_unc = hdulist['UNCERTAINTIES']
except KeyError:
pass
else:
cube.unc = u.Quantity(hdu_unc.data,
parse_unit_safe(hdu_unc.header['BUNIT']),
copy=False)
# The following should only use views and should therefore not be slow
if ((order == 'nu' and cube.nu[0] > cube.nu[-1]) or
(order == 'wav' and cube.wav[0] > cube.wav[-1])):
cube.wav = cube.wav[::-1]
cube.val = cube.val[:, ::-1, :]
cube.unc = cube.unc[:, ::-1, :]
return cube
def _check_all_set(self):
if self.wav is None:
raise ValueError("Wavelengths 'wav' are not set")
if self.nu is None:
raise ValueError("Frequencies 'nu' are not set")
if self.val is None:
raise ValueError("Values 'val' are not set")
if self.distance is None:
raise ValueError("Value 'distance' is not set")
def write(self, filename, overwrite=False, meta={}):
"""
Write the models to a FITS file.
Parameters
----------
filename: str
The name of the file to write the cube to.
"""
self._check_all_set()
hdulist = fits.HDUList()
# Create empty first HDU and add distance
hdu0 = fits.PrimaryHDU(data=self.valid.astype(int))
hdu0.header['distance'] = (self.distance.to(u.cm).value, 'Distance assumed for the values, in cm')
hdu0.header['NWAV'] = (self.n_wav, "Number of wavelengths")
if self.apertures is not None:
hdu0.header['NAP'] = (self.n_ap, "Number of apertures")
for key in meta:
hdu0.header[key] = meta[key]
hdulist.append(hdu0)
# Create names table
t1 = Table()
t1['MODEL_NAME'] = np.array(self.names, 'S')
hdu1 = table_to_hdu(t1)
hdu1.name = "MODEL_NAMES"
hdulist.append(hdu1)
# Create wavelength table
t2 = Table()
t2['WAVELENGTH'] = self.wav
t2['FREQUENCY'] = self.nu
hdu2 = table_to_hdu(t2)
hdu2.name = "SPECTRAL_INFO"
hdulist.append(hdu2)
# Create aperture table
if self.apertures is not None:
t3 = Table()
t3['APERTURE'] = self.apertures
hdu3 = table_to_hdu(t3)
hdu3.name = "APERTURES"
hdulist.append(hdu3)
# Create value HDU
hdu4 = fits.ImageHDU(self.val.value)
hdu4.header['BUNIT'] = self.val.unit.to_string()
hdu4.name = 'VALUES'
hdulist.append(hdu4)
# Create uncertainty HDU
if self.unc is not None:
hdu5 = fits.ImageHDU(self.unc.value)
hdu5.header['BUNIT'] = self.unc.unit.to_string()
hdu5.name = 'UNCERTAINTIES'
hdulist.append(hdu5)
# Write out HDUList
hdulist.writeto(filename, overwrite=overwrite)  # 'clobber' was renamed to 'overwrite' in astropy
class SEDCube(BaseCube):
_physical_type = ('power', 'flux', 'spectral flux density')
def get_sed(self, model_name):
try:
sed_index = np.nonzero(self.names == model_name)[0][0]
except IndexError:
raise ValueError("Model '{0}' not found in SED cube".format(model_name))
from .sed import SED
sed = SED()
sed.name = model_name
sed.distance = self.distance
sed.wav = self.wav
sed.nu = self.nu
sed.apertures = self.apertures
sed.flux = self.val[sed_index, :,:]
sed.error = self.unc[sed_index, :,:]
return sed
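# Illustrative usage sketch (the filename and model name below are
# hypothetical; assumes a cube previously written by BaseCube.write):
#
#     cube = SEDCube.read('flux.fits')
#     sed = cube.get_sed('model_0001')
#     print(sed.wav, sed.flux.shape)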
class PolarizationCube(BaseCube):
_physical_type = ('dimensionless', )
| [
"[email protected]"
]
| |
89e6683e391279884270bae480df6b3a56146ac5 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /comisr/lib/model.py | b3f2d2423bbd3b56d18ce8e090b7122e47b40d2c | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 4,846 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model functions to reconstruct models."""
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from comisr.lib import ops
# Definition of the fnet; more details can be found in the TecoGAN paper.
def fnet(fnet_input, reuse=False):
"""Flow net."""
def down_block(inputs, output_channel=64, stride=1, scope='down_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = ops.lrelu(net, 0.2)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = ops.lrelu(net, 0.2)
net = ops.maxpool(net)
return net
def up_block(inputs, output_channel=64, stride=1, scope='up_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = ops.lrelu(net, 0.2)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = ops.lrelu(net, 0.2)
new_shape = tf.shape(net)[1:-1] * 2
net = tf2.image.resize(net, new_shape)
return net
with tf.variable_scope('autoencode_unit', reuse=reuse):
net = down_block(fnet_input, 32, scope='encoder_1')
net = down_block(net, 64, scope='encoder_2')
net = down_block(net, 128, scope='encoder_3')
net = up_block(net, 256, scope='decoder_1')
net = up_block(net, 128, scope='decoder_2')
net1 = up_block(net, 64, scope='decoder_3')
with tf.variable_scope('output_stage'):
net = ops.conv2(net1, 3, 32, 1, scope='conv1')
net = ops.lrelu(net, 0.2)
net2 = ops.conv2(net, 3, 2, 1, scope='conv2')
net = tf.tanh(net2) * 24.0
# 24.0 is the maximum velocity; details can be found in the TecoGAN paper
return net
def generator_f_encoder(gen_inputs, num_resblock=10, reuse=False):
"""Generator function encoder."""
# The Bx residual blocks
def residual_block(inputs, output_channel=64, stride=1, scope='res_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = tf.nn.relu(net)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = net + inputs
return net
with tf.variable_scope('generator_unit', reuse=reuse):
# The input layer
with tf.variable_scope('input_stage'):
net = ops.conv2(gen_inputs, 3, 64, 1, scope='conv')
stage1_output = tf.nn.relu(net)
net = stage1_output
# The residual block parts
for i in range(1, num_resblock + 1,
1): # should be 16 for TecoGAN, and 10 for TecoGANmini
name_scope = 'resblock_%d' % (i)
net = residual_block(net, 64, 1, name_scope)
return net
def generator_f_decoder(net,
gen_inputs,
gen_output_channels,
vsr_scale,
reuse=False):
"""Generator function decoder."""
with tf.variable_scope('generator_unit', reuse=reuse):
with tf.variable_scope('conv_tran2highres'):
if vsr_scale == 2:
net = ops.conv2_tran(
net, kernel=3, output_channel=64, stride=2, scope='conv_tran1')
net = tf.nn.relu(net)
if vsr_scale == 4:
net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran1')
net = tf.nn.relu(net)
net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran2')
net = tf.nn.relu(net)
with tf.variable_scope('output_stage'):
net = ops.conv2(net, 3, gen_output_channels, 1, scope='conv')
low_res_in = gen_inputs[:, :, :, 0:3] # ignore warped pre high res
bicubic_hi = ops.bicubic_x(low_res_in, scale=vsr_scale) # can put on GPU
net = net + bicubic_hi
net = ops.preprocess(net)
return net
# Definition of the generator.
def generator_f(gen_inputs,
gen_output_channels,
num_resblock=10,
vsr_scale=4,
reuse=False):
net = generator_f_encoder(gen_inputs, num_resblock, reuse)
net = generator_f_decoder(net, gen_inputs, gen_output_channels, vsr_scale,
reuse)
return net
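# Illustrative usage sketch (the placeholder shape below is hypothetical;
# gen_inputs[..., 0:3] is expected to hold the low-resolution frame, with any
# remaining channels carrying the warped previous output):
#
#     lr_input = tf.placeholder(tf.float32, [1, 32, 32, 51])
#     sr_output = generator_f(lr_input, gen_output_channels=3,
#                             num_resblock=10, vsr_scale=4)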
| [
"[email protected]"
]
| |
8022f12b6bdc80820f79b896a18b71ea2aca5b72 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03844/s016048975.py | 61fa23d2d0067a90a0b12e2ac15e028d6958db57 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a,b,c = input().split()
print(int(a) + int(c)) if b =="+" else print(int(a) - int(c)) | [
"[email protected]"
]
| |
06d28ed6d203c6790e5e808bd8033beb090b6c7d | 9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1 | /examples/mcscf/31-cr2_scan/cr2-scan.py | cd23eb7028ad7e19891993db6645713ad6ae6e11 | [
"Apache-2.0"
]
| permissive | sunqm/pyscf | 566bc2447d8072cff442d143891c12e6414de01c | dd179a802f0a35e72d8522503172f16977c8d974 | refs/heads/master | 2023-08-15T18:09:58.195953 | 2023-03-27T21:02:03 | 2023-03-27T21:02:03 | 159,149,096 | 80 | 26 | Apache-2.0 | 2022-02-05T00:19:24 | 2018-11-26T10:10:23 | Python | UTF-8 | Python | false | false | 2,329 | py | #!/usr/bin/env python
'''
Scan the Cr2 molecule singlet-state dissociation curve.
Similar to the example mcscf/30-hf_scan, we need to control the CASSCF initial
guess using the functions project_init_guess and sort_mo. In this example, the
sort_mo function is replaced by the symmetry-adapted version
``sort_mo_by_irrep``.
'''
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
ehf = []
emc = []
def run(b, dm, mo, ci=None):
mol = gto.Mole()
mol.verbose = 5
mol.output = 'cr2-%2.1f.out' % b
mol.atom = [
['Cr',( 0.000000, 0.000000, -b/2)],
['Cr',( 0.000000, 0.000000, b/2)],
]
mol.basis = 'cc-pVTZ'
mol.symmetry = 1
mol.build()
mf = scf.RHF(mol)
mf.level_shift = .4
mf.max_cycle = 100
mf.conv_tol = 1e-9
ehf.append(mf.scf(dm))
mc = mcscf.CASSCF(mf, 12, 12)
mc.fcisolver.conv_tol = 1e-9
# The multi-threaded FCI solver is not stable enough for this system
mc.fcisolver.threads = 1
if mo is None:
# the initial guess for b = 1.5
ncore = {'A1g':5, 'A1u':5} # Optional. Program will guess if not given
ncas = {'A1g':2, 'A1u':2,
'E1ux':1, 'E1uy':1, 'E1gx':1, 'E1gy':1,
'E2ux':1, 'E2uy':1, 'E2gx':1, 'E2gy':1}
mo = mcscf.sort_mo_by_irrep(mc, mf.mo_coeff, ncas, ncore)
else:
mo = mcscf.project_init_guess(mc, mo)
emc.append(mc.kernel(mo, ci)[0])
mc.analyze()
return mf.make_rdm1(), mc.mo_coeff, mc.ci
dm = mo = ci = None
for b in numpy.arange(1.5, 3.01, .1):
dm, mo, ci = run(b, dm, mo, ci)
for b in reversed(numpy.arange(1.5, 3.01, .1)):
dm, mo, ci = run(b, dm, mo, ci)
x = numpy.arange(1.5, 3.01, .1)
ehf1 = ehf[:len(x)]
ehf2 = ehf[len(x):]
emc1 = emc[:len(x)]
emc2 = emc[len(x):]
ehf2.reverse()
emc2.reverse()
with open('cr2-scan.txt', 'w') as fout:
fout.write(' HF 1.5->3.0 CAS(12,12) HF 3.0->1.5 CAS(12,12)\n')
for i, xi in enumerate(x):
fout.write('%2.1f %12.8f %12.8f %12.8f %12.8f\n'
% (xi, ehf1[i], emc1[i], ehf2[i], emc2[i]))
import matplotlib.pyplot as plt
plt.plot(x, ehf1, label='HF,1.5->3.0')
plt.plot(x, ehf2, label='HF,3.0->1.5')
plt.plot(x, emc1, label='CAS(12,12),1.5->3.0')
plt.plot(x, emc2, label='CAS(12,12),3.0->1.5')
plt.legend()
plt.show()
| [
"[email protected]"
]
| |
66eee5b3e6193fdd3fbf93572531c18f032831fc | 5905ed0409c332492409d7707528452b19692415 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/artifacts/print_settings/gradle.py | 82a99b6bd2e49073fe4da73c767a02d9c12bb651 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | millerthomasj/google-cloud-sdk | c37b7ddec08afadec6ee4c165153cd404f7dec5e | 3deda6696c3be6a679689b728da3a458c836a24e | refs/heads/master | 2023-08-10T16:03:41.819756 | 2021-09-08T00:00:00 | 2021-09-08T15:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for forming settings for gradle."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
SERVICE_ACCOUNT_TEMPLATE = """\
// Move the secret to ~/.gradle.properties
def artifactRegistryMavenSecret = "{password}"
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
}}
publishing {{
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
}}
}}
}}
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
authentication {{
basic(BasicAuthentication)
}}
}}
}}
"""
NO_SERVICE_ACCOUNT_TEMPLATE = """\
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
id "com.google.cloud.artifactregistry.gradle-plugin" version "{extension_version}"
}}
publishing {{
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
}}
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
"""
| [
"[email protected]"
]
| |
c82b4cc15838b566a9c92ee0f9e2ac5b48dae623 | 380dfac9b68ef8663db5a9d1b30fc75636dec3d3 | /billforward/apis/roles_api.py | de864151b5300fd22c12648fd85530ca1110525f | [
"Apache-2.0"
]
| permissive | billforward/bf-python | d2f549e0c465d0dc78152b54413cac4216025a64 | d2b812329ca3ed1fd94364d7f46f69ad74665596 | refs/heads/master | 2021-12-13T22:10:16.658546 | 2018-06-19T14:44:45 | 2018-06-19T14:44:45 | 63,268,011 | 2 | 1 | Apache-2.0 | 2021-12-06T12:59:41 | 2016-07-13T17:57:37 | Python | UTF-8 | Python | false | false | 30,334 | py | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RolesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_role(self, role_request, **kwargs):
"""
Create a new role.
{\"nickname\":\"Create a new role\",\"request\":\"createRoleRequest.html\",\"response\":\"createRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_role(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BillingEntityBase role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_role_with_http_info(role_request, **kwargs)
else:
(data) = self.create_role_with_http_info(role_request, **kwargs)
return data
def create_role_with_http_info(self, role_request, **kwargs):
"""
Create a new role.
{\"nickname\":\"Create a new role\",\"request\":\"createRoleRequest.html\",\"response\":\"createRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_role_with_http_info(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BillingEntityBase role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role_request']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role_request' is set
if ('role_request' not in params) or (params['role_request'] is None):
raise ValueError("Missing the required parameter `role_request` when calling `create_role`")
resource_path = '/roles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'role_request' in params:
body_params = params['role_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_roles(self, **kwargs):
"""
Retrieves a collection of all roles. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve all roles\",\"response\":\"getRoleAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_roles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations:
:param int offset: The offset from the first role to return.
:param int records: The maximum number of roles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired roles should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_roles_with_http_info(**kwargs)
else:
(data) = self.get_all_roles_with_http_info(**kwargs)
return data
def get_all_roles_with_http_info(self, **kwargs):
"""
Retrieves a collection of all roles. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve all roles\",\"response\":\"getRoleAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_roles_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations:
:param int offset: The offset from the first role to return.
:param int records: The maximum number of roles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired roles should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_roles" % key
)
params[key] = val
del params['kwargs']
resource_path = '/roles'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_role_by_id(self, role, **kwargs):
"""
Retrieves a single role, specified by the ID parameter.
{\"nickname\":\"Retrieve a role\",\"response\":\"getRoleByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_role_by_id(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:param bool include_retired: Whether retired roles should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_role_by_id_with_http_info(role, **kwargs)
else:
(data) = self.get_role_by_id_with_http_info(role, **kwargs)
return data
def get_role_by_id_with_http_info(self, role, **kwargs):
"""
Retrieves a single role, specified by the ID parameter.
{\"nickname\":\"Retrieve a role\",\"response\":\"getRoleByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_role_by_id_with_http_info(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:param bool include_retired: Whether retired roles should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role', 'organizations', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_role_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role' is set
if ('role' not in params) or (params['role'] is None):
raise ValueError("Missing the required parameter `role` when calling `get_role_by_id`")
resource_path = '/roles/{role}'.replace('{format}', 'json')
path_params = {}
if 'role' in params:
path_params['role'] = params['role']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def remove_permission_from_role(self, role, resource, action, **kwargs):
"""
Revokes a particular permission
{\"nickname\":\"Remove Permission from role\",\"response\":\"removePermissionFromGroup.html\",\"request\":\"removePermissionFromGroupRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_permission_from_role(role, resource, action, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param str resource: (required)
:param str action: (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_permission_from_role_with_http_info(role, resource, action, **kwargs)
else:
(data) = self.remove_permission_from_role_with_http_info(role, resource, action, **kwargs)
return data
def remove_permission_from_role_with_http_info(self, role, resource, action, **kwargs):
"""
Revokes a particular permission
{\"nickname\":\"Remove Permission from role\",\"response\":\"removePermissionFromGroup.html\",\"request\":\"removePermissionFromGroupRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_permission_from_role_with_http_info(role, resource, action, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param str resource: (required)
:param str action: (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role', 'resource', 'action', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_permission_from_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role' is set
if ('role' not in params) or (params['role'] is None):
raise ValueError("Missing the required parameter `role` when calling `remove_permission_from_role`")
# verify the required parameter 'resource' is set
if ('resource' not in params) or (params['resource'] is None):
raise ValueError("Missing the required parameter `resource` when calling `remove_permission_from_role`")
# verify the required parameter 'action' is set
if ('action' not in params) or (params['action'] is None):
raise ValueError("Missing the required parameter `action` when calling `remove_permission_from_role`")
resource_path = '/roles/{role}/permission/{resource}/{action}'.replace('{format}', 'json')
path_params = {}
if 'role' in params:
path_params['role'] = params['role']
if 'resource' in params:
path_params['resource'] = params['resource']
if 'action' in params:
path_params['action'] = params['action']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def revoke_role(self, role, **kwargs):
"""
Revokes a role
{\"nickname\":\"Revoke role\",\"response\":\"revokeRole.html\",\"request\":\"revokeRoleRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.revoke_role(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.revoke_role_with_http_info(role, **kwargs)
else:
(data) = self.revoke_role_with_http_info(role, **kwargs)
return data
def revoke_role_with_http_info(self, role, **kwargs):
"""
Revokes a role
{\"nickname\":\"Revoke role\",\"response\":\"revokeRole.html\",\"request\":\"revokeRoleRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.revoke_role_with_http_info(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method revoke_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role' is set
if ('role' not in params) or (params['role'] is None):
raise ValueError("Missing the required parameter `role` when calling `revoke_role`")
resource_path = '/roles/{role}'.replace('{format}', 'json')
path_params = {}
if 'role' in params:
path_params['role'] = params['role']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_role(self, role_request, **kwargs):
"""
Update a role.
{\"nickname\":\"Update a role\",\"request\":\"updateRoleRequest.html\",\"response\":\"updateRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_role(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateRoleRequest role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_role_with_http_info(role_request, **kwargs)
else:
(data) = self.update_role_with_http_info(role_request, **kwargs)
return data
def update_role_with_http_info(self, role_request, **kwargs):
"""
Update a role.
{\"nickname\":\"Update a role\",\"request\":\"updateRoleRequest.html\",\"response\":\"updateRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_role_with_http_info(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateRoleRequest role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role_request']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role_request' is set
if ('role_request' not in params) or (params['role_request'] is None):
raise ValueError("Missing the required parameter `role_request` when calling `update_role`")
resource_path = '/roles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'role_request' in params:
body_params = params['role_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
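# Illustrative usage sketch (the role name below is hypothetical; the client
# must be configured with valid credentials first):
#
#     api = RolesApi()
#     roles = api.get_all_roles(records=10)
#     role = api.get_role_by_id('admin')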
| [
"[email protected]"
]
| |
30743d0660f99cca916c12814e164669ead70026 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/expr_lists-45.py | 9aff4784319ac14303406fc9b8c82678ed9274ee | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | x:[int] = None
y:[object] = None
z:[bool] = None
o:object = None
x = [1, $Exp, 3]
x = []
y = [1, True]
z = [False, True]
x = None
o = x
o = x = [1]
| [
"[email protected]"
]
| |
0b5b7f88519fa9b9b26e3ad6652ff1a4672c1541 | f2c773e7ccdd60caf5a7c062305cfcd14d11beec | /AR_Scripts_1.0.16_R21_Deprecated/AR_SwapObjects.py | 85e406b4c862dc6ca5dea0e0ae5157af60259cd3 | []
| no_license | aturtur/cinema4d-scripts | 4ccfbc3403326a79076d9bcf001189cd5427f46a | a87fc6c835db5d205f8428cc67ccd30fdd4b4d4b | refs/heads/master | 2023-07-03T13:34:58.735879 | 2023-06-19T09:57:22 | 2023-06-19T09:57:22 | 63,731,563 | 316 | 49 | null | 2022-04-24T02:31:17 | 2016-07-19T22:15:05 | Python | UTF-8 | Python | false | false | 2,091 | py | """
AR_SwapObjects
Author: Arttu Rautio (aturtur)
Website: http://aturtur.com/
Name-US: AR_SwapObjects
Version: 1.0
Description-US: Swaps selected objects with each other. Holding SHIFT while executing the script also swaps the objects' places in the hierarchy.
Written for Maxon Cinema 4D R21.207
Python version 2.7.14
"""
# Libraries
import c4d
# Functions
def swapObjects():
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
bc = c4d.BaseContainer() # Initialize Base Container
tempNullA = c4d.BaseObject(c4d.Onull) # Initialize temporary Null object
tempNullB = c4d.BaseObject(c4d.Onull)
selection = doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_NONE) # Get selection
objA = selection[0] # Get object A
objB = selection[1] # Get object B
matA = objA.GetMg() # Get object A's global matrix
matB = objB.GetMg() # Get object B's global matrix
doc.AddUndo(c4d.UNDOTYPE_CHANGE, objA) # Add undo for changing object A
doc.AddUndo(c4d.UNDOTYPE_CHANGE, objB) # Add undo for changing object B
tempNullA.InsertBefore(objA) # Insert temp Null A before object A
tempNullB.InsertBefore(objB) # Insert temp Null B before object B
if c4d.gui.GetInputState(c4d.BFM_INPUT_KEYBOARD,c4d.BFM_INPUT_CHANNEL,bc):
if bc[c4d.BFM_INPUT_QUALIFIER] & c4d.QSHIFT: # If 'shift' key is pressed
objA.InsertAfter(tempNullB) # Move object
objB.InsertAfter(tempNullA) # Move object
objA.SetMg(matB) # Set new matrix to object A
objB.SetMg(matA) # Set new matrix to object B
tempNullA.Remove() # Delete temporary objects
tempNullB.Remove()
return True # Everything is fine
def main():
try: # Try to execute following script
doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
doc.StartUndo() # Start recording undos
swapObjects() # Run the script
doc.EndUndo() # Stop recording undos
c4d.EventAdd() # Refresh Cinema 4D
except: # If something went wrong
pass # Do nothing
# Execute main()
if __name__=='__main__':
main() | [
"[email protected]"
]
| |
351f10db84028c7b90967a57fd7c5947cf1c2ff1 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/트리/00_트리.py | bc90198b1bcad51c6c1ca207c0bc74de3b890221 | []
| no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | import sys
sys.stdin = open('input_00.txt', 'r')
def preorder(n):
    if n:  # if the node exists (non-zero)
        print(n, end=' ')
        preorder(tree[n][0])
        preorder(tree[n][1])
def inorder(n):
    if n:  # if the node exists (non-zero)
        inorder(tree[n][0])
        print(n, end=' ')
        inorder(tree[n][1])
def postorder(n):
    if n:  # if the node exists (non-zero)
        postorder(tree[n][0])
        postorder(tree[n][1])
        print(n, end=' ')
# Read the tree input.
N = int(input())  # number of nodes
E = 12  # number of edges
tree = [[0, 0, 0] for _ in range(N + 1)]  # [left child, right child, parent]
arr = list(map(int, input().split()))
for i in range(E):
if tree[arr[2 * i]][0] == 0:
tree[arr[2 * i]][0] = arr[2 * i + 1]
else:
tree[arr[2 * i]][1] = arr[2 * i + 1]
tree[arr[2 * i + 1]][2] = arr[2 * i]
print(arr)
preorder(1)
print()
inorder(1)
print()
postorder(1)
| [
"[email protected]"
]
| |
3b89389daeeefbd5bfb316297767be67e33037ad | aef5c3a8fc1a0849e8ed7dcdf4ea0446f64c342c | /zapd/admin.py | 11cffccef493d84b52ed6a47db8f4850407810cd | []
| no_license | eoliveros/zapd | c21e05dde1b318870483a2a34799fffdd1fcbd69 | b17afbc5b05fcbd27370d9ea9e6c2e6fc6bed7d6 | refs/heads/master | 2022-10-16T02:01:49.969941 | 2020-06-16T00:36:15 | 2020-06-16T00:36:15 | 171,779,747 | 0 | 0 | null | 2019-02-21T01:43:15 | 2019-02-21T01:43:14 | null | UTF-8 | Python | false | false | 1,402 | py | from flask import url_for
import flask_admin
from flask_admin import helpers as admin_helpers
from app_core import app, db
from models import security, RestrictedModelView, ProposalModelView, UserModelView, TransactionRestrictedModelView, AMWalletRestrictedModelView, \
Role, User, Category, Proposal, Transaction, CreatedTransaction, AMWallet, AMDevice
# Create admin
admin = flask_admin.Admin(
app,
'ZAPD Admin',
base_template='my_master.html',
template_mode='bootstrap3',
)
# Add model views
admin.add_view(UserModelView(User, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Role, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Category, db.session, category='Admin'))
admin.add_view(AMWalletRestrictedModelView(AMWallet, db.session, name='App Metrics - Wallet', category='Admin'))
admin.add_view(ProposalModelView(Proposal, db.session))
admin.add_view(TransactionRestrictedModelView(Transaction, db.session, category='ZAPD'))
admin.add_view(RestrictedModelView(CreatedTransaction, db.session, category='ZAPD'))
# define a context processor for merging flask-admin's template context into the
# flask-security views.
@security.context_processor
def security_context_processor():
return dict(
admin_base_template=admin.base_template,
admin_view=admin.index_view,
h=admin_helpers,
get_url=url_for
)
| [
"[email protected]"
]
| |
e58160be043c25f1567117706578c6627e844ccb | bf72636241a871d9a7519a577395f9d1fd7b38c2 | /tools_box/_selling/doctype/daily_route_activity/daily_route_activity.py | cfad18b8a1834abd2997ab43008e8996ba9faa94 | [
"MIT"
]
| permissive | Athenolabs/Tools-Box | fc6400d9d88cc8ba0a3d48e38a0918f0022ce914 | c4e4e368a0bec115f84bc33ae011d7e0fd02932f | refs/heads/master | 2021-01-23T10:58:36.243182 | 2017-05-30T13:44:04 | 2017-05-30T13:44:04 | 93,116,515 | 2 | 1 | null | 2017-06-02T01:58:32 | 2017-06-02T01:58:31 | null | UTF-8 | Python | false | false | 284 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DailyRouteActivity(Document):
pass
| [
"[email protected]"
]
| |
15f753d76464d7abfd4fcf2a4b8dd8743d72fd97 | 462a30862d0303d1d1beeebb2d33bb2a625d5336 | /catchpy/settings/local.py | 995d6763bc4efc46baa39e79fbf3ac479732de8e | []
| no_license | nmaekawa/catchpy | 5eca9715c23e71ce4f6ef489607da0b0e46a14a3 | 50783648804e5b6ce57dcb7d00ba1038fd23ffdc | refs/heads/master | 2023-08-03T09:25:44.838480 | 2023-04-18T19:05:20 | 2023-04-18T19:05:20 | 98,905,832 | 10 | 3 | null | 2023-08-14T18:47:50 | 2017-07-31T15:50:19 | Python | UTF-8 | Python | false | false | 506 | py | from .dev import *
DEBUG = True
# Django Extensions
# http://django-extensions.readthedocs.org/en/latest/
try:
import django_extensions
INSTALLED_APPS += ['django_extensions']
except ImportError:
pass
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.org/en/latest/
try:
import debug_toolbar
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
DEBUG_TOOLBAR_PATCH_SETTINGS = True
except ImportError:
pass
| [
"[email protected]"
]
| |
b8405ccbf1b037622cfb344604a81fcef9306518 | 1f5f8f95530003c6c66419519d78cb52d21f65c0 | /projects/golem_gui/tests/users/create_user/add_project_permission.py | 5ac16e37d8543470a90751eb0751b5bc624ee3b4 | []
| no_license | golemhq/golem-tests | c5d3ab04b1ea3755d8b812229feb60f513d039ac | dff8fd3a606c3d1ef8667aece6fddef8ac441230 | refs/heads/master | 2023-08-17T23:05:26.286718 | 2021-10-04T20:34:17 | 2021-10-04T20:34:17 | 105,579,436 | 4 | 1 | null | 2018-11-19T00:14:24 | 2017-10-02T20:05:55 | Python | UTF-8 | Python | false | false | 513 | py | from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages.users import create_user
def setup(data):
common.access_golem(data.env.url, data.env.admin)
create_user.navigate_to_page()
def test(data):
project = 'project1'
permission = 'admin'
create_user.select_project(project)
create_user.select_permission(permission)
actions.click(create_user.add_permission_button)
create_user.assert_project_permission_in_table(project, permission)
| [
"[email protected]"
]
| |
2e1b14b5791d705897342227ca9a919f4399bccf | 73f1075c99338984795f4bd7bd7b9563ecc36d87 | /Binary_Search/74.Search_a_2D_Matrix.py | 033d554ced3b5db33c3e0b08155a2a7e62fb0138 | []
| no_license | grg909/LCtrip | 314bd173f87ec98ff13234bdd148c76482db2df7 | 96836da905526b47f0cdee8c0bb4790c4cdd6c79 | refs/heads/master | 2020-09-12T13:38:52.486189 | 2020-08-28T14:22:50 | 2020-08-28T14:22:50 | 222,442,472 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # -*- coding: UTF-8 -*-
# @Date : 2019/12/11
# @Author : WANG JINGE
# @Email : [email protected]
# @Language: python 3.7
"""
"""
# Approach 1: flatten the 2D matrix and binary-search it as a single sorted array.
class Solution:
    def searchMatrix(self, matrix, target):
        if not matrix or not matrix[0]:
            return False
        n, m = len(matrix), len(matrix[0])
        start, end = 0, n * m - 1
        while start + 1 < end:
            mid = (start + end) // 2
            x, y = mid // m, mid % m  # map the flat index back to (row, column)
            if matrix[x][y] > target:
                end = mid
            else:
                start = mid
        x, y = start // m, start % m
        if matrix[x][y] == target:
            return True
        x, y = end // m, end % m
        if matrix[x][y] == target:
            return True
        return False
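# Illustrative usage sketch:
#
#     s = Solution()
#     assert s.searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20]], 11) is True
#     assert s.searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20]], 8) is False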
| [
"[email protected]"
]
| |
cc7b250a3c9f0394d2b4a95cc17b250ac8fc17f7 | bd2a975f5f6cd771393f994ebd428e43142ee869 | /new_render_data/input/p/script/abort/back20180419/CG/C4d/process/AnalyzeC4d.py | 8701fce3cb9979a4512eb94493a2858b24657c12 | []
| no_license | sol87/Pycharm_python36 | 1a297c9432462fc0d3189a1dc7393fdce26cb501 | fa7d53990040d888309a349cfa458a537b8d5f04 | refs/heads/master | 2023-03-16T10:35:55.697402 | 2018-11-08T09:52:14 | 2018-11-08T09:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
# Author: kaname
# QQ: 1394041054
""" C4d analyzer """
# RUN:
# 1. From C4Dloader.py to loading RBAnalzer.py to do it.
# 2. AnalyzeC4d.py loading C4Dloader.py to do it.
import os
import sys
import subprocess
import string
import logging
import time
import shutil
from C4d import C4d
from C4dLoader import C4dLoader
from C4dPluginManager import C4dPlugin, C4dPluginMgr
from CommonUtil import RBCommon as CLASS_COMMON_UTIL
class AnalyzeC4d(C4d):
def __init__(self, **paramDict):
C4d.__init__(self, **paramDict)
self.format_log('AnalyzeC4d.init', 'start')
self.G_TIPS_TXT_NODE=os.path.join(self.G_WORK_RENDER_TASK_CFG, 'tips.json').replace('\\','/')
for key, value in list(self.__dict__.items()):
self.G_DEBUG_LOG.info(key + '=' + str(value))
self.format_log('done','end')
def RB_MAP_DRIVE(self):  # 2. override
# self.format_log('[map drive]', '[start]')  # original label '映射盘符' was gbk-encoded Chinese
self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.start.....]')
if self.G_RENDER_OS != '0':
#delete all mappings
CLASS_COMMON_UTIL.del_net_use()
CLASS_COMMON_UTIL.del_subst()
#net use
b_flag = False
if self.G_CG_NAME == 'C4d':
map_root = os.path.basename(self.G_INPUT_PROJECT_PATH)
print(map_root + '@KANADAmmmmmmm')
map_dict = os.path.join(self.G_INPUT_PROJECT_PATH)
print(map_root + '@KANADAnnnnnnn')
map_cmd = 'net use %s: "%s"' % (map_root, map_dict)
CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
# #base RB_MAP_DRIVE
# if self.G_CG_NAME != 'Max' and self.G_TASK_JSON_DICT['system_info'].has_key('mnt_map'):
# map_dict = self.G_TASK_JSON_DICT['system_info']['mnt_map']
# for key,value in map_dict.items():
# value = os.path.normpath(value)
# map_cmd = 'net use "%s" "%s"' % (key,value)
# CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
# if key.lower() == 'b:':
# b_flag = True
if not b_flag:
map_cmd_b = 'net use B: "%s"' % (os.path.normpath(self.G_PLUGIN_PATH))
CLASS_COMMON_UTIL.cmd(map_cmd_b,my_log=self.G_DEBUG_LOG,try_count=3)
self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.end.....]')
self.format_log('done','end')
def RB_CONFIG(self):
self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.start......]')
self.plugin_config()
self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.end......]')
def RB_RENDER(self):
self.G_DEBUG_LOG.info('[c4d.RBanalyse.start.....]')
self.G_FEE_PARSER.set('render','start_time',str(int(time.time())))
cg_ver = self.G_CG_VERSION
task_id = self.G_TASK_ID
cg_file = self.G_INPUT_CG_FILE
task_json = self.G_TASK_JSON
asset_json = self.G_ASSET_JSON
tips_json = self.G_TIPS_TXT_NODE
c4d_loader = C4dLoader(cg_ver, task_id, cg_file, task_json, asset_json, tips_json)
c4d_loader.execute()
self.G_FEE_PARSER.set('render','end_time',str(int(time.time())))
self.G_DEBUG_LOG.info('[c4d.RBanalyse.end.....]')
| [
"[email protected]"
]
| |
41363247c358198e8cecea4460b8076fd9c34398 | 01301e5f486883865e3696f38ef913a232958343 | /antlir/compiler/test_images/print_ok.py | a38dabfa12fe5a4e77e3b8b8fd720897c11764b0 | [
"MIT"
]
| permissive | SaurabhAgarwala/antlir | 85fb09c87dafde56622b4107224b41f873f66442 | d9513d35d3eaa9d28717a40057a14d099c6ec775 | refs/heads/main | 2023-06-25T09:05:30.619684 | 2021-07-01T23:04:57 | 2021-07-01T23:06:11 | 382,355,446 | 0 | 0 | MIT | 2021-07-02T13:30:39 | 2021-07-02T13:30:39 | null | UTF-8 | Python | false | false | 294 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'Prints the unicode string "ok" via the `print` function to `stdout`, on 1 line'
print("ok")
| [
"[email protected]"
]
| |
0f276a9b40c35cb921b2f49748656afb5c5442d9 | 0f0a7adfae45e07a896c5cd5648ae081d4ef7790 | /python数据结构/慕课测试题/打印实心矩形.py | f31f5eb66436884a6fbfd6372e3042c933196836 | []
| no_license | renlei-great/git_window- | e2c578544c7a8bdd97a7a9da7be0464d6955186f | 8bff20a18d7bbeeaf714aa49bf15ab706153cc28 | refs/heads/master | 2021-07-19T13:09:01.075494 | 2020-06-13T06:14:37 | 2020-06-13T06:14:37 | 227,722,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | args = input().split()
alist = [int(i) for i in args]
# min_number = min(alist)
min_number = alist[0]
for i in alist:
if min_number > i:
min_number = i
print(min_number)
| [
"[email protected]"
]
| |
166d339829928c03eae087789acaafe7f5329a46 | 267f2c09420436e97275986f825045cbe81fd3ec | /buy & sell vinyl records 3.5.3.py | 5215d6374e530fd31aa37d163087968486904c55 | []
| no_license | aiqbal-hhs/91906-7 | f1ddc21846bee6dd9dcf4f75bdabe68989390769 | 8d6aadedff8c6585c204a256b5bd3ad8294a815f | refs/heads/main | 2023-05-15T00:17:41.407536 | 2021-06-04T10:32:21 | 2021-06-04T10:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,680 | py | from functools import partial
from tkinter import *
import random
root = Tk()
stock_list = ["Igor - Tyler The Creator",
"Good Kid Maad City - Kendrick Lamar",
"Demon Days - Gorillaz"]
class stock:
def __init__(self, name, amount):
self.name = name
self.amount = amount
stock_list.append(self)
##########################################buy frame######################################################
#formatting variables....
background_color = "orange"
# converter Main Screen GUI...
buy_frame = Frame(width=360, bg=background_color)
buy_frame.grid()
# buy title (row 0)
buy_label = Label(buy_frame, text="Buy page",
font=("Arial", "16", "bold"),
bg=background_color,
padx=10, pady=5)
buy_label.grid(row=0, column=0)
# buy heading (label, row 1)
buy_heading = Label(buy_frame, text="Buy heading goes here",
font=("Arial", "12"),
bg=background_color,
padx=10, pady=5)
buy_heading.grid(row=1, column=0)
# buy text (label, row 2)
buy_text = Label(buy_frame, text="this is where you buy vinyls",
font="Arial 9 italic", wrap=250, justify=LEFT,
bg=background_color,
padx=10, pady=10)
buy_text.grid(row=2, column=0)
#entry for amount of vinyls the user wants to buy
e = Entry(buy_frame, width=25)
e.insert(0,"")
e.grid(row=4, column=1)
myButton = Button(buy_frame, text="Enter", font=("Arial", "14"),
padx=10, pady=10, command=help)
myButton.grid(row=5, column=1)
#Creating the Dropdown Menu
chosen_option = StringVar()
option_menu = OptionMenu(buy_frame, chosen_option, stock_list[0], *stock_list)
option_menu.grid(row=1, column=1)
##########################################sell frame######################################################
#formatting variables....
sell_background_color = "blue"
# converter Main Screen GUI...
sell_frame = Frame(width=360, bg=sell_background_color)
sell_frame.grid()
# sell title (row 0)
sell_label = Label(sell_frame, text="Sell page",
font=("Arial", "16", "bold"),
bg=sell_background_color,
padx=10, pady=5)
sell_label.grid(row=0, column=0)
# sell heading (label, row 1)
sell_heading = Label(sell_frame, text="sell heading goes here",
font=("Arial", "12"),
bg=sell_background_color,
padx=10, pady=5)
sell_heading.grid(row=1, column=0)
# sell text (label, row 2)
sell_text = Label(sell_frame, text="this is where you sell vinyls",
font="Arial 9 italic", wrap=250, justify=LEFT,
bg=sell_background_color,
padx=10, pady=10)
sell_text.grid(row=2, column=0)
#entry for amount of vinyls the user wants to buy
sell_e = Entry(sell_frame, width=25)
sell_e.insert(0,"")
sell_e.grid(row=4, column=1)
sell_Button = Button(sell_frame, text="Enter", font=("Arial", "14"),
padx=10, pady=10, command=help)
sell_Button.grid(row=5, column=1)
#Creating the Dropdown Menu
sell_chosen_option = StringVar()
sell_option_menu = OptionMenu(sell_frame, sell_chosen_option, stock_list[0], *stock_list)
sell_option_menu.grid(row=1, column=1)
##########################################stock frame############################
#main routine
if __name__ == "__main__":
root.title("Buy & Sell Vinyl Records")
root.mainloop()
| [
"[email protected]"
]
| |
ef9dd66a281bd4a8cfff524ae8a983149449e1cd | ca17bd80ac1d02c711423ac4093330172002a513 | /binary_tree_longest_consecutive_sequence/LongestSequence_better.py | 3413f0a627ca955427b2a27755e726678c29a746 | []
| no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def helper(self, root):
if not root: return 0, 0
leftG, leftL = self.helper(root.left)
rightG, rightL = self.helper(root.right)
currentL = 1
if root.left and root.val + 1 == root.left.val:
    currentL = max(currentL, leftL + 1)
if root.right and root.val + 1 == root.right.val:
    currentL = max(currentL, rightL + 1)
currentG = max(currentL, leftG, rightG)
return currentG, currentL
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
    return 0
return self.helper(root)[0]
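# Illustrative usage sketch (assumes the TreeNode class sketched above; the
# longest consecutive path 3 -> 4 -> 5 has length 3):
#
#     root = TreeNode(1)
#     root.right = TreeNode(3)
#     root.right.left = TreeNode(2)
#     root.right.right = TreeNode(4)
#     root.right.right.right = TreeNode(5)
#     assert Solution().longestConsecutive(root) == 3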
| [
"[email protected]"
]
| |
59dd09fa952c05fb2664214cd30c0473025458e0 | 43e53df2f2bc1779c2896541940a235e66a02b02 | /day18/qq发送消息.py | ab63f8dfee03cb49856868ecbdb35ef1e150b795 | []
| no_license | songdanlee/python_code_basic | ddb3276b0473a261423c43d5d8e7a1ff038d5c51 | f32cd4dc9670e55ffa6abe04c9184bfa5d8bbc41 | refs/heads/master | 2020-07-14T21:05:30.471486 | 2019-08-30T14:55:51 | 2019-08-30T14:55:51 | 205,402,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import os
Name = input('Name of the Receiver: ')
Name = '穆梓'  # hard-coded override of the input above
clientDict = {'lz': '513278236',
              '穆梓': '318750798'
              }  # person name -> corresponding QQ number
os.system('start tencent://message/?uin=' + clientDict[Name]) | [
"[email protected]"
]
| |
bf883990f5d5a2a677f673e28b5c4877284c147d | fde186bd141ed055ba8ab915b2ad25355f8f3fb6 | /ABC/070/py/A.py | 66689bce17b2f299f4639476d5684fcfd9e35d34 | []
| no_license | Tsukumo3/Atcoder | 259ea6487ad25ba2d4bf96d3e1cf9be4a427d24e | 5f8d5cf4c0edee5f54b8e78bc14a62e23cab69cb | refs/heads/master | 2020-12-20T05:04:39.222657 | 2020-10-17T01:39:04 | 2020-10-17T01:39:04 | 235,969,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | '''
ABC070 A - Palindromic Number
https://atcoder.jp/contests/abc070/tasks/abc070_a
'''
n = input()
if n[0] == n[2]:
ans = 'Yes'
else:
ans = 'No'
print(ans)
| [
"[email protected]"
]
| |
b9b123916eba2a46e552b8cb0e286f5b55b8e3e2 | e6f2d7e407d2b516152094d0834e78603c9eb60b | /wen_python_16/pic_1.py | 6be48cde753d4cc2948ea9632e02d8c0580a5dbd | []
| no_license | pylinx64/wen_python_16 | 5d63a44d2cbc8380e57b9f3c6887ab91578ec6cb | c9e2f9083f848d502bce2e0cf049ccba2677e981 | refs/heads/main | 2023-04-18T04:43:32.601474 | 2021-05-05T10:07:30 | 2021-05-05T10:07:30 | 336,603,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import turtle
import time
t = turtle.Pen()
colors = ['lime', '#C35A62', '#9CC35A', '#5AC3B7', '#C35AB8']
turtle.bgcolor('black')
t.pencolor(colors[2])
t.circle(100)
t.left(320)
t.forward(200)
t.circle(100)
time.sleep(50)
| [
"[email protected]"
]
| |
a2c75d7e2b2e0d54e1631a4ce6785d7266097d6e | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20170901/zone.py | 11bfa1976eb4864afd69e9171e7f0790cc681bd9 | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,856 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Zone']
class Zone(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Describes a DNS zone.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: The etag of the zone.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] zone_name: The name of the DNS zone (without a terminating dot).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if zone_name is None:
raise TypeError("Missing required property 'zone_name'")
__props__['zone_name'] = zone_name
__props__['max_number_of_record_sets'] = None
__props__['name'] = None
__props__['name_servers'] = None
__props__['number_of_record_sets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20150504preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20160401:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180301preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180501:Zone")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Zone, __self__).__init__(
'azure-nextgen:network/v20170901:Zone',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Zone':
"""
Get an existing Zone resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Zone(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The etag of the zone.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfRecordSets")
def max_number_of_record_sets(self) -> pulumi.Output[int]:
"""
The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "max_number_of_record_sets")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameServers")
def name_servers(self) -> pulumi.Output[Sequence[str]]:
"""
The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "name_servers")
@property
@pulumi.getter(name="numberOfRecordSets")
def number_of_record_sets(self) -> pulumi.Output[int]:
"""
The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "number_of_record_sets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
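# Illustrative usage sketch (assumed names and values; not part of the
# generated SDK):
#
#   import pulumi
#
#   zone = Zone("example-zone",
#               location="global",
#               resource_group_name="example-rg",
#               zone_name="example.com")
#   pulumi.export("name_servers", zone.name_servers)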
| [
"[email protected]"
]
| |
d5f2b424d4ed273d886ee3533b078836331a62e5 | 97eac4a05c77e1b6898b84c9606afa13428e45df | /Important_Functions/fib.py | 84c34f782b19848ecb61c528a94af491a974b47a | []
| no_license | ryanmcg86/Euler_Answers | 8f71b93ea15fceeeeb6b661d7401e40b760a38e6 | 28374025448b16aab9ed1dd801aafc3d602f7da8 | refs/heads/master | 2022-08-11T13:31:11.038918 | 2022-07-28T00:35:11 | 2022-07-28T00:35:11 | 190,278,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | '''This is a O(log n) implementation of a function that retreives the nth number in Fibonacci's sequence.'''
fibs = {0: 0, 1: 1}
def fib(n):
if n in fibs: return fibs[n]
if n % 2 == 0:
fibs[n] = ((2 * fib((n / 2) - 1)) + fib(n / 2)) * fib(n / 2)
else:
fibs[n] = fib((n - 1) / 2)**2 + fib((n + 1) / 2)**2
return fibs[n]
| [
"[email protected]"
]
| |
e258038aad904c2a62e39e78d3c0d2cf97592f7e | 7714d7fe86c99c059e339e895e265658fa3ce36e | /backend/home/migrations/0005_auto_20200807_0839.py | aa38d5dae63fac410eabc371a886dabc919134b3 | []
| no_license | crowdbotics-apps/mobile-7-aug-dev-8582 | f9454c8a9b3ca34e0b7dce328554658fd3fe02e9 | f569d0a9ae3effb99d6ee00127f87015296a4993 | refs/heads/master | 2023-07-11T13:56:39.164407 | 2020-08-07T09:01:31 | 2020-08-07T09:01:31 | 285,739,310 | 0 | 0 | null | 2021-08-03T20:03:29 | 2020-08-07T04:46:04 | JavaScript | UTF-8 | Python | false | false | 551 | py | # Generated by Django 2.2.15 on 2020-08-07 08:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0004_hjkhgkjhkjhkj'),
]
operations = [
migrations.RemoveField(
model_name='customtext',
name='hgfhgfhgf',
),
migrations.RemoveField(
model_name='customtext',
name='hjgjhgjhghjg',
),
migrations.RemoveField(
model_name='customtext',
name='kjhkjhkjh',
),
]
| [
"[email protected]"
]
| |
5cb4b3265a4257fb238248d7885ca2ac89655b57 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/__init__.py | a8bd8939dcba1b7eac4ab69d320c8aff3409529a | [
"Apache-2.0"
]
| permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 19,744 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class undefined_subtlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs that are not defined in the model or not recognised by
system.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__state")
_yang_name = "undefined-subtlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
YANG Description: A reference to a subTLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: A reference to a subTLV
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
YANG Description: State parameters of the undefined sub-TLV.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the undefined sub-TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
type = __builtin__.property(_get_type)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("type", type), ("state", state)])
from . import state
class undefined_subtlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs that are not defined in the model or not recognised by
system.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__state")
_yang_name = "undefined-subtlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
YANG Description: A reference to a subTLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: A reference to a subTLV
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
YANG Description: State parameters of the undefined sub-TLV.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the undefined sub-TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
type = __builtin__.property(_get_type)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("type", type), ("state", state)])
| [
"[email protected]"
]
| |
6a87a8e5d278ede9d444df333d662804bf68b370 | fbd347498b4ec04440dd91da0f62d3bc8aa85bff | /ex.031.precoPassagemOnibus.py | a323c227e902c1c41edaa64acec1370c78d468cd | [
"MIT"
]
| permissive | romulorm/cev-python | 254ae208b468aa4e23bf59838de389d045f7d8ef | b5c6844956c131a9e4e02355459c218739ebf8c5 | refs/heads/master | 2021-05-18T22:27:31.179430 | 2020-04-17T01:39:04 | 2020-04-17T01:39:04 | 251,455,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Python Exercise 031: Write a program that asks for the distance of a trip in km. Compute the ticket price,
# charging R$ 0.50 per km for trips of up to 200 km and R$ 0.45 per km for longer trips.
distancia = float(input("What is the distance of the trip, in kilometers? "))
preco = distancia * 0.45 if distancia > 200 else distancia * 0.50
print("Você vai pagar R$ {:.2f} por uma viagem de {} quilômetros.".format(preco, int(distancia)))
| [
"[email protected]"
]
| |
0d0379c91606561fd1684c3b56b5a59c7ac79ac6 | 2147b03faa984c3f82b452bfa2e44738762c0620 | /users/models.py | 0e5dba17c8ef60df08f56723c7b7cee4655f5822 | []
| no_license | crowdbotics-apps/pawn-shop-30678 | 44d485d1e4bf5540320518921750293c8649ea53 | 844572b9e385948fdfbe1c3113481bf0961e810e | refs/heads/master | 2023-07-30T16:02:19.844017 | 2021-09-19T11:07:57 | 2021-09-19T11:07:57 | 408,103,844 | 2 | 0 | null | 2021-10-06T00:15:01 | 2021-09-19T11:05:16 | Python | UTF-8 | Python | false | false | 890 | py | from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# WARNING!
"""
Some officially supported features of Crowdbotics Dashboard depend on the initial
state of this User model (Such as the creation of superusers using the CLI
or password reset in the dashboard). Changing, extending, or modifying this model
may lead to unexpected bugs and or behaviors in the automated flows provided
by Crowdbotics. Change it at your own risk.
This model represents the User instance of the system, login system and
everything that relates with an `User` is represented by this model.
"""
name = models.CharField(
null=True,
blank=True,
max_length=255,
)
| [
"[email protected]"
]
| |
9327c7c353f57edc531a78952f182e4b45b0c405 | a46e3ab5260c819e2b1a20343205b248a76314f3 | /pycharm_dict_str_split_unexpected.py | 9c4c8332fb5b3185d40c302f5e19bc170359ecf9 | []
| no_license | albertz/playground | 97ea882eb077e341c69f9e593918d38f89f8bc64 | f30c6330d855056f1756eeb558aa51fe72040c4e | refs/heads/master | 2023-08-16T16:33:01.780047 | 2023-07-31T11:46:58 | 2023-07-31T11:46:58 | 3,687,829 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 139 | py |
"""
https://youtrack.jetbrains.com/issue/PY-43916
"""
s = "a=b,c=d"
opts = dict([opt.split("=", 1) for opt in s.split(",")])
print(opts)
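# Expected output: {'a': 'b', 'c': 'd'}; each "key=value" chunk splits into a
# two-item list, which dict() accepts as a key/value pair.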
| [
"[email protected]"
]
| |
883d700804d9b19145bc3f36b3590a29fd7206bc | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptdiagp/rsfcoddiag.py | e9fdbd6d639608f643b2b2e047fbb1d9ba7e0857 | []
| no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 8,484 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsFcOdDiag(Mo):
"""
A source relation to the fabric card connecting different IO cards. Note that this relation is an internal object.
"""
meta = SourceRelationMeta("cobra.model.eqptdiagp.RsFcOdDiag", "cobra.model.eqpt.FC")
meta.cardinality = SourceRelationMeta.ONE_TO_M
meta.moClassName = "eqptdiagpRsFcOdDiag"
meta.rnFormat = "rsfcOdDiag-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Relation to Fabric Module"
meta.writeAccessMask = 0x800080800000001
meta.readAccessMask = 0x800080800000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.parentClasses.add("cobra.model.eqptdiagp.SpTsOdFc")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsfcOdDiag-', True),
]
prop = PropMeta("str", "annotation", "annotation", 37727, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 39866, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "nodeId", "nodeId", 12569, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
prop.range = [(1, 16000)]
prop.defaultValue = 0
prop.defaultValueStr = "not-found"
prop._addConstant("not-found", "not-found", 0)
meta.props.add("nodeId", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12568, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 3205
prop.defaultValueStr = "eqptFC"
prop._addConstant("eqptFC", None, 3205)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 12567, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Path"
meta.deploymentQueryPaths.append(DeploymentPathMeta("DiagFCRelOnDemandPolToNode", "On demand diag fabric card relation to Fabric Node", "cobra.model.fabric.Node"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
74dfd93a93cab894593cc99b17f0005ace2dc769 | 3e71f4d64b63e74a61447994a68f497f66c5e905 | /nnutil/model/adversarial_transformer.py | b216c878941bcd5cfa1c15722b704591dca182ec | [
"BSD-3-Clause"
]
| permissive | aroig/nnutil | 40a648ec56214dbad8610ec8d9c1bdc642f136e9 | 88df41ee89f592a28c1661ee8837dd8e8ca42cf3 | refs/heads/master | 2020-03-25T18:58:01.708160 | 2019-06-18T22:00:54 | 2019-06-18T22:00:54 | 144,058,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,274 | py | import os
import numpy as np
import tensorflow as tf
import nnutil as nn
from .base_model import BaseModel
class AdversarialTransformer(BaseModel):
def __init__(self, name, shape):
super(AdversarialTransformer, self).__init__(name)
self._shape = shape
self._transformer = None
self._discriminator = None
@property
def input_shape(self):
return self._shape
@property
def output_shape(self):
return self._shape
@property
def layers(self):
return self._transformer.layers
def transformer_network(self, params):
raise NotImplementedError
def discriminator_network(self, params):
raise NotImplementedError
def features_placeholder(self, batch_size=1):
return {
'source': tf.placeholder(dtype=tf.float32,
shape=(batch_size,) + self._shape,
name='source'),
'target': tf.placeholder(dtype=tf.float32,
shape=(batch_size,) + self._shape,
name='target')
}
def loss_function(self, tgt_image, synth_image, params):
step = tf.train.get_global_step()
        # Sample weights, so that easy samples weigh less
sample_bias = params.get('sample_bias', 0.0)
sample_bias_step = params.get('sample_bias_step', 0)
# Regularizer weight
regularizer = params.get('regularizer', 0.0)
regularizer_step = params.get('regularizer_step', 0)
# Calculate total loss function
with tf.name_scope('losses'):
sample_loss = tf.norm(nn.util.flatten(synth_image - tgt_image), ord=2, axis=1)
# TODO: perform importance sampling here
model_loss = tf.reduce_mean(sample_loss)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regularization_dampening = tf.sigmoid(tf.cast(step - regularizer_step, dtype=tf.float32) / 10.0)
total_loss = model_loss + regularizer * regularization_dampening * sum([l for l in regularization_losses])
tf.summary.scalar("model_loss", model_loss)
return total_loss
def model_fn(self, features, labels, mode, params, config):
src_image = features['source']
tgt_image = features['target']
step = tf.train.get_global_step()
training = (mode == tf.estimator.ModeKeys.TRAIN)
self._transformer = nn.layers.Segment(self.transformer_network(params), name="transformer")
        self._discriminator = nn.layers.Segment(self.discriminator_network(params), name="discriminator")
synth_image = self._transformer.apply(src_image, training=training)
if mode == tf.estimator.ModeKeys.PREDICT:
return self.prediction_estimator_spec(src_image, synth_image, params, config)
loss = self.loss_function(tgt_image, synth_image, params)
# Configure the training and eval phases
if mode == tf.estimator.ModeKeys.TRAIN:
return self.training_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)
else:
return self.evaluation_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)
def training_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
step = tf.train.get_global_step()
learning_rate = params.get('learning_rate', 0.0001)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=0, beta2=0.9)
# Manually apply gradients. We want the gradients for summaries. We need
# to apply them manually in order to avoid having duplicate gradient ops.
gradients = optimizer.compute_gradients(loss)
# Make sure we update averages on each training step
extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_ops):
train_op = optimizer.apply_gradients(gradients, global_step=step)
nn.summary.image_transformation(
"transformation",
src_image[0, :],
synth_image[0, :])
nn.summary.image_transformation(
"truth",
tgt_image[0, :],
synth_image[0, :])
nn.summary.layers("layer_summary_{}".format(self._transformer.name),
layers=self._transformer.layers,
gradients=gradients,
activations=self._transformer.layer_activations)
nn.summary.layers("layer_summary_{}".format(self._discriminator.name),
layers=self._discriminator.layers,
gradients=gradients,
activations=self._discriminator.layer_activations)
training_hooks = []
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
training_hooks=training_hooks,
train_op=train_op)
def evaluation_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
eval_metric_ops = {}
evaluation_hooks = []
# Make sure we run update averages on each training step
extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_ops):
loss = tf.identity(loss)
eval_dir = os.path.join(config.model_dir, "eval")
evaluation_hooks.append(
nn.train.EvalSummarySaverHook(
output_dir=eval_dir,
summary_op=tf.summary.merge_all()
)
)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
evaluation_hooks=evaluation_hooks,
eval_metric_ops=eval_metric_ops)
def prediction_estimator_spec(self, src_image, synth_image, params, config):
predictions = {
"synth": synth_image
}
exports = {}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=exports)
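# Illustrative subclass sketch (the layer lists below are hypothetical; they
# only assume that nn.layers.Segment accepts a list of Keras-style layers):
#
#   class DenoisingTransformer(AdversarialTransformer):
#       def transformer_network(self, params):
#           return [tf.layers.Conv2D(16, 3, padding="same", activation=tf.nn.relu),
#                   tf.layers.Conv2D(self.output_shape[-1], 3, padding="same")]
#
#       def discriminator_network(self, params):
#           return [tf.layers.Flatten(),
#                   tf.layers.Dense(1)]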
| [
"[email protected]"
]
| |
9a945c38b5ac272314f3cc18b8d69a3004068b3d | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/trendmicro_visionone/icon_trendmicro_visionone/actions/submit_file_to_sandbox/__init__.py | aff6b3ae31a70139a54adafc52b8b179ae63bb49 | [
"MIT"
]
| permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 80 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import SubmitFileToSandbox
| [
"[email protected]"
]
| |
f55c8a4be2c1181299895c4fe33e44f6c2de40c5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /ia95ckhN5ztgfJHe4_7.py | 3ba1f214826ddee32eec6ed44940399db61237ca | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | """
In JavaScript, there are two types of comments:
1. Single-line comments start with `//`
2. Multi-line or inline comments start with `/*` and end with `*/`
The input will be a sequence of `//`, `/*` and `*/`. **Every `/*` must have a
`*/` that immediately follows it**. In addition, there can be **no single-line
comments** in between the `/*` and `*/`.
Create a function that returns `True` if comments are properly formatted, and
`False` otherwise.
### Examples
comments_correct("//////") ➞ True
# 3 single-line comments: ["//", "//", "//"]
comments_correct("/**//**////**/") ➞ True
# 3 multi-line comments + 1 single-line comment:
# ["/*", "*/", "/*", "*/", "//", "/*", "*/"]
comments_correct("///*/**/") ➞ False
# The first /* is missing a */
comments_correct("/////") ➞ False
# The 5th / is single, not a double //
### Notes
N/A
"""
def comments_correct(txt):
    # An odd number of characters cannot split evenly into two-character tokens.
    if len(txt) % 2 != 0:
        return False
    # Split the string into two-character tokens, including the final one.
    chunks = [txt[n:n + 2] for n in range(0, len(txt), 2)]
    # Every /* must be immediately followed by a matching */.
    for i, chunk in enumerate(chunks):
        if chunk == '/*' and (i + 1 >= len(chunks) or chunks[i + 1] != '*/'):
            return False
    return True
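# Sanity checks against the examples above (kept as comments so importing this
# file stays side-effect free):
#
#   comments_correct("//////")            # True
#   comments_correct("/**//**////**/")    # True
#   comments_correct("///*/**/")          # False
#   comments_correct("/////")             # False (odd length)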
| [
"[email protected]"
]
| |
6f0da9774e428291d826ce32f0b2b035b3d95848 | adb6fe118613d60af9abfa73055599d205cf2108 | /视觉/XLwork/XL4/XL4_2.py | 5566d364d670d79d8f81b8ab2cda2c8a9d120eab | []
| no_license | lijianmin01/Third_college_grade | 18845f666a7fc1ece24d2ee45ee287e1efc0ca11 | 5e5b1f64375506de79ed94c8b2fc266fe1af4d6a | refs/heads/master | 2022-12-31T15:26:05.521297 | 2020-10-17T01:34:32 | 2020-10-17T01:34:32 | 295,317,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | import cv2
import numpy as np
# Global state
# which image is active: 0 = the first image, 1 = the second image
img_flag = 0
# Mouse callback for the first image
def on_EVENT_LBUTTONDOWN1(event, x, y,flags, param):
    # click three times to collect the coordinates of three positions, then the window is destroyed
if event == cv2.EVENT_LBUTTONDOWN:
xy = "%d,%d" % (x, y)
a.append(x)
b.append(y)
cv2.circle(img1, (x, y), 1, ( 0, 0,255), thickness=4)
cv2.putText(img1, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
cv2.imshow("image1", img1)
# Mouse callback for the second image
def on_EVENT_LBUTTONDOWN2(event, x, y,flags, param):
    # click three times to collect the coordinates of three positions, then the window is destroyed
if event == cv2.EVENT_LBUTTONDOWN:
xy = "%d,%d" % (x, y)
a1.append(x)
b1.append(y)
cv2.circle(img2, (x, y), 1, (255, 0, 0), thickness=4)
cv2.putText(img2, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
cv2.imshow("image2", img2)
# Collect the pairs of corresponding points in the two images
def get_same_point(img_flag):
    # first image
cv2.namedWindow("image1")
cv2.setMouseCallback("image1", on_EVENT_LBUTTONDOWN1)
cv2.imshow("image1", img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
    # second image
cv2.namedWindow("image2")
cv2.setMouseCallback("image2", on_EVENT_LBUTTONDOWN2)
cv2.imshow("image2", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# print(a)
# print(b)
# print(a1)
# print(b1)
len_1 = len(a)
img_sq_1 = np.ones((len_1,3),dtype='int')
img_sq_2 = np.ones((len_1,2), dtype='int')
img_sq_1[:,0] = a[:]
img_sq_1[:,1] = b[:]
img_sq_2[:,0] = a1[:]
img_sq_2[:,1] = b1[:]
print(img_sq_1)
print(img_sq_2)
return img_sq_1,img_sq_2
if __name__ == '__main__':
    # store the corresponding point pairs of the first and the second image separately
    # record the matched point pairs
    # first image
    a, b = [], []
    # second image
a1, b1 = [], []
img1 = cv2.imread(r'T:\imgs\XL4\klcc_a.png')
img2 = cv2.imread(r"T:\imgs\XL4\klcc_b.png")
img1_copy = img1[:]
img2_copy = img2[:]
# img_sq_1, img_sq_2 = get_same_point(img_flag)
    # collect the matching point pairs
img_sq_1,img_sq_2 = get_same_point(img_flag)
"""
[[318 250 1]
[153 318 1]
[344 351 1]]
[[243 270]
[ 74 342]
[272 369]]
    # To avoid re-picking the matching points on every run, hard-coded values were used here; remove later.
X = np.mat([[318,250,1],[153,318,1],[344,351,1]])
U = np.mat([[243,270],[ 74,342],[272,369]])
"""
X = np.mat(img_sq_1)
U = np.mat(img_sq_2)
    # solve for the affine transform matrix A
A = np.dot(X.I,U)
print(A)
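    # With three non-collinear point pairs the affine transform satisfies
    # [x, y, 1] @ A = [u, v], where A is a 3x2 matrix. Stacking the three
    # pairs gives X @ A = U with X (3x3) and U (3x2), hence A = X^{-1} @ U.
    # For example, with the hard-coded pairs from the docstring above:
    #
    #   X = np.mat([[318, 250, 1], [153, 318, 1], [344, 351, 1]])
    #   U = np.mat([[243, 270], [74, 342], [272, 369]])
    #   A = X.I * U   # the same 3x2 affine matrix printed here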
    # drawing the click markers modified the images above, so restore the originals
img1 = img1_copy[:]
img2 = img2_copy[:]
M1,N1 = img1.shape[0],img1.shape[1]
M2, N2 = img2.shape[0], img2.shape[1]
img1_cnt = img1[:]
img2_cnt = img2[:]
    # build a large output canvas
    # determine the coordinates of the transformed image
    # coordinates of the transformed image (X)
save_img2 = []
for x in range(M1):
for y in range(N1):
cnt_sq = np.array([x, y, 1]).dot(A)
cnt_sq = [int(cnt_sq.tolist()[0][0]),int(cnt_sq.tolist()[0][1])]
save_img2.append(cnt_sq)
    # reference image (U)
save_img1 = []
for x in range(M2):
for y in range(N2):
save_img1.append([x,y])
save_img1 = np.array(save_img1)
    # find the minimum coordinates of the transformed image
    save_img2 = np.array(save_img2)
    min_h = np.min(save_img2, axis=0)
    # record the minimum x and y coordinates
    x_min, y_min = min_h[0], min_h[1]
img3 = np.zeros([1000,1000, 3], np.uint8)
save_img1_1 = save_img1[:]
save_img2_1 = save_img2[:]
if x_min<0:
cnt = abs(x_min)
for i in range(len(save_img1)):
save_img1[i][0]+=cnt
for i in range(len(save_img2)):
save_img2[i][0]+=cnt
if y_min<0:
cnt = abs(y_min)
for i in range(len(save_img1)):
save_img1[i][1]+=cnt
for i in range(len(save_img2)):
save_img2[i][1]+=cnt
# print(save_img1_1)
# print(save_img2_1)
for i in range(len(save_img1)):
try:
img3[save_img1_1[i][0],save_img1_1[i][1]]=img1[save_img1[i][0],save_img1[i][1]]
except:
# img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img1[i][0]-1, save_img1[i][1]-1]
continue
for i in range(len(save_img2)):
try:
img3[save_img2_1[i][0],save_img2_1[i][1]]=img2[save_img2[i][0],save_img2[i][1]]
except:
#img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img2[i][0]-1, save_img2[i][1]-1]
continue
cv2.imshow("3",img3)
cv2.waitKey(0)
| [
"[email protected]"
]
| |
1972d15adead71f72550f9c4ff7bfc3a1ad6a084 | 62c6884e9597d96a25d274515d6124c46daffec8 | /zvt/stats/stats.py | 7128f18fc673ad777d15adc0d5b1552ed4641b1b | [
"MIT"
]
| permissive | doncat99/zvt | 0f9305442af287e63f15de11cb2e2f6b5f9b3d05 | 831183bdf7a6d0fc3acd3ea51984df590078eec6 | refs/heads/master | 2023-03-22T13:35:17.277276 | 2021-03-10T14:02:08 | 2021-03-10T14:02:08 | 284,984,720 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 22,003 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as _pd
import numpy as _np
from math import ceil as _ceil
from scipy.stats import (norm as _norm, linregress as _linregress)
from . import utils as _utils
# ======== STATS ========
def pct_rank(prices, window=60):
""" rank prices by window """
rank = _utils.multi_shift(prices, window).T.rank(pct=True).T
return rank.iloc[:, 0] * 100.
def compsum(returns):
""" Calculates rolling compounded returns """
return returns.add(1).cumprod() - 1
def comp(returns):
""" Calculates total compounded returns """
return returns.add(1).prod() - 1
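# Illustrative sketch (assumed input series; not part of the module):
#
#   rets = _pd.Series([0.10, -0.05, 0.03])
#   comp(rets)      # (1.10 * 0.95 * 1.03) - 1 ≈ 0.0764
#   compsum(rets)   # running compounded curve: 0.10, 0.045, 0.0764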
def expected_return(returns, aggregate=None, compounded=True):
"""
returns the expected return for a given period
by calculating the geometric holding period return
"""
returns = _utils._prepare_returns(returns)
returns = _utils.aggregate_returns(returns, aggregate, compounded)
return _np.product(1 + returns) ** (1 / len(returns)) - 1
def geometric_mean(returns, aggregate=None, compounded=True):
    """ shorthand for expected_return() """
    return expected_return(returns, aggregate, compounded)
def ghpr(returns, aggregate=None, compounded=True):
    """ shorthand for expected_return() """
    return expected_return(returns, aggregate, compounded)
def outliers(returns, quantile=.95):
"""returns series of outliers """
return returns[returns > returns.quantile(quantile)].dropna(how='all')
def remove_outliers(returns, quantile=.95):
""" returns series of returns without the outliers """
return returns[returns < returns.quantile(quantile)]
def best(returns, aggregate=None, compounded=True):
""" returns the best day/month/week/quarter/year's return """
returns = _utils._prepare_returns(returns)
return _utils.aggregate_returns(returns, aggregate, compounded).max()
def worst(returns, aggregate=None, compounded=True):
""" returns the worst day/month/week/quarter/year's return """
returns = _utils._prepare_returns(returns)
return _utils.aggregate_returns(returns, aggregate, compounded).min()
def consecutive_wins(returns, aggregate=None, compounded=True):
""" returns the maximum consecutive wins by day/month/week/quarter/year """
returns = _utils._prepare_returns(returns)
returns = _utils.aggregate_returns(returns, aggregate, compounded) > 0
return _utils.count_consecutive(returns).max()
def consecutive_losses(returns, aggregate=None, compounded=True):
"""
returns the maximum consecutive losses by
day/month/week/quarter/year
"""
returns = _utils._prepare_returns(returns)
returns = _utils.aggregate_returns(returns, aggregate, compounded) < 0
return _utils.count_consecutive(returns).max()
def exposure(returns):
""" returns the market exposure time (returns != 0) """
returns = _utils._prepare_returns(returns)
def _exposure(ret):
ex = len(ret[(~_np.isnan(ret)) & (ret != 0)]) / len(ret)
return _ceil(ex * 100) / 100
if isinstance(returns, _pd.DataFrame):
_df = {}
for col in returns.columns:
_df[col] = _exposure(returns[col])
return _pd.Series(_df)
return _exposure(returns)
def win_rate(returns, aggregate=None, compounded=True):
""" calculates the win ratio for a period """
def _win_rate(series):
try:
return len(series[series > 0]) / len(series[series != 0])
except Exception:
return 0.
returns = _utils._prepare_returns(returns)
if aggregate:
returns = _utils.aggregate_returns(returns, aggregate, compounded)
if isinstance(returns, _pd.DataFrame):
_df = {}
for col in returns.columns:
_df[col] = _win_rate(returns[col])
return _pd.Series(_df)
return _win_rate(returns)
def avg_return(returns, aggregate=None, compounded=True):
"""
calculates the average return/trade return for a period
"""
returns = _utils._prepare_returns(returns)
if aggregate:
returns = _utils.aggregate_returns(returns, aggregate, compounded)
return returns[returns != 0].dropna().mean()
def avg_win(returns, aggregate=None, compounded=True):
"""
calculates the average winning
return/trade return for a period
"""
returns = _utils._prepare_returns(returns)
if aggregate:
returns = _utils.aggregate_returns(returns, aggregate, compounded)
return returns[returns > 0].dropna().mean()
def avg_loss(returns, aggregate=None, compounded=True):
"""
    calculates the average losing
    return/trade return for a period
"""
returns = _utils._prepare_returns(returns)
if aggregate:
returns = _utils.aggregate_returns(returns, aggregate, compounded)
return returns[returns < 0].dropna().mean()
def volatility(returns, periods=252, annualize=True):
""" calculates the volatility of returns for a period """
std = _utils._prepare_returns(returns).std()
if annualize:
return std * _np.sqrt(periods)
return std
def implied_volatility(returns, periods=252, annualize=True):
""" calculates the implied volatility of returns for a period """
logret = _utils.log_returns(returns)
if annualize:
return logret.rolling(periods).std() * _np.sqrt(periods)
return logret.std()
# ======= METRICS =======
def sharpe(returns, rf=0., periods=252, annualize=True):
"""
    calculates the sharpe ratio of excess returns
If rf is non-zero, you must specify periods.
In this case, rf is assumed to be expressed in yearly (annualized) terms
Args:
* returns (Series, DataFrame): Input return series
* rf (float): Risk-free rate expressed as a yearly (annualized) return
* periods (int): Frequency of returns (252 for daily, 12 for monthly)
        * annualize: return annualized sharpe?
"""
if rf != 0 and periods is None:
raise Exception('Must provide periods if rf != 0')
returns = _utils._prepare_returns(returns, rf, periods)
res = returns.mean() / returns.std()
if annualize:
return res * _np.sqrt(1 if periods is None else periods)
return res
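# Illustrative sketch (synthetic daily returns are an assumption; not part of
# the module):
#
#   daily = _pd.Series(_np.random.normal(0.0005, 0.01, 252))
#   sharpe(daily, rf=0.02, periods=252)   # annualized Sharpe vs. a 2% yearly rf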
def sortino(returns, rf=0, periods=252, annualize=True):
"""
    calculates the sortino ratio of excess returns
If rf is non-zero, you must specify periods.
In this case, rf is assumed to be expressed in yearly (annualized) terms
Calculation is based on this paper by Red Rock Capital
http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf
"""
if rf != 0 and periods is None:
raise Exception('Must provide periods if rf != 0')
returns = _utils._prepare_returns(returns, rf, periods)
downside = (returns[returns < 0] ** 2).sum() / len(returns)
res = returns.mean() / _np.sqrt(downside)
if annualize:
return res * _np.sqrt(1 if periods is None else periods)
return res
def cagr(returns, rf=0., compounded=True):
"""
    calculates the compound annual growth rate
    (CAGR %) of excess returns
    If rf is non-zero, it is assumed to be expressed
    in yearly (annualized) terms
"""
total = _utils._prepare_returns(returns, rf)
if compounded:
total = comp(total)
else:
total = _np.sum(total)
years = (returns.index[-1] - returns.index[0]).days / 365.
res = abs(total + 1.0) ** (1.0 / years) - 1
if isinstance(returns, _pd.DataFrame):
res = _pd.Series(res)
res.index = returns.columns
return res
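# Worked example of the formula above (assumed numbers): a total compounded
# return of +50% over exactly 3 years gives (1 + 0.5) ** (1 / 3) - 1 ≈ 0.1447,
# i.e. roughly 14.5% per year.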
def rar(returns, rf=0.):
"""
    calculates the risk-adjusted return of excess returns
    (CAGR / exposure; takes time into account)
    If rf is non-zero, it is assumed to be expressed
    in yearly (annualized) terms
"""
returns = _utils._prepare_returns(returns, rf)
return cagr(returns) / exposure(returns)
def skew(returns):
"""
calculates returns' skewness
(the degree of asymmetry of a distribution around its mean)
"""
return _utils._prepare_returns(returns).skew()
def kurtosis(returns):
"""
calculates returns' kurtosis
    (the degree to which a distribution peaks compared to a normal distribution)
"""
return _utils._prepare_returns(returns).kurtosis()
def calmar(returns):
""" calculates the calmar ratio (CAGR% / MaxDD%) """
returns = _utils._prepare_returns(returns)
cagr_ratio = cagr(returns)
max_dd = max_drawdown(returns)
return cagr_ratio / abs(max_dd)
def ulcer_index(returns, rf=0):
""" calculates the ulcer index score (downside risk measurment) """
returns = _utils._prepare_returns(returns, rf)
dd = 1. - returns / returns.cummax()
return _np.sqrt(_np.divide((dd**2).sum(), returns.shape[0] - 1))
def ulcer_performance_index(returns, rf=0):
"""
    calculates the ulcer performance index
    (downside risk measurement)
"""
returns = _utils._prepare_returns(returns, rf)
dd = 1. - returns / returns.cummax()
ulcer = _np.sqrt(_np.divide((dd**2).sum(), returns.shape[0] - 1))
return returns.mean() / ulcer
def upi(returns, rf=0):
""" shorthand for ulcer_performance_index() """
return ulcer_performance_index(returns, rf)
def risk_of_ruin(returns):
"""
calculates the risk of ruin
(the likelihood of losing all one's investment capital)
"""
returns = _utils._prepare_returns(returns)
wins = win_rate(returns)
return ((1 - wins) / (1 + wins)) ** len(returns)
def ror(returns):
""" shorthand for risk_of_ruin() """
return risk_of_ruin(returns)
def value_at_risk(returns, sigma=1, confidence=0.95):
"""
    calculates the daily value-at-risk
    (variance-covariance calculation with confidence level n)
"""
returns = _utils._prepare_returns(returns)
mu = returns.mean()
sigma *= returns.std()
if confidence > 1:
confidence = confidence / 100
return _norm.ppf(1 - confidence, mu, sigma)
def var(returns, sigma=1, confidence=0.95):
""" shorthand for value_at_risk() """
return value_at_risk(returns, sigma, confidence)
def conditional_value_at_risk(returns, sigma=1, confidence=0.95):
"""
    calculates the conditional daily value-at-risk (aka expected shortfall),
    which quantifies the amount of tail risk of an investment
"""
returns = _utils._prepare_returns(returns)
var = value_at_risk(returns, sigma, confidence)
c_var = returns[returns < var].values.mean()
return c_var if ~_np.isnan(c_var) else var
def cvar(returns, sigma=1, confidence=0.95):
""" shorthand for conditional_value_at_risk() """
return conditional_value_at_risk(returns, sigma, confidence)
def expected_shortfall(returns, sigma=1, confidence=0.95):
""" shorthand for conditional_value_at_risk() """
return conditional_value_at_risk(returns, sigma, confidence)
def tail_ratio(returns, cutoff=0.95):
"""
measures the ratio between the right
(95%) and left tail (5%).
"""
returns = _utils._prepare_returns(returns)
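    # e.g. a ratio of 1.0 means gains at the 95th percentile match losses at the 5th percentile in magnitude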
return abs(returns.quantile(cutoff) / returns.quantile(1 - cutoff))
def payoff_ratio(returns):
""" measures the payoff ratio (average win/average loss) """
returns = _utils._prepare_returns(returns)
return avg_win(returns) / abs(avg_loss(returns))
def win_loss_ratio(returns):
""" shorthand for payoff_ratio() """
return payoff_ratio(returns)
def profit_ratio(returns):
""" measures the profit ratio (win ratio / loss ratio) """
returns = _utils._prepare_returns(returns)
wins = returns[returns >= 0]
loss = returns[returns < 0]
win_ratio = abs(wins.mean() / wins.count())
loss_ratio = abs(loss.mean() / loss.count())
try:
return win_ratio / loss_ratio
except Exception:
return 0.
def profit_factor(returns):
""" measures the profit ratio (wins/loss) """
returns = _utils._prepare_returns(returns)
return abs(returns[returns >= 0].sum() / returns[returns < 0].sum())
def gain_to_pain_ratio(returns):
""" shorthand for profit_factor() """
return profit_factor(returns)
def cpc_index(returns):
"""
measures the cpc ratio
(profit factor * win % * win loss ratio)
"""
returns = _utils._prepare_returns(returns)
return profit_factor(returns) * win_rate(returns) * \
win_loss_ratio(returns)
def common_sense_ratio(returns):
""" measures the common sense ratio (profit factor * tail ratio) """
returns = _utils._prepare_returns(returns)
return profit_factor(returns) * tail_ratio(returns)
def outlier_win_ratio(returns, quantile=.99):
"""
calculates the outlier winners ratio
99th percentile of returns / mean positive return
"""
returns = _utils._prepare_returns(returns)
return returns.quantile(quantile).mean() / returns[returns >= 0].mean()
def outlier_loss_ratio(returns, quantile=.01):
"""
calculates the outlier losers ratio
1st percentile of returns / mean negative return
"""
returns = _utils._prepare_returns(returns)
return returns.quantile(quantile).mean() / returns[returns < 0].mean()
def recovery_factor(returns):
""" measures how fast the strategy recovers from drawdowns """
returns = _utils._prepare_returns(returns)
total_returns = comp(returns)
max_dd = max_drawdown(returns)
return total_returns / abs(max_dd)
def risk_return_ratio(returns):
"""
calculates the return / risk ratio
(sharpe ratio without factoring in the risk-free rate)
"""
returns = _utils._prepare_returns(returns)
return returns.mean() / returns.std()
def max_drawdown(prices):
""" calculates the maximum drawdown """
prices = _utils._prepare_prices(prices)
return (prices / prices.expanding(min_periods=0).max()).min() - 1
def to_drawdown_series(prices):
""" convert price series to drawdown series """
prices = _utils._prepare_prices(prices)
dd = prices / _np.maximum.accumulate(prices) - 1.
return dd.replace([_np.inf, -_np.inf, -0], 0)
def drawdown_details(drawdown):
"""
calculates drawdown details, including start/end/valley dates,
duration, max drawdown and max dd for 99% of the dd period
for every drawdown period
"""
def _drawdown_details(drawdown):
# mark no drawdown
no_dd = drawdown == 0
# extract dd start dates
starts = ~no_dd & no_dd.shift(1)
starts = list(starts[starts].index)
# extract end dates
ends = no_dd & (~no_dd).shift(1)
ends = list(ends[ends].index)
# no drawdown :)
if not starts:
return _pd.DataFrame(
index=[], columns=('start', 'valley', 'end', 'days',
'max drawdown', '99% max drawdown'))
# drawdown series begins in a drawdown
if ends and starts[0] > ends[0]:
starts.insert(0, drawdown.index[0])
        # series ends in a drawdown; fill with the last date
if not ends or starts[-1] > ends[-1]:
ends.append(drawdown.index[-1])
# build dataframe from results
data = []
for i, _ in enumerate(starts):
dd = drawdown[starts[i]:ends[i]]
clean_dd = -remove_outliers(-dd, .99)
data.append((starts[i], dd.idxmin(), ends[i],
(ends[i] - starts[i]).days,
dd.min() * 100, clean_dd.min() * 100))
df = _pd.DataFrame(data=data,
columns=('start', 'valley', 'end', 'days',
'max drawdown',
'99% max drawdown'))
df['days'] = df['days'].astype(int)
df['max drawdown'] = df['max drawdown'].astype(float)
df['99% max drawdown'] = df['99% max drawdown'].astype(float)
df['start'] = df['start'].dt.strftime('%Y-%m-%d')
df['end'] = df['end'].dt.strftime('%Y-%m-%d')
df['valley'] = df['valley'].dt.strftime('%Y-%m-%d')
return df
if isinstance(drawdown, _pd.DataFrame):
_dfs = {}
for col in drawdown.columns:
_dfs[col] = _drawdown_details(drawdown[col])
return _pd.concat(_dfs, axis=1)
return _drawdown_details(drawdown)
def kelly_criterion(returns):
"""
calculates the recommended maximum amount of capital that
should be allocated to the given strategy, based on the
Kelly Criterion (http://en.wikipedia.org/wiki/Kelly_criterion)
"""
returns = _utils._prepare_returns(returns)
win_loss_ratio = payoff_ratio(returns)
win_prob = win_rate(returns)
lose_prob = 1 - win_prob
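    # Kelly fraction: (b*p - q) / b, where b = payoff (win/loss) ratio, p = win probability, q = 1 - p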
return ((win_loss_ratio * win_prob) - lose_prob) / win_loss_ratio
# ==== VS. BENCHMARK ====
def r_squared(returns, benchmark):
""" measures the straight line fit of the equity curve """
# slope, intercept, r_val, p_val, std_err = _linregress(
_, _, r_val, _, _ = _linregress(
_utils._prepare_returns(returns),
_utils._prepare_benchmark(benchmark, returns.index))
return r_val**2
def r2(returns, benchmark):
""" shorthand for r_squared() """
return r_squared(returns, benchmark)
def information_ratio(returns, benchmark):
"""
calculates the information ratio
(basically the risk return ratio of the net profits)
"""
diff_rets = _utils._prepare_returns(returns) - \
_utils._prepare_benchmark(benchmark, returns.index)
return diff_rets.mean() / diff_rets.std()
def greeks(returns, benchmark, periods=252.):
""" calculates alpha and beta of the portfolio """
# ----------------------------
# data cleanup
returns = _utils._prepare_returns(returns)
benchmark = _utils._prepare_benchmark(benchmark, returns.index)
# ----------------------------
# find covariance
matrix = _np.cov(returns, benchmark)
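    # beta = cov(returns, benchmark) / var(benchmark)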
beta = matrix[0, 1] / matrix[1, 1]
# calculates measures now
alpha = returns.mean() - beta * benchmark.mean()
alpha = alpha * periods
return _pd.Series({
"beta": beta,
"alpha": alpha,
# "vol": _np.sqrt(matrix[0, 0]) * _np.sqrt(periods)
}).fillna(0)
def rolling_greeks(returns, benchmark, periods=252):
""" calculates rolling alpha and beta of the portfolio """
df = _pd.DataFrame(data={
"returns": _utils._prepare_returns(returns),
"benchmark": _utils._prepare_benchmark(benchmark, returns.index)
})
corr = df.rolling(int(periods)).corr().unstack()['returns']['benchmark']
std = df.rolling(int(periods)).std()
beta = corr * std['returns'] / std['benchmark']
alpha = df['returns'].mean() - beta * df['benchmark'].mean()
# alpha = alpha * periods
return _pd.DataFrame(index=returns.index, data={
"beta": beta,
"alpha": alpha
}).fillna(0)
def compare(returns, benchmark, aggregate=None, compounded=True,
round_vals=None):
"""
compare returns to benchmark on a
day/week/month/quarter/year basis
"""
returns = _utils._prepare_returns(returns)
benchmark = _utils._prepare_benchmark(benchmark, returns.index)
data = _pd.DataFrame(data={
'Benchmark': _utils.aggregate_returns(
benchmark, aggregate, compounded) * 100,
'Returns': _utils.aggregate_returns(
returns, aggregate, compounded) * 100
})
data['Multiplier'] = data['Returns'] / data['Benchmark']
data['Won'] = _np.where(data['Returns'] >= data['Benchmark'], '+', '-')
if round_vals is not None:
return _np.round(data, round_vals)
return data
def monthly_returns(returns, eoy=True, compounded=True):
""" calculates monthly returns """
if isinstance(returns, _pd.DataFrame):
returns.columns = map(str.lower, returns.columns)
if len(returns.columns) > 1 and 'close' in returns.columns:
returns = returns['close']
else:
returns = returns[returns.columns[0]]
returns = _utils._prepare_returns(returns)
original_returns = returns.copy()
returns = _pd.DataFrame(
_utils.group_returns(returns,
returns.index.strftime('%Y-%m-01'),
compounded))
returns.columns = ['Returns']
returns.index = _pd.to_datetime(returns.index)
    # build year/month columns for the pivot
returns['Year'] = returns.index.strftime('%Y')
returns['Month'] = returns.index.strftime('%b')
# make pivot table
returns = returns.pivot('Year', 'Month', 'Returns').fillna(0)
# handle missing months
for month in ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']:
if month not in returns.columns:
returns.loc[:, month] = 0
# order columns by month
returns = returns[['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']]
if eoy:
returns['eoy'] = _utils.group_returns(
original_returns, original_returns.index.year).values
returns.columns = map(lambda x: str(x).upper(), returns.columns)
returns.index.name = None
return returns
| [
"[email protected]"
]
| |
b3dc5ea3d89b1e07a55ce83f932073cd9b52c537 | b6df7cda5c23cda304fcc0af1450ac3c27a224c1 | /data/codes/ericmartel_Perforce.py | f0b775db30e972ffd0563d511eca16f05a452ea5 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | vieira-rafael/py-search | 88ee167fa1949414cc4f3c98d33f8ecec1ce756d | b8c6dccc58d72af35e4d4631f21178296f610b8a | refs/heads/master | 2021-01-21T04:59:36.220510 | 2016-06-20T01:45:34 | 2016-06-20T01:45:34 | 54,433,313 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 35,544 | py | # Written by Eric Martel ([email protected] / www.ericmartel.com)
# Direct port of the Sublime Text 2 version also available on my github, see README.md for more info.
import sublime
import sublime_plugin

import os
import stat
import subprocess
import tempfile
import threading
import json
import sys

try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty  # python 3.x

# Plugin Settings are located in 'perforce.sublime-settings' - make a copy in the User folder to keep changes

# global variable used when calling p4 - it stores the path of the file in the current view,
# used to determine which P4CONFIG to use; whenever a view is selected, the variable gets updated
global_folder = ''

class PerforceP4CONFIGHandler(sublime_plugin.EventListener):
    def on_activated(self, view):
        if view.file_name():
            global global_folder
            global_folder, filename = os.path.split(view.file_name())

# Executed at startup to store the path of the plugin... necessary to open files relative to the plugin
perforceplugin_dir = os.getcwd()

# Utility functions
def ConstructCommand(in_command):
    perforce_settings = sublime.load_settings('Perforce.sublime-settings')
    p4Env = perforce_settings.get('perforce_p4env')
    p4Path = perforce_settings.get('perforce_p4path')
    if (p4Path == None or p4Path == ''):
        p4Path = ''
    command = ''
    if (p4Env and p4Env != ''):
        command = '. {0} && {1}'.format(p4Env, p4Path)
    elif (sublime.platform() == "osx"):
        command = '. ~/.bash_profile && {0}'.format(p4Path)
    # Revert change until threading is fixed
    # command = getPerforceConfigFromPreferences(command)
    command += in_command
    return command
def getPerforceConfigFromPreferences(command):
    perforce_settings = sublime.load_settings('Perforce.sublime-settings')

    # check to see if the sublime preferences include the given p4 config
    # if it does, then add it to the command in the form "var=value command"
    # so that they get inserted into the environment the command runs in
    def addP4Var(command, var):
        p4var = perforce_settings.get(var)
        if p4var:
            if sublime.platform() == "windows":
                return command + "SET {0}={1} && ".format(var, p4var)
            return "{0}{1}={2} ".format(command, var, p4var)
        return command

    command = addP4Var(command, "P4PORT")
    command = addP4Var(command, "P4CLIENT")
    command = addP4Var(command, "P4USER")
    command = addP4Var(command, "P4PASSWD")
    return command
def GetUserFromClientspec():
    command = ConstructCommand('p4 info')
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True)
    result, err = p.communicate()
    result = result.decode("utf-8")
    err = err.decode("utf-8")

    if(err):
        WarnUser("usererr {0}".format(err.strip()))
        return -1

    # locate the line containing "User name: " and extract the following name
    startindex = result.find("User name: ")
    if(startindex == -1):
        WarnUser("Unexpected output from 'p4 info'.")
        return -1
    startindex += 11  # advance after 'User name: '

    endindex = result.find("\n", startindex)
    if(endindex == -1):
        WarnUser("Unexpected output from 'p4 info'.")
        return -1

    return result[startindex:endindex].strip()
def GetClientRoot(in_dir): # check if the file is in the depot command = ConstructCommand('p4 info') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): WarnUser(err.strip()) return -1 # locate the line containing "Client root: " and extract the following path startindex = result.find("Client root: ") if(startindex == -1): # sometimes the clientspec is not displayed sublime.error_message("Perforce Plugin: p4 info didn't supply a valid clientspec, launching p4 client"); command = ConstructCommand('p4 client') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() return -1 startindex += 13 # advance after 'Client root: '
endindex = result.find("\n", startindex) if(endindex == -1): WarnUser("Unexpected output from 'p4 info'.") return -1
# convert all paths to "os.sep" slashes convertedclientroot = result[startindex:endindex].strip().replace('\\', os.sep).replace('/', os.sep)
return convertedclientroot
def IsFolderUnderClientRoot(in_folder): # check if the file is in the depot clientroot = GetClientRoot(in_folder) if(clientroot == -1): return 0
clientroot = clientroot.lower() if(clientroot == "null"): return 1;
# convert all paths to "os.sep" slashes convertedfolder = in_folder.lower().replace('\\', os.sep).replace('/', os.sep); clientrootindex = convertedfolder.find(clientroot);
if(clientrootindex == -1): return 0 return 1
def IsFileInDepot(in_folder, in_filename): isUnderClientRoot = IsFolderUnderClientRoot(in_folder); if(os.path.isfile(os.path.join(in_folder, in_filename))): # file exists on disk, not being added if(isUnderClientRoot): return 1 else: return 0 else: if(isUnderClientRoot): return -1 # will be in the depot, it's being added else: return 0
def GetPendingChangelists(): # Launch p4 changes to retrieve all the pending changelists currentuser = GetUserFromClientspec() if(currentuser == -1): return 0, "Unexpected output from 'p4 info'."
command = ConstructCommand('p4 changes -s pending -u {0}'.format(currentuser))
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8") if(not err): return 1, result return 0, result
def AppendToChangelistDescription(changelist, input): # First, create an empty changelist, we will then get the cl number and set the description command = ConstructCommand('p4 change -o {0}'.format(changelist)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err
# Find the description field and modify it lines = result.splitlines()
descriptionindex = -1 for index, line in enumerate(lines): if(line.strip() == "Description:"): descriptionindex = index break; filesindex = -1 for index, line in enumerate(lines): if(line.strip() == "Files:"): filesindex = index break;
if(filesindex == -1): # The changelist is empty endindex = index else: endindex = filesindex - 1
perforce_settings = sublime.load_settings('Perforce.sublime-settings') lines.insert(endindex , "\t{0}".format(input))
temp_changelist_description_file = open(os.path.join(tempfile.gettempdir(), "tempchangelist.txt"), 'w')
try: temp_changelist_description_file.write(perforce_settings.get('perforce_end_line_separator').join(lines)) finally: temp_changelist_description_file.close()
command = ConstructCommand('p4 change -i < {0}'.format(temp_changelist_description_file.name)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(temp_changelist_description_file.name)
if(err): return 0, err
return 1, result
def PerforceCommandOnFile(in_command, in_folder, in_filename): command = ConstructCommand('p4 {0} "{1}"'.format(in_command, in_filename)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): return 1, result.strip() else: return 0, err.strip()
def WarnUser(message):
    perforce_settings = sublime.load_settings('Perforce.sublime-settings')
    if(perforce_settings.get('perforce_warnings_enabled')):
        if(perforce_settings.get('perforce_log_warnings_to_status')):
            sublime.status_message("Perforce [warning]: {0}".format(message))
        else:
            print("Perforce [warning]: {0}".format(message))

def LogResults(success, message):
    if(success >= 0):
        print("Perforce: {0}".format(message))
    else:
        WarnUser(message)

def IsFileWritable(in_filename):
    if(not in_filename):
        return 0

    # if it doesn't exist, it's "writable"
    if(not os.path.isfile(in_filename)):
        return 1

    filestats = os.stat(in_filename)[0]
    if(filestats & stat.S_IWRITE):
        return 1
    return 0
# Checkout section
def Checkout(in_filename):
    if(IsFileWritable(in_filename)):
        return -1, "File is already writable."

    folder_name, filename = os.path.split(in_filename)
    isInDepot = IsFileInDepot(folder_name, filename)

    if(isInDepot != 1):
        return -1, "File is not under the client root."

    # check out the file
    return PerforceCommandOnFile("edit", folder_name, in_filename)

class PerforceAutoCheckout(sublime_plugin.EventListener):
    def on_modified(self, view):
        if(not view.file_name()):
            return
if(IsFileWritable(view.file_name())): return
perforce_settings = sublime.load_settings('Perforce.sublime-settings')
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_checkout') or not perforce_settings.get('perforce_auto_checkout_on_modified')): return if(view.is_dirty()): success, message = Checkout(view.file_name()) LogResults(success, message);
def on_pre_save(self, view): perforce_settings = sublime.load_settings('Perforce.sublime-settings')
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_checkout') or not perforce_settings.get('perforce_auto_checkout_on_save')): return if(view.is_dirty()): success, message = Checkout(view.file_name()) LogResults(success, message);
class PerforceCheckoutCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): success, message = Checkout(self.view.file_name()) LogResults(success, message) else: WarnUser("View does not contain a file")
# Add section
def Add(in_folder, in_filename):
    # add the file
    return PerforceCommandOnFile("add", in_folder, in_filename)
class PerforceAutoAdd(sublime_plugin.EventListener): preSaveIsFileInDepot = 0 def on_pre_save(self, view): # file already exists, no need to add if view.file_name() and os.path.isfile(view.file_name()): return
global global_folder global_folder, filename = os.path.split(view.file_name())
perforce_settings = sublime.load_settings('Perforce.sublime-settings')
self.preSaveIsFileInDepot = 0
# check if this part of the plugin is enabled if(not perforce_settings.get('perforce_auto_add')): WarnUser("Auto Add disabled") return
folder_name, filename = os.path.split(view.file_name())
if(not IsFolderUnderClientRoot(folder_name)): WarnUser("Adding file outside of clientspec, ignored for auto add") return
self.preSaveIsFileInDepot = IsFileInDepot(folder_name, filename)
def on_post_save(self, view): if(self.preSaveIsFileInDepot == -1): folder_name, filename = os.path.split(view.file_name()) success, message = Add(folder_name, filename) LogResults(success, message)
class PerforceAddCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Add(folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Rename sectiondef Rename(in_filename, in_newname): command = ConstructCommand('p4 integrate -d -t -Di -f "{0}" "{1}"'.format(in_filename, in_newname)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err.strip() command = ConstructCommand('p4 delete "{0}" "{1}"'.format(in_filename, in_newname)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): return 1, result.strip() else: return 0, err.strip()
class PerforceRenameCommand(sublime_plugin.WindowCommand): def run(self): # Get the description self.window.show_input_panel('New File Name', self.window.active_view().file_name(), self.on_done, self.on_change, self.on_cancel)
def on_done(self, input): success, message = Rename(self.window.active_view().file_name(), input) if(success): self.window.run_command('close') self.window.open_file(input)
LogResults(success, message)
def on_change(self, input): pass
def on_cancel(self): pass
# Delete section
def Delete(in_folder, in_filename):
    success, message = PerforceCommandOnFile("delete", in_folder, in_filename)
    if(success):
        # test if the file is deleted
        if(os.path.isfile(os.path.join(in_folder, in_filename))):
            success = 0

    return success, message
class PerforceDeleteCommand(sublime_plugin.WindowCommand): def run(self): if(self.window.active_view().file_name()): folder_name, filename = os.path.split(self.window.active_view().file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Delete(folder_name, filename) if(success): # the file was properly deleted on perforce, ask Sublime Text to close the view self.window.run_command('close'); else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Revert section
def Revert(in_folder, in_filename):
    # revert the file
    return PerforceCommandOnFile("revert", in_folder, in_filename)
class PerforceRevertCommand(sublime_plugin.TextCommand): def run_(self, edit_token, args): # revert cannot be called when an Edit object exists, manually handle the run routine if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Revert(folder_name, filename) if(success): # the file was properly reverted, ask Sublime Text to refresh the view self.view.run_command('revert'); else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
# Diff section
def Diff(in_folder, in_filename):
    # diff the file
    return PerforceCommandOnFile("diff", in_folder, in_filename)
class PerforceDiffCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = Diff(folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file") # Graphical Diff With Depot sectionclass GraphicalDiffThread(threading.Thread): def __init__(self, in_folder, in_filename, in_endlineseparator, in_command): self.folder = in_folder self.filename = in_filename self.endlineseparator = in_endlineseparator self.command = in_command threading.Thread.__init__(self)
def run(self): success, content = PerforceCommandOnFile("print", self.folder, self.filename) if(not success): return 0, content
# Create a temporary file to hold the depot version depotFileName = "depot"+self.filename tmp_file = open(os.path.join(tempfile.gettempdir(), depotFileName), 'w')
# Remove the first two lines of content linebyline = content.splitlines(); content=self.endlineseparator.join(linebyline[1:]);
try: tmp_file.write(content) finally: tmp_file.close()
# Launch P4Diff with both files and the same arguments P4Win passes it diffCommand = self.command diffCommand = diffCommand.replace('%depotfile_path', tmp_file.name) diffCommand = diffCommand.replace('%depotfile_name', depotFileName) diffCommand = diffCommand.replace('%file_path', os.path.join(self.folder, self.filename)) diffCommand = diffCommand.replace('%file_name', self.filename)
command = ConstructCommand(diffCommand) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(tmp_file.name);
def GraphicalDiffWithDepot(self, in_folder, in_filename): perforce_settings = sublime.load_settings('Perforce.sublime-settings') diffcommand = perforce_settings.get('perforce_selectedgraphicaldiffapp_command') if not diffcommand: diffcommand = perforce_settings.get('perforce_default_graphical_diff_command') GraphicalDiffThread(in_folder, in_filename, perforce_settings.get('perforce_end_line_separator'), diffcommand).start()
return 1, "Launching thread for Graphical Diff"
class PerforceGraphicalDiffWithDepotCommand(sublime_plugin.TextCommand): def run(self, edit): if(self.view.file_name()): folder_name, filename = os.path.split(self.view.file_name())
if(IsFileInDepot(folder_name, filename)): success, message = GraphicalDiffWithDepot(self, folder_name, filename) else: success = 0 message = "File is not under the client root."
LogResults(success, message) else: WarnUser("View does not contain a file")
class PerforceSelectGraphicalDiffApplicationCommand(sublime_plugin.WindowCommand): def run(self): diffapps = [] if os.path.exists(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json'): f = open(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json') applications = json.load(f) f.close()
for entry in applications.get('applications'): formattedentry = [] formattedentry.append(entry.get('name')) formattedentry.append(entry.get('exename')) diffapps.append(formattedentry)
self.window.show_quick_panel(diffapps, self.on_done) def on_done(self, picked): if picked == -1: return f = open(perforceplugin_dir + os.sep + 'graphicaldiffapplications.json') applications = json.load(f) entry = applications.get('applications')[picked] f.close()
sublime.status_message(__name__ + ': Please make sure that {0} is reachable - you might need to restart Sublime Text 2.'.format(entry['exename']))
settings = sublime.load_settings('Perforce.sublime-settings') settings.set('perforce_selectedgraphicaldiffapp', entry['name']) settings.set('perforce_selectedgraphicaldiffapp_command', entry['diffcommand']) sublime.save_settings('Perforce.sublime-settings')
# List Checked Out Files sectionclass ListCheckedOutFilesThread(threading.Thread): def __init__(self, window): self.window = window threading.Thread.__init__(self)
def ConvertFileNameToFileOnDisk(self, in_filename): clientroot = GetClientRoot(os.path.dirname(in_filename)) if(clientroot == -1): return 0
if(clientroot == "null"): return in_filename;
filename = clientroot + os.sep + in_filename.replace('\\', os.sep).replace('/', os.sep)
return filename
def MakeFileListFromChangelist(self, in_changelistline): files_list = [] currentuser = GetUserFromClientspec() # Launch p4 opened to retrieve all files from changelist command = ConstructCommand('p4 opened -c {0} -u {1}'.format(in_changelistline[1], currentuser)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8") if(not err): lines = result.splitlines() for line in lines: # remove the change # poundindex = line.rfind('#') cleanedfile = line[0:poundindex]
# just keep the filename cleanedfile = '/'.join(cleanedfile.split('/')[3:])
file_entry = [cleanedfile[cleanedfile.rfind('/')+1:]] file_entry.append("Changelist: {0}".format(in_changelistline[1])) file_entry.append(' '.join(in_changelistline[7:])); localfile = self.ConvertFileNameToFileOnDisk(cleanedfile) if(localfile != 0): file_entry.append(localfile) files_list.append(file_entry)
return files_list
def MakeCheckedOutFileList(self): files_list = self.MakeFileListFromChangelist(['','default','','','','','','Default Changelist']);
currentuser = GetUserFromClientspec() if(currentuser == -1): return files_list
# Launch p4 changes to retrieve all the pending changelists command = ConstructCommand('p4 changes -s pending -u {0}'.format(currentuser));
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(not err): changelists = result.splitlines()
# for each line, extract the change, and run p4 opened on it to list all the files for changelistline in changelists: changelistlinesplit = changelistline.split(' ') files_list.extend(self.MakeFileListFromChangelist(changelistlinesplit))
return files_list
def run(self): self.files_list = self.MakeCheckedOutFileList()
def show_quick_panel(): if not self.files_list: sublime.error_message(__name__ + ': There are no checked out files to list.') return self.window.show_quick_panel(self.files_list, self.on_done) sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return file_name = self.files_list[picked][3]
def open_file(): self.window.open_file(file_name) sublime.set_timeout(open_file, 10)
class PerforceListCheckedOutFilesCommand(sublime_plugin.WindowCommand): def run(self): ListCheckedOutFilesThread(self.window).start()
# Create Changelist sectiondef CreateChangelist(description): # First, create an empty changelist, we will then get the cl number and set the description command = ConstructCommand('p4 change -o') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err
# Find the description field and modify it desclabel = 'Description:{0}'.format(os.linesep) descindex = result.find(desclabel) + len(desclabel) descend = result.find(os.linesep*2, descindex) result = '{0}\t{1}{2}'.format(result[0:descindex], description, result[descend:])
# Remove all files from the query, we want them to stay in Default filesindex = result.rfind("Files:") # The Files: section we want to get rid of is only present if there's files in the default changelist if(filesindex > 640): result = result[0:filesindex];
temp_changelist_description_file = open(os.path.join(tempfile.gettempdir(), "tempchangelist.txt"), 'w')
try: temp_changelist_description_file.write(result) finally: temp_changelist_description_file.close()
command = ConstructCommand('p4 change -i < {0}'.format(temp_changelist_description_file.name)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
# Clean up os.unlink(temp_changelist_description_file.name)
if(err): return 0, err
return 1, result
class PerforceCreateChangelistCommand(sublime_plugin.WindowCommand): def run(self): # Get the description self.window.show_input_panel('Changelist Description', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, input): success, message = CreateChangelist(input) LogResults(success, message)
def on_change(self, input): pass
def on_cancel(self): pass
# Move Current File to Changelistdef MoveFileToChangelist(in_filename, in_changelist): folder_name, filename = os.path.split(in_filename)
command = ConstructCommand('p4 reopen -c {0} "{1}"'.format(in_changelist, filename)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): return 0, err return 1, result
class ListChangelistsAndMoveFileThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = ['New', 'Default'];
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at two because we receive the changelist in the opposite order and want to keep new and default on top resultchangelists.insert(2, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked].split(' ')
def move_file(): changelist = 'Default' if(len(changelistlist) > 1): # Numbered changelist changelist = changelistlist[1] else: changelist = changelistlist[0]
if(changelist == 'New'): # Special Case self.window.show_input_panel('Changelist Description', '', self.on_description_done, self.on_description_change, self.on_description_cancel) else: success, message = MoveFileToChangelist(self.view.file_name(), changelist.lower()) LogResults(success, message);
sublime.set_timeout(move_file, 10)
def on_description_done(self, input): success, message = CreateChangelist(input) if(success == 1): # Extract the changelist name from the message changelist = message.split(' ')[1] # Move the file success, message = MoveFileToChangelist(self.view.file_name(), changelist)
LogResults(success, message) def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceMoveCurrentFileToChangelistCommand(sublime_plugin.WindowCommand): def run(self): # first, test if the file is under the client root folder_name, filename = os.path.split(self.window.active_view().file_name()) isInDepot = IsFileInDepot(folder_name, filename)
if(isInDepot != 1): WarnUser("File is not under the client root.") return 0
ListChangelistsAndMoveFileThread(self.window).start()
# Add Line to Changelist Descriptionclass AddLineToChangelistDescriptionThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = [];
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change, and run p4 opened on it to list all the files for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at zero because we receive the changelist in the opposite order # Might be more efficient to sort... changelist_entry = ["Changelist {0}".format(changelistlinesplit[1])] changelist_entry.append(' '.join(changelistlinesplit[7:])); resultchangelists.insert(0, changelist_entry)
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked][0].split(' ')
def get_description_line(): self.changelist = changelistlist[1] self.window.show_input_panel('Changelist Description', '', self.on_description_done, self.on_description_change, self.on_description_cancel)
sublime.set_timeout(get_description_line, 10)
def on_description_done(self, input): success, message = AppendToChangelistDescription(self.changelist, input) LogResults(success, message) def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceAddLineToChangelistDescriptionCommand(sublime_plugin.WindowCommand): def run(self): AddLineToChangelistDescriptionThread(self.window).start()
# Submit sectionclass SubmitThread(threading.Thread): def __init__(self, window): self.window = window self.view = window.active_view() threading.Thread.__init__(self)
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = ['Default'];
currentuser = GetUserFromClientspec(); command = ConstructCommand('p4 opened -c default -u {0}'.format(currentuser)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if err: resultchangelists.pop()
if success: changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') # Insert at two because we receive the changelist in the opposite order and want to keep default on top resultchangelists.insert(1, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelist = self.changelists_list[picked] changelistsections = changelist.split(' ')
command = '' # Check in the selected changelist if changelistsections[0] != 'Default': command = ConstructCommand('p4 submit -c {0}'.format(changelistsections[1])) else: command = ConstructCommand('p4 submit') p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8") def on_description_change(self, input): pass
def on_description_cancel(self): pass
class PerforceSubmitCommand(sublime_plugin.WindowCommand): def run(self): SubmitThread(self.window).start()
class PerforceLogoutCommand(sublime_plugin.WindowCommand): def run(self): try: command = ConstructCommand("p4 set P4PASSWD=") p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() except ValueError: pass
class PerforceLoginCommand(sublime_plugin.WindowCommand): def run(self): self.window.show_input_panel("Enter Perforce Password", "", self.on_done, None, None)
def on_done(self, password): try: command = ConstructCommand("p4 logout") p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() #unset var command = ConstructCommand("p4 set P4PASSWD={0}".format(password)) p = subprocess.Popen(command, stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) p.communicate() except ValueError: pass
class PerforceUnshelveClCommand(sublime_plugin.WindowCommand): def run(self): try: ShelveClCommand(self.window, False).start() except: WarnUser("Unknown Error, does the included P4 Version support Shelve?") return -1class PerforceShelveClCommand(sublime_plugin.WindowCommand): def run(self): try: ShelveClCommand(self.window, True).start() except: WarnUser("Unknown Error, does the included P4 Version support Shelve?") return -1
class ShelveClCommand(threading.Thread): def __init__(self, window, shelve=True): self.shelve = shelve self.window = window threading.Thread.__init__(self)
def run(self): self.changelists_list = self.MakeChangelistsList() def show_quick_panel(): if not self.changelists_list: sublime.error_message(__name__ + ': There are no changelists to list.') return self.window.show_quick_panel(self.changelists_list, self.on_done)
sublime.set_timeout(show_quick_panel, 10)
def on_done(self, picked): if picked == -1: return changelistlist = self.changelists_list[picked].split(' ')
changelist = 'Default' if(len(changelistlist) > 1): # Numbered changelist changelist = changelistlist[1] else: changelist = changelistlist[0] if self.shelve: cmdString = "shelve -c{0}".format(changelist) else: cmdString = "unshelve -s{0} -f".format(changelist) command = ConstructCommand("p4 {0}".format(cmdString)) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=global_folder, shell=True) result, err = p.communicate() result = result.decode("utf-8") err = err.decode("utf-8")
if(err): WarnUser("usererr {0}".format(err.strip())) return -1
def MakeChangelistsList(self): success, rawchangelists = GetPendingChangelists();
resultchangelists = []
if(success): changelists = rawchangelists.splitlines()
# for each line, extract the change for changelistline in changelists: changelistlinesplit = changelistline.split(' ') resultchangelists.insert(0, "Changelist {0} - {1}".format(changelistlinesplit[1], ' '.join(changelistlinesplit[7:])))
return resultchangelists | [
"[email protected]"
]
| |
b741ff399ab76da7346243c4a6b8b998babe038f | b3f33d53507b09bc8193c5fc113fe2f28d95f6da | /empinfo/forms.py | 4e01d73915ed2eae414d2d03cf9e13da6356e549 | []
| no_license | Jayant2185/Django_Employee | f07beb3b3497a84c75ba43a623a7ebb7390b18b4 | ac3650670ddecd634b847bb39c3f0e9372b1bb4f | refs/heads/master | 2020-04-24T07:49:20.986569 | 2019-02-21T06:13:06 | 2019-02-21T06:13:06 | 171,810,493 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django import forms
from empinfo.models import Employee
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__" | [
"[email protected]"
]
| |
07f5f0febe13636216e15925edf3d44f1db27c2f | 2d80791a21a049243dd2bf7dd95a46c4d4b2510b | /domains/datastructures/linkedlist/CompareLists.py | c0436a2864f8f8fe985aca810532048acb762799 | []
| no_license | jayrav13/jr-hackerrank | 909346d101fdf08a54ff75ec2ee39c90e661b251 | b7e0130fdd1c2eb4436871be3255200eac8ca3d9 | refs/heads/master | 2021-01-15T15:36:16.772814 | 2016-11-21T18:59:15 | 2016-11-21T18:59:15 | 48,657,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """
Compare two linked list
head could be None as well for empty list
Node is defined as
return back the head of the linked list in the below method.
"""
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
def CompareLists(headA, headB):
count = 0
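    # walk both lists in lockstep; a length mismatch or any differing node value means the lists are not equal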
while headA != None or headB != None:
if (headA == None and headB != None) or (headA != None and headB == None):
return 0
if headA.data != headB.data:
count = count + 1
headA = headA.next
headB = headB.next
if count == 0:
return 1
else:
return 0
| [
"[email protected]"
]
| |
f875953eeb6bc655bf365406127b7e55f238a6e8 | d91fe0e972f2befab71987a732111b56245c5efc | /example_sm_pkg/nodes/subscriber_publisher_class_example.py | 9c626cf9b3902a827c6dde41ecd95bc0f3438280 | []
| no_license | karla3jo/robocup2014 | 2064e8102d5a3251ae582b7ed37ab80d0398f71c | 3d8563956fd1276b7e034402a9348dd5cb3dc165 | refs/heads/master | 2020-07-26T08:22:13.932741 | 2014-07-14T13:58:48 | 2014-07-14T13:58:48 | 21,850,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 12:00:00 2013
@author: sampfeiffer
"""
import roslib; roslib.load_manifest('example_sm_pkg')
import rospy
import sys
from std_msgs.msg import String
class myNode():
def __init__(self, argument_one):
# my class variables
self.my_variable_string = "I'm a string yo!"
self.subs = rospy.Subscriber('my_topic_to_subscribe', String, self.myCallback)
self.pub = rospy.Publisher('my_topic_to_publish', String, latch=True)
self.myMethod()
def myCallback(self, data):
rospy.loginfo("Received from topic data!")
self.myMethod()
def myMethod(self):
rospy.loginfo("Using the method!")
publish_this_thing = String("I'm the content of a string!")
self.pub.publish(publish_this_thing)
if __name__ == '__main__':
rospy.init_node('node_name')
if len(sys.argv) < 2:
print "Error, we need an arg!"
rospy.loginfo("No args given, closing...")
exit()
node = myNode("this is an argument")
rospy.spin() | [
"[email protected]"
]
| |
995e5340e3e9b0c8c5de25b5387d45937c15f28d | ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89 | /unep.project-database/tags/0.5/reports/ProjectsByStaffReportFactory.py | 098ba274cf4516efa70e202f34be5109ec024408 | []
| no_license | jean/project-database | 65a2559844175350351ba87e820d25c3037b5fb2 | e818d322ec11d950f2770cd5324fbcd1acaa734d | refs/heads/master | 2021-01-01T06:27:24.528764 | 2014-01-31T11:11:45 | 2014-01-31T11:11:45 | 32,125,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from Report import Report
class ProjectsByStaffReportFactory(object):
def __init__(self, context, **kw):
self.context = context
self.params = kw
def getReport(self):
# create and fill the report
name = "Projects by Staff Report"
report = Report(name)
report.setReportHeaders(( name,),)
report.setTableHeaders(((
'Staff Name',
'Staff position',
'Focal Area',
'Project Title',
'Project Cycle Stage',
'Last milestone action',
'Actual date',
'Project Grant ',
),))
# XXX Implement this
# report.setTableRows()
# report.setTableTotals([])
# report.setReportFooters()
return report
| [
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
]
| jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d |
ea9d3ee3230d73f421fb22d2f59af8f113c81b91 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/load_balancing_rule_py3.py | 695a8e63e56403f3519346e6c5ce8aa055f7b53e | [
"MIT"
]
| permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 5,391 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class LoadBalancingRule(SubResource):
"""A loag balancing rule for a load balancer.
All required parameters must be populated in order to send to Azure.
:param id: Resource Identifier.
:type id: str
:param frontend_ip_configuration: A reference to frontend IP addresses.
:type frontend_ip_configuration:
~azure.mgmt.network.v2015_06_15.models.SubResource
:param backend_address_pool: A reference to a pool of DIPs. Inbound
traffic is randomly load balanced across IPs in the backend IPs.
:type backend_address_pool:
~azure.mgmt.network.v2015_06_15.models.SubResource
:param probe: The reference of the load balancer probe used by the load
balancing rule.
:type probe: ~azure.mgmt.network.v2015_06_15.models.SubResource
:param protocol: Required. The transport protocol for the external
endpoint. Possible values are 'Udp' or 'Tcp'. Possible values include:
'Udp', 'Tcp'
:type protocol: str or
~azure.mgmt.network.v2015_06_15.models.TransportProtocol
:param load_distribution: The load distribution policy for this rule.
Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
Possible values include: 'Default', 'SourceIP', 'SourceIPProtocol'
:type load_distribution: str or
~azure.mgmt.network.v2015_06_15.models.LoadDistribution
:param frontend_port: Required. The port for the external endpoint. Port
numbers for each rule must be unique within the Load Balancer. Acceptable
values are between 1 and 65534.
:type frontend_port: int
:param backend_port: The port used for internal connections on the
endpoint. Acceptable values are between 1 and 65535.
:type backend_port: int
:param idle_timeout_in_minutes: The timeout for the TCP idle connection.
The value can be set between 4 and 30 minutes. The default value is 4
minutes. This element is only used when the protocol is set to TCP.
:type idle_timeout_in_minutes: int
:param enable_floating_ip: Configures a virtual machine's endpoint for the
floating IP capability required to configure a SQL AlwaysOn Availability
Group. This setting is required when using the SQL AlwaysOn Availability
Groups in SQL server. This setting can't be changed after you create the
endpoint.
:type enable_floating_ip: bool
:param provisioning_state: Gets the provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'frontend_port': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
'probe': {'key': 'properties.probe', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'load_distribution': {'key': 'properties.loadDistribution', 'type': 'str'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, frontend_port: int, id: str=None, frontend_ip_configuration=None, backend_address_pool=None, probe=None, load_distribution=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(LoadBalancingRule, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.backend_address_pool = backend_address_pool
self.probe = probe
self.protocol = protocol
self.load_distribution = load_distribution
self.frontend_port = frontend_port
self.backend_port = backend_port
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.enable_floating_ip = enable_floating_ip
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| [
"[email protected]"
]
| |
59f898c24b7c31d0cbe76ef107a8a875644260fd | e4c6acac07427baf82b44c17198dab5b78b44fa7 | /warn/warn.py | 2e374b944fce69f2ab0ee357d7bfd5128807795a | [
"MIT"
]
| permissive | scopatz/warn | bf1b33320031857233ee525fc56957a511eb2d37 | a528bca192856f3cbf81e5fb133b143cb247a789 | refs/heads/master | 2020-04-06T05:30:32.463854 | 2016-08-29T00:23:41 | 2016-08-29T00:23:41 | 67,258,506 | 0 | 0 | null | 2016-09-02T22:20:43 | 2016-09-02T22:20:43 | null | UTF-8 | Python | false | false | 8,594 | py | """ A module that replace the built-ins warning module wit a more flexible
interface.
"""
import warnings
import sys
import re
from warnings import (_is_internal_frame, _next_external_frame,
_filters_mutated, showwarning, defaultaction,
onceregistry)
wfmu = _filters_mutated
warnings._filters_version = 1
def _filters_mutated():
warnings._filters_version += 1
warnings._filters_mutated = _filters_mutated
def new_warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None,
emit_module=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if registry.get('version', 0) != warnings._filters_version:
registry.clear()
registry['version'] = warnings._filters_version
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in warnings.filters:
item = _get_proxy_filter(item)
if len(item) == 5:
action, msg, cat, mod, ln = item
emod = None
else:
action, msg, cat, mod, ln, emod = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(emod is None or emod.match(emit_module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
import linecache
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
elif action == "custom":
pass
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
def _get_stack_frame(stacklevel):
stacklevel = stacklevel + 1
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
return frame
def new_warn(message, category=None, stacklevel=1, emitstacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
####################
### Get category ###
####################
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
if not (isinstance(category, type) and issubclass(category, Warning)):
raise TypeError("category must be a Warning subclass, "
"not '{:s}'".format(type(category).__name__))
# Get context information
try:
frame = _get_stack_frame(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = frame.f_globals
lineno = frame.f_lineno
try:
eframe = _get_stack_frame(emitstacklevel)
except ValueError:
eglobals = sys.__dict__
else:
eglobals = eframe.f_globals
if '__name__' in eglobals:
emodule = eglobals['__name__']
else:
emodule = "<string>"
####################
### Get Filename ###
####################
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
####################
### Get Filename ###
####################
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc"):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
new_warn_explicit(message, category, filename, lineno, module, registry,
globals, emit_module=emodule)
_proxy_map = {}
re_matchall = re.compile('', re.I)
class ProxyWarning(Warning): pass # NOQA
def _set_proxy_filter(warningstuple):
"""set up a proxy that store too long warnings in a separate map"""
if len(warningstuple) > 5:
key = len(_proxy_map)+1
_proxy_map[key] = warningstuple
return ('custom', re_matchall, ProxyWarning, re_matchall, key)
else:
return warningstuple
def _get_proxy_filter(warningstuple):
"""set up a proxy that store too long warnings in a separate map"""
if warningstuple[2] == ProxyWarning:
return _proxy_map[warningstuple[4]]
else:
return warningstuple
def newfilterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False, emodule=""):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    'emodule' -- a regex that the emitting module name must match;
                 the empty string matches every module
    """
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
if emodule:
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno, re.compile(emodule, ))
else:
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
warnings.filters.append(_set_proxy_filter(item))
else:
warnings.filters.insert(0, _set_proxy_filter(item))
warnings._filters_mutated()
class Patch:
def __init__(self):
self._enter = 0
def __call__(self):
if not self._enter:
            self._warn_explicit = warnings.warn_explicit
self._warn = warnings.warn
self._filterwarnings = warnings.filterwarnings
warnings.warn_explicit = new_warn_explicit
warnings.warn = new_warn
warnings.filterwarnings = newfilterwarnings
self._enter += 1
def __enter__(self):
return self.__call__()
    def __exit__(self, exc_type, exc_value, traceback):
        self._enter -= 1
        if self._enter:
            return
        # restore the original warnings functions
        warnings.warn_explicit = self._warn_explicit
        warnings.warn = self._warn
        warnings.filterwarnings = self._filterwarnings
patch = Patch()
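if __name__ == "__main__":
    # Usage sketch (added; the module name `noisy_lib` is an illustrative
    # assumption, not part of the original patch): install the patched
    # functions, then ignore DeprecationWarnings -- but only those emitted
    # from the hypothetical `noisy_lib` module.
    patch()
    warnings.filterwarnings("ignore", category=DeprecationWarning,
                            emodule=r"noisy_lib(\..*)?")
    # Emitted from __main__, so the emodule filter does not match and the
    # warning is still shown under the default filters.
    warnings.warn("still shown: emitted from __main__", DeprecationWarning)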
| [
"[email protected]"
]
| |
32299d75b478e539707e32ef50bd264407775fda | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02796/s396415723.py | 183a9157222ec19c1853c4eb1252eb8dcabd5ca6 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | rs = [(x - l, x + l) for x, l in (map(int, input().split()) for _ in range(int(input())))]
# Greedy interval scheduling: each robot claims the interval (x - l, x + l);
# sort by right endpoint and accept an interval whenever it starts at or
# after the last accepted right endpoint.
rs.sort(key=lambda x: x[1])
last = -10 ** 9
ans = 0
for l, r in rs:
    if last <= l:
        ans += 1
        last = r
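# Worked example (added): input "2\n2 4\n4 3\n" yields intervals (-2, 6)
# and (1, 7); only the first is accepted, so the program prints 1.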
print(ans) | [
"[email protected]"
]
| |
298fb512ae1280030836435e20ea3f5487152026 | 1bab425414ff522916c70fbc57562ad7e7e2aa64 | /build_libtcod.py | 94b053869450719211c905bff082869f96a359a8 | [
"BSD-2-Clause-Views",
"Python-2.0"
]
| permissive | Rakaneth/python-tcod | c958797193a365c5ccd8b1cb9d1a143938e4ba05 | 70ff1895fd7ae87bf66f16e388211db389d983fd | refs/heads/master | 2020-04-13T20:47:56.853106 | 2018-12-28T18:42:20 | 2018-12-28T18:42:20 | 163,439,619 | 0 | 0 | NOASSERTION | 2018-12-28T18:42:21 | 2018-12-28T18:40:01 | Python | UTF-8 | Python | false | false | 11,464 | py | #!/usr/bin/env python3
import os
import sys
import glob
from cffi import FFI
from pycparser import c_parser, c_ast, parse_file, c_generator
import shutil
import subprocess
import platform
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
import zipfile
SDL2_VERSION = os.environ.get('SDL_VERSION', '2.0.9')
TDL_NO_SDL2_EXPORTS = os.environ.get('TDL_NO_SDL2_EXPORTS', '0') == '1'
CFFI_HEADER = 'tcod/cffi.h'
CFFI_EXTRA_CDEFS = 'tcod/cdef.h'
BITSIZE, LINKAGE = platform.architecture()
def walk_sources(directory):
for path, dirs, files in os.walk(directory):
for source in files:
if source.endswith('.c') or source.endswith('.cpp'):
yield os.path.join(path, source)
def find_sources(directory):
return [os.path.join(directory, source)
for source in os.listdir(directory)
if source.endswith('.c')]
def get_sdl2_file(version):
if sys.platform == 'win32':
sdl2_file = 'SDL2-devel-%s-VC.zip' % (version,)
else:
assert sys.platform == 'darwin'
sdl2_file = 'SDL2-%s.dmg' % (version,)
sdl2_local_file = os.path.join('dependencies', sdl2_file)
sdl2_remote_file = 'https://www.libsdl.org/release/%s' % sdl2_file
if not os.path.exists(sdl2_local_file):
print('Downloading %s' % sdl2_remote_file)
urlretrieve(sdl2_remote_file, sdl2_local_file)
return sdl2_local_file
def unpack_sdl2(version):
sdl2_path = 'dependencies/SDL2-%s' % (version,)
if sys.platform == 'darwin':
sdl2_dir = sdl2_path
sdl2_path += '/SDL2.framework'
if os.path.exists(sdl2_path):
return sdl2_path
sdl2_arc = get_sdl2_file(version)
print('Extracting %s' % sdl2_arc)
if sdl2_arc.endswith('.zip'):
with zipfile.ZipFile(sdl2_arc) as zf:
zf.extractall('dependencies/')
else:
assert sdl2_arc.endswith('.dmg')
subprocess.check_call(['hdiutil', 'mount', sdl2_arc])
subprocess.check_call(['mkdir', '-p', sdl2_dir])
subprocess.check_call(['cp', '-r', '/Volumes/SDL2/SDL2.framework',
sdl2_dir])
subprocess.check_call(['hdiutil', 'unmount', '/Volumes/SDL2'])
return sdl2_path
module_name = 'tcod._libtcod'
include_dirs = [
'.',
'libtcod/src/vendor/',
'libtcod/src/vendor/zlib/',
]
extra_parse_args = []
extra_compile_args = []
extra_link_args = []
sources = []
libraries = []
library_dirs = []
define_macros = []
sources += walk_sources('tcod/')
sources += walk_sources('tdl/')
sources += walk_sources('libtcod/src/libtcod')
sources += ['libtcod/src/vendor/glad.c']
sources += ['libtcod/src/vendor/lodepng.cpp']
sources += ['libtcod/src/vendor/stb.c']
sources += ['libtcod/src/vendor/utf8proc/utf8proc.c']
sources += glob.glob('libtcod/src/vendor/zlib/*.c')
if TDL_NO_SDL2_EXPORTS:
extra_parse_args.append('-DTDL_NO_SDL2_EXPORTS')
if sys.platform == 'win32':
libraries += ['User32', 'OpenGL32']
define_macros.append(('TCODLIB_API', ''))
define_macros.append(('_CRT_SECURE_NO_WARNINGS', None))
if 'linux' in sys.platform:
libraries += ['GL']
if sys.platform == 'darwin':
extra_link_args += ['-framework', 'OpenGL']
extra_link_args += ['-framework', 'SDL2']
else:
libraries += ['SDL2']
# Bundled SDL headers/libraries cover platforms that don't ship them readily.
if sys.platform in ['win32', 'darwin']:
SDL2_PATH = unpack_sdl2(SDL2_VERSION)
include_dirs.append('libtcod/src/zlib/')
if sys.platform == 'win32':
include_dirs.append(os.path.join(SDL2_PATH, 'include'))
ARCH_MAPPING = {'32bit': 'x86', '64bit': 'x64'}
SDL2_LIB_DIR = os.path.join(SDL2_PATH, 'lib/', ARCH_MAPPING[BITSIZE])
library_dirs.append(SDL2_LIB_DIR)
SDL2_LIB_DEST = os.path.join('tcod', ARCH_MAPPING[BITSIZE])
if not os.path.exists(SDL2_LIB_DEST):
os.mkdir(SDL2_LIB_DEST)
shutil.copy(os.path.join(SDL2_LIB_DIR, 'SDL2.dll'), SDL2_LIB_DEST)
def fix_header(filepath):
"""Removes leading whitespace from a MacOS header file.
    This whitespace causes problems with preprocessor directives on some
    platforms.
"""
with open(filepath, 'r+') as f:
current = f.read()
fixed = '\n'.join(line.strip() for line in current.split('\n'))
if current == fixed:
return
f.seek(0)
f.truncate()
f.write(fixed)
if sys.platform == 'darwin':
HEADER_DIR = os.path.join(SDL2_PATH, 'Headers')
fix_header(os.path.join(HEADER_DIR, 'SDL_assert.h'))
fix_header(os.path.join(HEADER_DIR, 'SDL_config_macosx.h'))
include_dirs.append(HEADER_DIR)
extra_link_args += ['-F%s/..' % SDL2_PATH]
extra_link_args += ['-rpath', '%s/..' % SDL2_PATH]
extra_link_args += ['-rpath', '/usr/local/opt/llvm/lib/']
if sys.platform not in ['win32', 'darwin']:
extra_parse_args += subprocess.check_output(['sdl2-config', '--cflags'],
universal_newlines=True
).strip().split()
extra_compile_args += extra_parse_args
extra_link_args += subprocess.check_output(['sdl2-config', '--libs'],
universal_newlines=True
).strip().split()
class CustomPostParser(c_ast.NodeVisitor):
def __init__(self):
self.ast = None
self.typedefs = []
self.removeable_typedefs = []
self.funcdefs = []
def parse(self, ast):
self.ast = ast
self.visit(ast)
for node in self.funcdefs:
ast.ext.remove(node)
for node in self.removeable_typedefs:
ast.ext.remove(node)
return ast
def visit_Typedef(self, node):
if node.name in ['wchar_t', 'size_t']:
# remove fake typedef placeholders
self.removeable_typedefs.append(node)
else:
self.generic_visit(node)
if node.name in self.typedefs:
print('warning: %s redefined' % node.name)
self.removeable_typedefs.append(node)
self.typedefs.append(node.name)
def visit_EnumeratorList(self, node):
"""Replace enumerator expressions with '...' stubs."""
for type, enum in node.children():
if enum.value is None:
pass
elif isinstance(enum.value, (c_ast.BinaryOp, c_ast.UnaryOp)):
enum.value = c_ast.Constant('int', '...')
elif hasattr(enum.value, 'type'):
enum.value = c_ast.Constant(enum.value.type, '...')
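            # e.g. `FOO = (1 << 3)` in the SDL/libtcod headers becomes
            # `FOO = ...` in the cdef, letting cffi resolve the real value
            # when the extension is compiled.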
def visit_ArrayDecl(self, node):
if not node.dim:
return
if isinstance(node.dim, (c_ast.BinaryOp, c_ast.UnaryOp)):
node.dim = c_ast.Constant('int', '...')
def visit_Decl(self, node):
if node.name is None:
self.generic_visit(node)
elif (node.name and 'vsprint' in node.name or
node.name in ['SDL_vsscanf',
'SDL_vsnprintf',
'SDL_LogMessageV']):
# exclude va_list related functions
self.ast.ext.remove(node)
elif node.name in ['screen']:
# exclude outdated 'extern SDL_Surface* screen;' line
self.ast.ext.remove(node)
else:
self.generic_visit(node)
def visit_FuncDef(self, node):
"""Exclude function definitions. Should be declarations only."""
self.funcdefs.append(node)
def get_cdef():
generator = c_generator.CGenerator()
return generator.visit(get_ast())
def get_ast():
global extra_parse_args
if 'win32' in sys.platform:
extra_parse_args += [r'-I%s/include' % SDL2_PATH]
if 'darwin' in sys.platform:
extra_parse_args += [r'-I%s/Headers' % SDL2_PATH]
ast = parse_file(filename=CFFI_HEADER, use_cpp=True,
cpp_args=[r'-Idependencies/fake_libc_include',
r'-DDECLSPEC=',
r'-DSDLCALL=',
r'-DTCODLIB_API=',
r'-DSDL_FORCE_INLINE=',
r'-U__GNUC__',
r'-D_SDL_thread_h',
r'-DDOXYGEN_SHOULD_IGNORE_THIS',
r'-DMAC_OS_X_VERSION_MIN_REQUIRED=1060',
r'-D__attribute__(x)=',
r'-D_PSTDINT_H_INCLUDED',
] + extra_parse_args)
ast = CustomPostParser().parse(ast)
return ast
# Can force the use of OpenMP with this variable.
try:
USE_OPENMP = eval(os.environ.get('USE_OPENMP', 'None').title())
except Exception:
USE_OPENMP = None
tdl_build = os.environ.get('TDL_BUILD', 'RELEASE').upper()
MSVC_CFLAGS = {
'DEBUG': ['/Od'],
'RELEASE': ['/GL', '/O2', '/GS-'],
}
MSVC_LDFLAGS = {
'DEBUG': [],
'RELEASE': ['/LTCG'],
}
GCC_CFLAGS = {
'DEBUG': ['-O0'],
'RELEASE': ['-flto', '-O3', '-fPIC'],
}
if sys.platform == 'win32' and '--compiler=mingw32' not in sys.argv:
extra_compile_args.extend(MSVC_CFLAGS[tdl_build])
extra_link_args.extend(MSVC_LDFLAGS[tdl_build])
if USE_OPENMP is None:
USE_OPENMP = sys.version_info[:2] >= (3, 5)
if USE_OPENMP:
extra_compile_args.append('/openmp')
else:
extra_compile_args.extend(GCC_CFLAGS[tdl_build])
extra_link_args.extend(GCC_CFLAGS[tdl_build])
if USE_OPENMP is None:
USE_OPENMP = sys.platform != 'darwin'
if USE_OPENMP:
extra_compile_args.append('-fopenmp')
extra_link_args.append('-fopenmp')
ffi = FFI()
ffi.cdef(get_cdef())
ffi.cdef(open(CFFI_EXTRA_CDEFS, 'r').read())
ffi.set_source(
module_name, '#include <tcod/cffi.h>',
include_dirs=include_dirs,
library_dirs=library_dirs,
sources=sources,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=define_macros,
)
CONSTANT_MODULE_HEADER = '''"""
Constants from the libtcod C API.
This module is auto-generated by `build_libtcod.py`.
"""
from __future__ import absolute_import
from tcod.color import Color
'''
def write_library_constants():
"""Write libtcod constants into the tcod.constants module."""
from tcod._libtcod import lib, ffi
import tcod.color
with open('tcod/constants.py', 'w') as f:
f.write(CONSTANT_MODULE_HEADER)
for name in dir(lib):
value = getattr(lib, name)
if name[:5] == 'TCOD_':
if name.isupper(): # const names
f.write('%s = %r\n' % (name[5:], value))
elif name.startswith('FOV'): # fov const names
f.write('%s = %r\n' % (name, value))
elif name[:6] == 'TCODK_': # key name
f.write('KEY_%s = %r\n' % (name[6:], value))
f.write('\n# --- colors ---\n')
for name in dir(lib):
if name[:5] != 'TCOD_':
continue
value = getattr(lib, name)
if not isinstance(value, ffi.CData):
continue
if ffi.typeof(value) != ffi.typeof('TCOD_color_t'):
continue
color = tcod.color.Color._new_from_cdata(value)
f.write('%s = %r\n' % (name[5:], color))
if __name__ == "__main__":
write_library_constants()
| [
"[email protected]"
]
| |
d7a7dd541bc436358194e21d56e22cccd5a27ae9 | 5178f5aa20a857f8744fb959e8b246079c800c65 | /02_oop/tr/src/23_list/list_tr2.py | 1c3806bcda6b62f6a939b84a1cd64d558d120ece | []
| no_license | murayama333/python2020 | 4c3f35a0d78426c96f0fbaed335f9a63227205da | 8afe367b8b42fcf9489fff1da1866e88f3af3b33 | refs/heads/master | 2021-05-19T04:03:46.295906 | 2021-03-09T22:23:58 | 2021-03-09T22:23:58 | 251,520,131 | 0 | 3 | null | 2020-10-26T01:20:09 | 2020-03-31T06:35:18 | Python | UTF-8 | Python | false | false | 99 | py | my_list = ["a", "b", "c", "d", "e"]
my_list.reverse()
print("".join([e.upper() for e in my_list]))
| [
"[email protected]"
]
| |
122b76e57de2082a15a22ffe30f332ef29d31dd6 | 8245ecc361319340b5b196b76dc8cf1d5075c3b1 | /reservations/views.py | 6872d1de2dbff040b8a1412b6e1b63bdd5a01625 | []
| no_license | Korimse/airbnb_clone | bc267e384fc098f179387ba3153614c71f999edc | c20a82cb196ad9ad6b697cf874bca34b5461c87e | refs/heads/master | 2023-06-30T11:17:53.412006 | 2021-08-03T16:30:47 | 2021-08-03T16:30:47 | 391,269,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | import datetime
from django.views.generic import View
from django.contrib import messages
from django.shortcuts import render, redirect, reverse
from django.http import Http404
from rooms import models as room_models
from reviews import forms as review_forms
from . import models
class CreateError(Exception):
pass
def create(request, room, year, month, day):
try:
date_obj = datetime.datetime(year, month, day)
room = room_models.Room.objects.get(pk=room)
        # If a BookedDay already exists for this room and date, get()
        # succeeds and we raise CreateError on purpose so the shared
        # error branch below handles it.
        models.BookedDay.objects.get(day=date_obj, reservation__room=room)
        raise CreateError()
except (room_models.Room.DoesNotExist, CreateError):
messages.error(request, "Can't Reserve That Room")
return redirect(reverse("core:home"))
except models.BookedDay.DoesNotExist:
reservation = models.Reservation.objects.create(
guest=request.user,
room=room,
check_in=date_obj,
check_out=date_obj + datetime.timedelta(days=1),
)
return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
class ReservationDetailView(View):
def get(self, *args, **kwargs):
pk = kwargs.get("pk")
reservation = models.Reservation.objects.get_or_none(pk=pk)
if not reservation or (
reservation.guest != self.request.user
and reservation.room.host != self.request.user
):
raise Http404()
form = review_forms.CreateReviewForm()
return render(
self.request,
"reservations/detail.html",
{"reservation": reservation, "form": form},
)
def edit_reservation(request, pk, verb):
reservation = models.Reservation.objects.get_or_none(pk=pk)
if not reservation or (
reservation.guest != request.user and reservation.room.host != request.user
):
raise Http404()
if verb == "confirm":
reservation.status = models.Reservation.STATUS_CONFIRMED
elif verb == "cancel":
reservation.status = models.Reservation.STATUS_CANCELED
models.BookedDay.objects.filter(reservation=reservation).delete()
reservation.save()
messages.success(request, "Reservation Updated")
return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
| [
"[email protected]"
]
| |
66af24b8e79c42a8dc7aa3ebdc1ace6b22534927 | f9e8733ed87858b12bfee6b70ccdddd6a616b60a | /73.py | 1b81f914bf328740233161f9aaa72772c4032d9f | []
| no_license | MajestyLee/leetcode_TopInterview | c1c9c923d3bf42cd4777bb2a2ccd21654a7c6dbb | 30b7d5acec716b7d754141835fc8bafe4411437e | refs/heads/master | 2020-04-01T12:19:20.837383 | 2018-11-06T02:13:44 | 2018-11-06T02:13:44 | 153,200,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | '''
Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
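# The follow-up asks for O(1) extra space; a sketch of that variant (added,
# not part of the original submission) reuses row 0 and column 0 as the
# marker arrays instead of separate sets:
def set_zeroes_const_space(matrix):
    m, n = len(matrix), len(matrix[0])
    first_row = any(matrix[0][j] == 0 for j in range(n))
    first_col = any(matrix[i][0] == 0 for i in range(m))
    # Mark zero rows/columns in the first row and column.
    for i in range(1, m):
        for j in range(1, n):
            if matrix[i][j] == 0:
                matrix[i][0] = matrix[0][j] = 0
    # Zero out inner cells based on the markers.
    for i in range(1, m):
        for j in range(1, n):
            if matrix[i][0] == 0 or matrix[0][j] == 0:
                matrix[i][j] = 0
    # Finally handle the first row and column themselves.
    if first_row:
        for j in range(n):
            matrix[0][j] = 0
    if first_col:
        for i in range(m):
            matrix[i][0] = 0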
class Solution(object):
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
row = set()
col = set()
for i in range(0,len(matrix)):
for j in range(0,len(matrix[0])):
if matrix[i][j] == 0:
row.add(i)
col.add(j)
row = list(row)
col = list(col)
for i in range(0,len(row)):
matrix[row[i]] = [0 for _ in range(0,len(matrix[0]))]
for j in range(0,len(col)):
for jj in range(len(matrix)):
matrix[jj][col[j]] = 0 | [
"[email protected]"
]
| |
b91382be96010e2e1aefacdcb707ef46b39f8400 | 3ca6302ebdc0e47d5d462435ad24a2886cfa5063 | /64.py | 5ce1111db6333ea60c31349788bb7a2df4797496 | []
| no_license | Sem31/PythonBasics | 3859276820d484025d6c3d8f9efaf131b8626da8 | d9bfd520b67056a3cbb747f7a4b71fe55871c082 | refs/heads/master | 2020-04-24T19:09:48.608293 | 2019-02-23T10:56:26 | 2019-02-23T10:56:26 | 172,203,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #create csv file by this programs
import csv
with open("example1.csv",'w')as obj:
field = ["name","salary"]
writer = csv.DictWriter(obj, fieldnames=field)
writer.writeheader()
writer.writerow({'name':'bob','salary':10000})
writer.writerow({'name':'sem','salary':40000})
writer.writerow({'name':'kamlesh','salary':30000})
    writer.writerow({'name':'vishal','salary':50000})
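# Read the rows back (added usage sketch, not part of the original exercise):
with open("example1.csv", newline="") as obj:
    for row in csv.DictReader(obj):
        print(row["name"], row["salary"]) | [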
"[email protected]"
]
| |
9fea6a7e73c6e6d9c9615de59079f3c61274895b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/tourn.py | b96981b870a4794e10ed270201c3a0c1ab925f9b | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 26 | py | ii = [('MereHHB3.py', 13)] | [
"[email protected]"
]
| |
43171e67ff9e36899ce8b565c03eaac899555a02 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /components/policy/tools/PRESUBMIT.py | 8d6bc1a9cc400f3e06219f8a9d4ecd123cddc991 | [
"BSD-3-Clause"
]
| permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 847 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def RunOtherPresubmit(function_name, input_api, output_api):
# Apply the PRESUBMIT for components/policy/resources to run the syntax check.
presubmit_path = (
input_api.change.RepositoryRoot() + \
'/components/policy/resources/PRESUBMIT.py')
presubmit_content = input_api.ReadFile(presubmit_path)
global_vars = {}
  exec(presubmit_content, global_vars)
return global_vars[function_name](input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
return RunOtherPresubmit("CheckChangeOnUpload", input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return RunOtherPresubmit("CheckChangeOnCommit", input_api, output_api)
| [
"[email protected]"
]
| |
a6a6984813486278c4dc89f5e5201d922504d0eb | fcaa66bb55cb96342fc673e88363337fac95a184 | /MovieApp/migrations/0004_auto_20210610_1948.py | c749cbeeb8d6a5a28e0b123163bcbbfbf191e942 | []
| no_license | rushabhgediya38/MovieTicketBooking | 3f0ab4fbea6011c47968ae0d50a42d8bacf4ffdb | beeb59d671d96418c0959ed072f4ffcf517a1b0c | refs/heads/main | 2023-05-14T05:55:09.176174 | 2021-06-13T15:02:42 | 2021-06-13T15:02:42 | 375,613,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # Generated by Django 3.2.4 on 2021-06-10 14:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MovieApp', '0003_images'),
]
operations = [
migrations.CreateModel(
name='M_lang',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='movie',
name='M_lang',
field=models.ManyToManyField(to='MovieApp.M_lang'),
),
]
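# Applied with the standard workflow (added note): `python manage.py
# makemigrations` generated this file; `python manage.py migrate MovieApp`
# applies it.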
| [
"[email protected]"
]
| |
c7e85775629e15d592aad6dc1b386daa9b3152fc | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/opt/ros/kinetic/lib/python2.7/dist-packages/sensor_msgs/msg/_PointCloud2.py | 63495f1677ff10df73ab8c86a47cfff83c7bb456 | [
"BSD-3-Clause"
]
| permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 13,149 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from sensor_msgs/PointCloud2.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import sensor_msgs.msg
import std_msgs.msg
class PointCloud2(genpy.Message):
_md5sum = "1158d486dd51d683ce2f1be655c3c181"
_type = "sensor_msgs/PointCloud2"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# This message holds a collection of N-dimensional points, which may
# contain additional information such as normals, intensity, etc. The
# point data is stored as a binary blob, its layout described by the
# contents of the "fields" array.
# The point cloud data may be organized 2d (image-like) or 1d
# (unordered). Point clouds organized as 2d images may be produced by
# camera depth sensors such as stereo or time-of-flight.
# Time of sensor data acquisition, and the coordinate frame ID (for 3d
# points).
Header header
# 2D structure of the point cloud. If the cloud is unordered, height is
# 1 and width is the length of the point cloud.
uint32 height
uint32 width
# Describes the channels and their layout in the binary data blob.
PointField[] fields
bool is_bigendian # Is this data bigendian?
uint32 point_step # Length of a point in bytes
uint32 row_step # Length of a row in bytes
uint8[] data # Actual point data, size is (row_step*height)
bool is_dense # True if there are no invalid points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: sensor_msgs/PointField
# This message holds the description of one point entry in the
# PointCloud2 message format.
uint8 INT8 = 1
uint8 UINT8 = 2
uint8 INT16 = 3
uint8 UINT16 = 4
uint8 INT32 = 5
uint8 UINT32 = 6
uint8 FLOAT32 = 7
uint8 FLOAT64 = 8
string name # Name of field
uint32 offset # Offset from start of point struct
uint8 datatype # Datatype enumeration, see above
uint32 count # How many elements in the field
"""
__slots__ = ['header','height','width','fields','is_bigendian','point_step','row_step','data','is_dense']
_slot_types = ['std_msgs/Header','uint32','uint32','sensor_msgs/PointField[]','bool','uint32','uint32','uint8[]','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,height,width,fields,is_bigendian,point_step,row_step,data,is_dense
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointCloud2, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.height is None:
self.height = 0
if self.width is None:
self.width = 0
if self.fields is None:
self.fields = []
if self.is_bigendian is None:
self.is_bigendian = False
if self.point_step is None:
self.point_step = 0
if self.row_step is None:
self.row_step = 0
if self.data is None:
self.data = b''
if self.is_dense is None:
self.is_dense = False
else:
self.header = std_msgs.msg.Header()
self.height = 0
self.width = 0
self.fields = []
self.is_bigendian = False
self.point_step = 0
self.row_step = 0
self.data = b''
self.is_dense = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.height, _x.width))
length = len(self.fields)
buff.write(_struct_I.pack(length))
for val1 in self.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))
_x = self.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.fields is None:
self.fields = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
self.fields.append(val1)
_x = self
start = end
end += 9
(_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
self.is_bigendian = bool(self.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.data = str[start:end]
start = end
end += 1
(self.is_dense,) = _get_struct_B().unpack(str[start:end])
self.is_dense = bool(self.is_dense)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.height, _x.width))
length = len(self.fields)
buff.write(_struct_I.pack(length))
for val1 in self.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))
_x = self.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.fields is None:
self.fields = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
self.fields.append(val1)
_x = self
start = end
end += 9
(_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
self.is_bigendian = bool(self.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.data = str[start:end]
start = end
end += 1
(self.is_dense,) = _get_struct_B().unpack(str[start:end])
self.is_dense = bool(self.is_dense)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_IBI = None
def _get_struct_IBI():
global _struct_IBI
if _struct_IBI is None:
_struct_IBI = struct.Struct("<IBI")
return _struct_IBI
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_B2I = None
def _get_struct_B2I():
global _struct_B2I
if _struct_B2I is None:
_struct_B2I = struct.Struct("<B2I")
return _struct_B2I
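if __name__ == "__main__":
    # Round-trip smoke test (added sketch; not part of the genpy-generated
    # code). Requires the genpy/std_msgs/sensor_msgs packages imported above.
    from io import BytesIO
    msg = PointCloud2(height=1, width=0, is_dense=True)
    buf = BytesIO()
    msg.serialize(buf)
    clone = PointCloud2().deserialize(buf.getvalue())
    assert clone.height == 1 and clone.is_dense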
| [
"[email protected]"
]
| |
baf02ed9910963e5ed29164ba414f88415d59e00 | ae5bdb32f5ae61f422e537222601e0fe4f86739c | /py2app_tests/argv_app/setup.py | 432f60efe00ec2498ecebe46d1699b3bb23c06bb | [
"MIT",
"Python-2.0"
]
| permissive | acclivity/py2app | beeefa84eaeaa40edfcbed25d4edb500ddd60a61 | a3dafb2c559dc9be78ebe1c44887820f9451806c | refs/heads/master | 2021-03-26T09:11:01.176301 | 2020-03-16T22:25:26 | 2020-03-16T22:25:26 | 247,691,716 | 0 | 0 | NOASSERTION | 2020-03-16T12:04:10 | 2020-03-16T12:04:09 | null | UTF-8 | Python | false | false | 148 | py | from setuptools import setup
setup(
name='BasicApp',
app=['main.py'],
options=dict(py2app=dict(
argv_emulation=True,
)),
)
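# Build notes (added): `python setup.py py2app` produces the .app bundle;
# `python setup.py py2app -A` builds a lighter alias bundle for development.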
| [
"[email protected]"
]
| |
d4dd8d3af20c272a50ffd0226634bd7465a6f2ee | 6ff12f51b9a1b9f751cec3df21813803d2455f1e | /tools/link_graph_generator.py | cf503594caed97072da3912f1fad3b5706416592 | []
| no_license | prdx/PoliteScrapper | 5d40089bb399c3d08fb848355b73cdc530c8327c | e84a49fa197e484361d2e69421b32fd4240c884c | refs/heads/master | 2020-03-23T18:36:21.340544 | 2018-08-01T21:48:14 | 2018-08-01T21:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | from bs4 import BeautifulSoup
import os
import pickle
files = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith(".xml")]
outlinks = {}
inlinks = {}
def generate_outlink_file():
print("Generating outlinks file ...")
# Generate outlinks
for xml in files:
try:
with open(xml, "rb") as f:
soup = BeautifulSoup(f, "lxml")
url = soup.id.value.text
outlinks[url] = soup.outlinks.value.text.split(",")
except Exception as e:
print("Error processing: " + xml)
print(e)
os.rename(xml, xml + ".fail")
# Dump the outlinks
with open("../output/outlinks.p", "wb") as out:
pickle.dump(outlinks, out, protocol=pickle.HIGHEST_PROTOCOL)
print("Done generating outlinks file ...")
print("Outlinks size: " + str(len(outlinks)) + " urls")
def generate_inlink_file():
print("Generating inlinks file ...")
# Generate inlinks
for key in outlinks:
for url in outlinks[key]:
try:
inlinks[url].append(key)
except KeyError:
inlinks[url] = [key]
except Exception as e:
print("Error processing: " + key)
print(e)
# Dump the inlinks
with open("../output/inlinks.p", "wb") as out:
pickle.dump(inlinks, out, protocol=pickle.HIGHEST_PROTOCOL)
print("Inlinks size: " + str(len(inlinks)) + " urls")
print("Done inlinks file ...")
generate_outlink_file()
generate_inlink_file()
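# Load-back sketch (added; not part of the original tool) showing how the
# pickled graphs are meant to be consumed downstream:
with open("../output/inlinks.p", "rb") as f:
    restored_inlinks = pickle.load(f)
print("Restored inlinks for " + str(len(restored_inlinks)) + " urls")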
| [
"[email protected]"
]
| |
1b00196825631a6f44decdbf3560208ff455bf28 | d354d2da2a6bd47aa0f545a0bf351e982882ea4c | /setup.py | 03aded87728f2e3159fcc416da43efee5d4887cd | [
"MIT"
]
| permissive | acodebreaker/pywsd | 27dffb27a0961dbe5d09e71cc4f18e3dba10bfdf | ec8dd4bead6108e04250591d1732afcc9b0fb1bb | refs/heads/master | 2021-01-18T01:40:48.909216 | 2014-11-24T07:25:17 | 2014-11-24T07:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD)
#
# Copyright (C) 2014 alvations
# URL:
# For license information, see LICENSE.md
from distutils.core import setup
setup(
name='pywsd',
version='0.1',
packages=['pywsd',],
long_description='Python Implementations of Word Sense Disambiguation (WSD) technologies',
) | [
"[email protected]"
]
| |
44b0f8140acf2ee96f151344629234de62c648f2 | 3e5150447a2c90c26354500f1df9660ef35c990b | /classes/str/.rstrip() | 2409992c6e0c6d0f2ce555b6bc34fe5f619190e3 | []
| no_license | kilirobbs/python-fiddle | 8d6417ebff9d6530e713b6724f8416da86c24c65 | 9c2f320bd2391433288cd4971c2993f1dd5ff464 | refs/heads/master | 2016-09-11T03:56:39.808358 | 2013-03-19T19:26:19 | 2013-03-19T19:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | #!/usr/bin/env python
print "1 ".rstrip()
print "1\n".rstrip() | [
"[email protected]"
]
|