blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3..616) | content_id (string, len 40) | detected_licenses (list, len 0..112) | license_type (string, 2 classes) | repo_name (string, len 5..115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3..10.2M) | extension (string, 188 classes) | content (string, len 3..10.2M) | authors (list, len 1) | author_id (string, len 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc1d8630c5d911d03efe32fa35c8639a4a387cea
|
31648f7ba9eab4841eae211b36a5ea025570ba78
|
/exam_16_08_2020/project/software/express_software.py
|
23896589c3ea9c3f097ae03b4565a7edec1c9828
|
[
"MIT"
] |
permissive
|
ivan-yosifov88/python_oop_june_2021
|
d7c4d3ba93f3085f019a4409c33b8ae9739de372
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
refs/heads/main
| 2023-07-04T21:12:07.592730 | 2021-08-18T15:12:50 | 2021-08-18T15:12:50 | 385,363,143 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 440 |
py
|
from project.software.software import Software
class ExpressSoftware(Software):
type = "Express"
coefficient_changing_capacity = 1
coefficient_changing_memory = 2
def __init__(self, name, capacity_consumption, memory_consumption):
super().__init__(name, self.type, int(capacity_consumption * self.coefficient_changing_capacity),
int(memory_consumption * self.coefficient_changing_memory))
|
[
"ivan.yosifov88gmail.com"
] |
ivan.yosifov88gmail.com
|
9fb28b5884cad2119f5b0d25c46f453739e1002b
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Bisong19Building/G_PartVI/H_Chapter37/B_StackedAutoencoders/index.py
|
5ea3ea09bf83cb054a2ed818f98e32b291b162f3
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,914 |
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Chapter 37 Autoencoders
#
#
#
#
# Figure 37-2. Stacked or deep autoencoder. The hidden layers are added
# symmetrically at both the Encoder and Decoder
#
# Stacked Autoencoders with TensorFlow 2.0
# The code example in this section shows how to implement an autoencoder network
# using TensorFlow 2.0. For simplicity, the MNIST handwriting dataset is used to create
# reconstructions of the original images. In this example, a stacked autoencoder is
# implemented with the original and reconstructed image shown in Figure 37-3. The code
# listing is presented in the following, and corresponding notes on the code are shown
# thereafter.
#
# # import TensorFlow 2.0 with GPU
# !pip install -q tf-nightly-gpu-2.0-preview
#
# # import packages
# import tensorflow as tf
#
# 477
#
# Chapter 37 Autoencoders
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# # import dataset
# (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
#
# # change datatype to float
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
#
# # scale the dataset from 0 -> 255 to 0 -> 1
# x_train /= 255
# x_test /= 255
#
# # flatten the 28x28 images into vectors of size 784
# x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
# x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
#
# # create the autoencoder model
# def model_fn():
# model_input = tf.keras.layers.Input(shape=(784,))
# encoded = tf.keras.layers.Dense(units=512, activation='relu')(model_input)
# encoded = tf.keras.layers.Dense(units=128, activation='relu')(encoded)
# encoded = tf.keras.layers.Dense(units=64, activation='relu')(encoded)
# coding_layer = tf.keras.layers.Dense(units=32)(encoded)
# decoded = tf.keras.layers.Dense(units=64, activation='relu')(coding_layer)
# decoded = tf.keras.layers.Dense(units=128, activation='relu')(decoded)
# decoded = tf.keras.layers.Dense(units=512, activation='relu')(decoded)
# decoded_output = tf.keras.layers.Dense(units=784)(decoded)
#
# # the autoencoder model
# autoencoder_model = tf.keras.Model(inputs=model_input, outputs=decoded_output)
#
# # compile the model
# autoencoder_model.compile(optimizer='adam',
# loss='binary_crossentropy',
# metrics=['accuracy'])
#
# return autoencoder_model
#
#
# 478
#
# Chapter 37 Autoencoders
#
# # build the model
# autoencoder_model = model_fn()
#
# # print autoencoder model summary
# autoencoder_model.summary()
#
# # train the model
# autoencoder_model.fit(x_train, x_train, epochs=1000, batch_size=256,
# shuffle=True, validation_data=(x_test, x_test))
#
# # visualize reconstruction
# sample_size = 6
# test_image = x_test[:sample_size]
# # reconstruct test samples
# test_reconstruction = autoencoder_model.predict(test_image)
#
# plt.figure(figsize = (8,25))
# plt.suptitle('Stacked Autoencoder Reconstruction', fontsize=16)
# for i in range(sample_size):
# plt.subplot(sample_size, 2, i*2+1)
# plt.title('Original image')
#
# plt.imshow(test_image[i].reshape((28, 28)), cmap="Greys",
# interpolation="nearest", aspect='auto')
# plt.subplot(sample_size, 2, i*2+2)
# plt.title('Reconstructed image')
#
# plt.imshow(test_reconstruction[i].reshape((28, 28)), cmap="Greys",
# interpolation="nearest", aspect='auto')
# plt.show()
#
# From the preceding code listing, take note of the following:
# • Observe the arrangement of the encoder layers and the decoder
# layers of the stacked autoencoder. Specifically note how the
# corresponding layer arrangement of the encoder and the decoder has
# the same number of neurons.
# • The reconstruction loss measures the difference between the inputs
# into the autoencoder network and the decoder output (the listing above
# uses binary cross-entropy rather than a squared-error loss).
# The image in Figure 37-3 contrasts the reconstructed images from the autoencoder
# network with the original images in the dataset.
# 479
#
# Chapter 37 Autoencoders
#
#
#
#
# Figure 37-3. Stacked autoencoder reconstruction. Left: Original image. Right:
# Reconstructed image.
# 480
#
# Chapter 37 Autoencoders
#
#
# Denoising Autoencoders
# Denoising autoencoders add a different type of constraint to the network by imputing
# some Gaussian noise into the inputs. This noise injection forces the autoencoder to
# learn the uncorrupted form of the input features; by doing so, the autoencoder learns the
# internal representation of the dataset without memorizing the inputs.
# Another way a denoising autoencoder constrains the input is by deactivating some
# input neurons in a similar fashion to the Dropout technique. Denoising autoencoders
# use an overcomplete network architecture. This means that the dimensions of the
# hidden Encoder and Decoder layers are not restricted; hence, they are overcomplete. An
# illustration of a denoising autoencoder architecture is shown in Figure 37-4.
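#
# As a minimal sketch of the noise-injection idea just described (this block
# is not part of the book's listing; the 0.3 noise factor and the reuse of
# model_fn() from the earlier example are assumptions), a denoising variant
# corrupts the inputs while keeping the clean images as the targets:
#
# # corrupt the flattened MNIST vectors with Gaussian noise, clip back to [0, 1]
# x_train_noisy = np.clip(x_train + 0.3 * np.random.normal(size=x_train.shape), 0., 1.)
# x_test_noisy = np.clip(x_test + 0.3 * np.random.normal(size=x_test.shape), 0., 1.)
#
# # train on noisy inputs, reconstruct the clean originals
# denoising_model = model_fn()
# denoising_model.fit(x_train_noisy, x_train, epochs=10, batch_size=256,
#                     shuffle=True, validation_data=(x_test_noisy, x_test))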
#
#
#
#
# Figure 37-4. Denoising autoencoder. Constraint is applied by either adding
# Gaussian noise or by switching off a random selection of the input neurons.
#
#
#
# 481
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Stacked Autoencoders with TensorFlow 2.0",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
self.add(mbk("# Stacked Autoencoders with TensorFlow 2.0"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class StackedAutoencoders(HierNode):
def __init__(self):
super().__init__("Stacked Autoencoders with TensorFlow 2.0")
self.add(Content())
# eof
|
[
"[email protected]"
] | |
479d88ea9f511e28891f74433873a0df3ee25d51
|
605356250c655a7f98d5f1158e0ffc94175de4f7
|
/devel/lib/python2.7/dist-packages/pal_interaction_msgs/__init__.py
|
d49d01c4bf61d8c984b69716337128721f6f9c73
|
[] |
no_license
|
MatthewCallery/msc-tiago-project
|
4d3dcf07b7bc6915d2f203bbff46f6c11720ff9f
|
8c9e987c45d6152192ba36bb27781e961e7900c3
|
refs/heads/master
| 2020-11-30T04:17:53.649839 | 2017-07-11T14:38:47 | 2017-07-11T14:38:47 | 96,903,254 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 126 |
py
|
/home/mc16766/tiago_public_ws/devel/.private/pal_interaction_msgs/lib/python2.7/dist-packages/pal_interaction_msgs/__init__.py
|
[
"mc16766@it051534"
] |
mc16766@it051534
|
865eac4198f654825e9c003a59da076d6ae2126a
|
c532ecb0aa2e430b521c6a06a0c98e80886867d0
|
/hw5_ws/devel/lib/python2.7/dist-packages/mav_planning_msgs/msg/_Polygon2D.py
|
1f3689d5b14b2a5b991d2eced999b770dd7385c5
|
[] |
no_license
|
RENyunfan/NRSL
|
95d002281d0c2af72cc676ef0205ad7fb60c0876
|
6307708d7b71c86308e4d005db552b3322c6a27c
|
refs/heads/master
| 2020-04-13T02:22:59.813409 | 2018-12-27T16:44:32 | 2018-12-27T16:44:32 | 162,900,906 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 125 |
py
|
/home/kevin/workSpace/hw5_ws/devel/.private/mav_planning_msgs/lib/python2.7/dist-packages/mav_planning_msgs/msg/_Polygon2D.py
|
[
"[email protected]"
] | |
db8fa11ced5514e66f1fae92d05139f0299f77f8
|
d29405a576a0da35b79d3c73aed62c1011931341
|
/ax/core/tests/test_parameter.py
|
9f8fb519557601696394772fbe5a4744ad417604
|
[
"MIT"
] |
permissive
|
liangshi7/Ax
|
5a8c152ec7268e26654ef0a6918b505e3e98ec52
|
f20d10f619aae504f2e4509b5a786842a6c72e89
|
refs/heads/master
| 2020-09-10T17:11:33.483281 | 2019-11-14T16:54:02 | 2019-11-14T16:59:41 | 221,768,108 | 0 | 0 |
MIT
| 2019-11-14T19:07:42 | 2019-11-14T19:07:42 | null |
UTF-8
|
Python
| false | false | 9,647 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from ax.core.parameter import (
ChoiceParameter,
FixedParameter,
ParameterType,
RangeParameter,
)
from ax.utils.common.testutils import TestCase
class RangeParameterTest(TestCase):
def setUp(self):
self.param1 = RangeParameter(
name="x",
parameter_type=ParameterType.FLOAT,
lower=1,
upper=3,
log_scale=True,
digits=5,
is_fidelity=True,
target_value=2,
)
self.param1_repr = (
"RangeParameter(name='x', parameter_type=FLOAT, "
"range=[1.0, 3.0], log_scale=True, digits=5, fidelity=True, target_"
"value=2)"
)
self.param2 = RangeParameter(
name="y", parameter_type=ParameterType.INT, lower=10, upper=15
)
self.param2_repr = (
"RangeParameter(name='y', parameter_type=INT, range=[10, 15])"
)
def testEq(self):
param2 = RangeParameter(
name="x",
parameter_type=ParameterType.FLOAT,
lower=1,
upper=3,
log_scale=True,
digits=5,
is_fidelity=True,
target_value=2,
)
self.assertEqual(self.param1, param2)
self.assertNotEqual(self.param1, self.param2)
def testProperties(self):
self.assertEqual(self.param1.name, "x")
self.assertEqual(self.param1.parameter_type, ParameterType.FLOAT)
self.assertEqual(self.param1.lower, 1)
self.assertEqual(self.param1.upper, 3)
self.assertEqual(self.param1.digits, 5)
self.assertTrue(self.param1.log_scale)
self.assertFalse(self.param2.log_scale)
self.assertTrue(self.param1.is_numeric)
self.assertTrue(self.param1.is_fidelity)
self.assertIsNotNone(self.param1.target_value)
self.assertFalse(self.param2.is_fidelity)
self.assertIsNone(self.param2.target_value)
def testValidate(self):
self.assertFalse(self.param1.validate(None))
self.assertFalse(self.param1.validate("foo"))
self.assertTrue(self.param1.validate(1))
self.assertTrue(self.param1.validate(1.3))
def testRepr(self):
self.assertEqual(str(self.param1), self.param1_repr)
self.assertEqual(str(self.param2), self.param2_repr)
def testBadCreations(self):
with self.assertRaises(ValueError):
RangeParameter("x", ParameterType.STRING, 1, 3)
with self.assertRaises(ValueError):
RangeParameter("x", ParameterType.FLOAT, 3, 1)
with self.assertRaises(ValueError):
RangeParameter("x", ParameterType.INT, 0, 1, log_scale=True)
with self.assertRaises(ValueError):
RangeParameter("x", ParameterType.INT, 0.5, 1)
def testBadSetter(self):
with self.assertRaises(ValueError):
self.param1.update_range(upper="foo")
with self.assertRaises(ValueError):
self.param1.update_range(lower="foo")
with self.assertRaises(ValueError):
self.param1.update_range(lower=4)
with self.assertRaises(ValueError):
self.param1.update_range(upper=0.5)
with self.assertRaises(ValueError):
self.param1.update_range(lower=1.0, upper=0.9)
def testGoodSetter(self):
self.param1.update_range(lower=1.0)
self.param1.update_range(upper=1.0011)
self.param1.set_log_scale(False)
self.param1.set_digits(3)
self.assertEqual(self.param1.digits, 3)
self.assertEqual(self.param1.upper, 1.001)
# This would cast Upper = Lower = 1, which is not allowed
with self.assertRaises(ValueError):
self.param1.set_digits(1)
self.param1.update_range(lower=2.0, upper=3.0)
self.assertEqual(self.param1.lower, 2.0)
self.assertEqual(self.param1.upper, 3.0)
def testCast(self):
self.assertEqual(self.param2._cast(2.5), 2)
self.assertEqual(self.param2._cast(3), 3)
self.assertEqual(self.param2._cast(None), None)
def testClone(self):
param_clone = self.param1.clone()
self.assertEqual(self.param1.lower, param_clone.lower)
param_clone._lower = 2.0
self.assertNotEqual(self.param1.lower, param_clone.lower)
class ChoiceParameterTest(TestCase):
def setUp(self):
self.param1 = ChoiceParameter(
name="x", parameter_type=ParameterType.STRING, values=["foo", "bar", "baz"]
)
self.param1_repr = (
"ChoiceParameter(name='x', parameter_type=STRING, "
"values=['foo', 'bar', 'baz'])"
)
self.param2 = ChoiceParameter(
name="x",
parameter_type=ParameterType.STRING,
values=["foo", "bar", "baz"],
is_ordered=True,
is_task=True,
)
self.param3 = ChoiceParameter(
name="x",
parameter_type=ParameterType.STRING,
values=["foo", "bar"],
is_fidelity=True,
target_value="bar",
)
self.param3_repr = (
"ChoiceParameter(name='x', parameter_type=STRING, "
"values=['foo', 'bar'], fidelity=True, target_value='bar')"
)
def testEq(self):
param4 = ChoiceParameter(
name="x", parameter_type=ParameterType.STRING, values=["foo", "bar", "baz"]
)
self.assertEqual(self.param1, param4)
self.assertNotEqual(self.param1, self.param2)
param5 = ChoiceParameter(
name="x", parameter_type=ParameterType.STRING, values=["foo", "foobar"]
)
self.assertNotEqual(self.param1, param5)
def testProperties(self):
self.assertEqual(self.param1.name, "x")
self.assertEqual(self.param1.parameter_type, ParameterType.STRING)
self.assertEqual(len(self.param1.values), 3)
self.assertFalse(self.param1.is_numeric)
self.assertFalse(self.param1.is_ordered)
self.assertFalse(self.param1.is_task)
self.assertTrue(self.param2.is_ordered)
self.assertTrue(self.param2.is_task)
def testRepr(self):
self.assertEqual(str(self.param1), self.param1_repr)
self.assertEqual(str(self.param3), self.param3_repr)
def testValidate(self):
self.assertFalse(self.param1.validate(None))
self.assertFalse(self.param1.validate(3))
for value in ["foo", "bar", "baz"]:
self.assertTrue(self.param1.validate(value))
def testSetter(self):
self.param1.add_values(["bin"])
self.assertTrue(self.param1.validate("bin"))
self.param1.set_values(["bar", "biz"])
self.assertTrue(self.param1.validate("biz"))
self.assertTrue(self.param1.validate("bar"))
self.assertFalse(self.param1.validate("foo"))
def testSingleValue(self):
with self.assertRaises(ValueError):
ChoiceParameter(
name="x", parameter_type=ParameterType.STRING, values=["foo"]
)
with self.assertRaises(ValueError):
self.param1.set_values(["foo"])
def testClone(self):
param_clone = self.param1.clone()
self.assertEqual(len(self.param1.values), len(param_clone.values))
param_clone._values.append("boo")
self.assertNotEqual(len(self.param1.values), len(param_clone.values))
class FixedParameterTest(TestCase):
def setUp(self):
self.param1 = FixedParameter(
name="x", parameter_type=ParameterType.BOOL, value=True
)
self.param1_repr = "FixedParameter(name='x', parameter_type=BOOL, value=True)"
def testEq(self):
param2 = FixedParameter(name="x", parameter_type=ParameterType.BOOL, value=True)
self.assertEqual(self.param1, param2)
param3 = FixedParameter(
name="x", parameter_type=ParameterType.BOOL, value=False
)
self.assertNotEqual(self.param1, param3)
def testProperties(self):
self.assertEqual(self.param1.name, "x")
self.assertEqual(self.param1.parameter_type, ParameterType.BOOL)
self.assertEqual(self.param1.value, True)
self.assertFalse(self.param1.is_numeric)
def testRepr(self):
self.assertEqual(str(self.param1), self.param1_repr)
def testValidate(self):
self.assertFalse(self.param1.validate(None))
self.assertFalse(self.param1.validate("foo"))
self.assertFalse(self.param1.validate(False))
self.assertTrue(self.param1.validate(True))
def testSetter(self):
self.param1.set_value(False)
self.assertEqual(self.param1.value, False)
def testClone(self):
param_clone = self.param1.clone()
self.assertEqual(self.param1.value, param_clone.value)
param_clone._value = False
self.assertNotEqual(self.param1.value, param_clone.value)
def testCast(self):
self.assertEqual(self.param1._cast(1), True)
self.assertEqual(self.param1._cast(False), False)
self.assertEqual(self.param1._cast(None), None)
class ParameterEqualityTest(TestCase):
def setUp(self):
self.fixed_parameter = FixedParameter(
name="x", parameter_type=ParameterType.BOOL, value=True
)
self.choice_parameter = ChoiceParameter(
name="x", parameter_type=ParameterType.STRING, values=["foo", "bar", "baz"]
)
def testNotEqual(self):
self.assertNotEqual(self.fixed_parameter, self.choice_parameter)
|
[
"[email protected]"
] | |
d5484b50a845a80c7c8d7c731acd7778724d8da8
|
b21abd3873c76739ceefd1b4613a343ba2b454d1
|
/jwst/assign_wcs/util.py
|
97140961df5adc10fe0b8c84bfb6276f03a866ae
|
[
"BSD-2-Clause"
] |
permissive
|
rij/jwst
|
96a7baf95de953c51bbe67f3cdd459c114c47eef
|
1d3acecb28d9a3dcb44b993e451b69da9856187d
|
refs/heads/master
| 2020-12-24T09:56:21.784342 | 2016-06-09T19:17:01 | 2016-06-09T19:17:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,154 |
py
|
"""
Utility function for WCS
"""
import logging
import math
import copy
import functools
import numpy as np
from numpy import arctan2
from astropy.utils.misc import isiterable
from astropy.io import fits
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter, InputParameterError
from astropy.modeling import projections
from astropy.modeling import models as astmodels
from gwcs import WCS
from gwcs import utils as gwutils
from gwcs.wcstools import wcs_from_fiducial
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def reproject(wcs1, wcs2, origin=0):
"""
Given two WCSs return a function which takes pixel coordinates in
the first WCS and computes their location in the second one.
It performs the forward transformation of ``wcs1`` followed by the
inverse of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~gwcs.wcs.WCS`
WCS objects.
Returns
-------
_reproject : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in ``wcs2``.
"""
def _reproject(x, y):
sky = wcs1.forward_transform(x, y)
return wcs2.backward_transform(*sky)
return _reproject
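# A hedged usage sketch (wcs_a and wcs_b are assumed, pre-built gwcs.WCS objects):
#     to_b = reproject(wcs_a, wcs_b)
#     x2, y2 = to_b(x1, y1)  # pixel positions in wcs_a expressed in wcs_b pixels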
def wcs_from_footprints(wcslist, refwcs=None, transform=None, domain=None):
"""
Create a WCS from a list of WCS objects.
A fiducial point in the output coordinate frame is created from the
footprints of all WCS objects. For a spatial frame this is the center
of the union of the footprints. For a spectral frame the fiducial is in
the beginning of the footprint range.
If ``refwcs`` is not specified, the first WCS object in the list is considered
a reference. The output coordinate frame and projection (for celestial frames)
is taken from ``refwcs``.
    If ``transform`` is not supplied, a compound transform composed of
    scaling and rotation is copied from ``refwcs``.
    If ``domain`` is not supplied, the domain of the new WCS is computed
    from the domains of all input WCSs.
Parameters
----------
wcslist : list of `~gwcs.wcs.WCS`
A list of WCS objects.
refwcs : `~gwcs.wcs.WCS`, optional
Reference WCS. The output coordinate frame, the projection and a
scaling and rotation transform is created from it. If not supplied
the first WCS in the list is used as ``refwcs``.
transform : `~astropy.modeling.core.Model`, optional
A transform, passed to :class_method:`~gwcs.WCS.wcs_from_fiducial`
If not supplied Scaling | Rotation is computed from ``refwcs``.
domain : list of dicts, optional
Domain of the new WCS.
If not supplied it is computed from the domain of all inputs.
"""
if not isiterable(wcslist):
raise ValueError("Expected 'wcslist' to be an iterable of WCS objects.")
if not all([isinstance(w, WCS) for w in wcslist]):
raise TypeError("All items in wcslist are expected to be instances of gwcs.WCS.")
if refwcs is None:
refwcs = wcslist[0]
else:
if not isinstance(refwcs, WCS):
raise TypeError("Expected refwcs to be an instance of gwcs.WCS.")
fiducial = compute_fiducial(wcslist, domain)
prj = np.array([isinstance(m, projections.Projection) for m \
in refwcs.forward_transform]).nonzero()[0]
if prj:
# TODO: Fix the compound model indexing with numpy integers in astropy.
# Remove the work around this issues from here.
prj = refwcs.forward_transform[int(prj[0])]
else:
prj = None
trans = []
scales = [m for m in refwcs.forward_transform if isinstance(m, astmodels.Scale)]
if scales:
trans.append(functools.reduce(lambda x, y: x & y, scales))
rotation = [m for m in refwcs.forward_transform if \
isinstance(m, astmodels.AffineTransformation2D)]
if rotation:
trans.append(rotation[0])
if trans:
tr = functools.reduce(lambda x, y: x | y, trans)
else:
tr = None
out_frame = getattr(refwcs, getattr(refwcs, 'output_frame'))
wnew = wcs_from_fiducial(fiducial, coordinate_frame=out_frame,
projection=prj, transform=tr)
#domain_bounds = np.hstack([gwutils._domain_to_bounds(d) for d in [w.domain for w in wcslist]])
domain_footprints = [w.footprint() for w in wcslist]
domain_bounds = np.hstack([wnew.backward_transform(*f) for f in domain_footprints])
for axs in domain_bounds:
axs -= axs.min()
domain = []
for axis in out_frame.axes_order:
axis_min, axis_max = domain_bounds[axis].min(), domain_bounds[axis].max()
domain.append({'lower': axis_min, 'upper': axis_max,
'includes_lower': True, 'includes_upper': True})
wnew.domain = domain
return wnew
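# A hedged usage sketch (wcs_list is an assumed list of gwcs.WCS objects sharing
# an output coordinate frame; the first entry is taken as the reference WCS):
#     combined = wcs_from_footprints(wcs_list)
#     sky = combined(10.5, 20.5)  # evaluate the stitched WCS at a pixel position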
def compute_fiducial(wcslist, domain=None):
"""
For a celestial footprint this is the center.
For a spectral footprint, it is the beginning of the range.
This function assumes all WCSs have the same output coordinate frame.
"""
output_frame = getattr(wcslist[0], 'output_frame')
axes_types = getattr(wcslist[0], output_frame).axes_type
spatial_axes = np.array(axes_types) == 'SPATIAL'
spectral_axes = np.array(axes_types) == 'SPECTRAL'
footprints = np.hstack([w.footprint(domain=domain) for w in wcslist])
spatial_footprint = footprints[spatial_axes]
spectral_footprint = footprints[spectral_axes]
fiducial = np.empty(len(axes_types))
if (spatial_footprint).any():
lon, lat = spatial_footprint
lon, lat = np.deg2rad(lon), np.deg2rad(lat)
x_mean = np.mean(np.cos(lat) * np.cos(lon))
y_mean = np.mean(np.cos(lat) * np.sin(lon))
z_mean = np.mean(np.sin(lat))
lon_fiducial = np.rad2deg(np.arctan2(y_mean, x_mean)) % 360.0
lat_fiducial = np.rad2deg(np.arctan2(z_mean, np.sqrt(x_mean**2 + y_mean\
**2)))
fiducial[spatial_axes] = lon_fiducial, lat_fiducial
if (spectral_footprint).any():
fiducial[spectral_axes] = spectral_footprint.min()
return fiducial
def is_fits(input):
"""
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
"""
isfits = False
fitstype = None
names = ['fits', 'fit', 'FITS', 'FIT']
#determine if input is a fits file based on extension
# Only check type of FITS file if filename ends in valid FITS string
f = None
fileclose = False
if isinstance(input, fits.HDUList):
isfits = True
f = input
else:
isfits = True in [input.endswith(l) for l in names]
# if input is a fits file determine what kind of fits it is
#waiver fits len(shape) == 3
if isfits:
if not f:
try:
f = fits.open(input, mode='readonly')
fileclose = True
except Exception as e:
if f is not None:
f.close()
raise
data0 = f[0].data
if data0 is not None:
try:
if isinstance(f[1], fits.TableHDU):
fitstype = 'waiver'
except IndexError:
fitstype = 'simple'
else:
fitstype = 'mef'
if fileclose:
f.close()
return isfits, fitstype
def not_implemented_mode(input_model, ref):
exp_type = input_model.meta.exposure.type
message = "WCS for EXP_TYPE of {0} is not implemented.".format(exp_type)
log.critical(message)
#raise AttributeError(message)
return None
|
[
"[email protected]"
] | |
e2f4c086c474a87cb50581f634658462740af143
|
65ad03b8f4d975585776a5ba3d6a6ee1750ebad4
|
/03-First Class Functions/Before/__main__.py
|
ba99256cf1a798f52d97ca90cd1eeff148e7ba1c
|
[] |
no_license
|
afettouhi/PythonFP
|
e51351d5901a28f8ecd938d8213fcb5982c51e62
|
c6a2319bc053f26cfbe70102a2dd7c1a4bcbbd57
|
refs/heads/master
| 2020-05-31T18:53:49.373967 | 2019-06-06T07:53:30 | 2019-06-06T07:53:30 | 190,445,909 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 890 |
py
|
from order import Order
from customer import Customer
def main():
cust1 = Customer()
cust1.name = 'Heart of Gold'
cust1.address = 'The Milky Way Galaxy'
cust1.enterprise = False
cust2 = Customer()
cust2.name = 'Milliways Restaurant'
cust2.address = 'Magrathea'
cust2.enterprise = True
ord1 = Order()
ord1.customer = cust1
ord1.expedited = False
ord1.shipping_address = 'Infinitely Improbable'
    ord2 = Order()
ord2.customer = cust2
ord2.expedited = True
ord2.shipping_address = 'Magrathea'
Order.orders = [ord1, ord2]
for name in ord1.get_expedited_orders_customer_names():
print(name)
for address in ord1.get_expedited_orders_customer_addresses():
print(address)
for address in ord1.get_expedited_orders_shipping_addresses():
print(address)
main()
|
[
"[email protected]"
] | |
6de42dc97bb55f64381861e2a4e08d9b14e62c76
|
f0681b8c129e8afce21e340697502230f45ce930
|
/venv/Lib/site-packages/com/vmware/nsx_policy/infra/tier_0s/locale_services/l2vpn_context_client.py
|
7157fa51e8d67cf4444b3c0a0e03c6f9e03e3da6
|
[] |
no_license
|
dungla2011/python_pyvmomi_working_sample_vmware_easy
|
8852b6fdcd0f7d0f648f6f7b6c6e4f70c7213746
|
a3b6d86a802f28c7ee249fc03523d5e5f0a2e3bd
|
refs/heads/main
| 2023-07-05T14:56:46.551091 | 2021-08-20T12:19:39 | 2021-08-20T12:19:39 | 395,496,219 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,840 |
py
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class L2vpns(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _L2vpnsStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
tier0_id,
locale_service_id,
l2vpn_id,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
})
def get(self,
tier0_id,
locale_service_id,
l2vpn_id,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.L2Vpn`
:return: com.vmware.nsx_policy.model.L2Vpn
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
})
def list(self,
tier0_id,
locale_service_id,
cursor=None,
include_mark_for_delete_objects=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type include_mark_for_delete_objects: :class:`bool` or ``None``
:param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.L2VpnListResult`
:return: com.vmware.nsx_policy.model.L2VpnListResult
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'cursor': cursor,
'include_mark_for_delete_objects': include_mark_for_delete_objects,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
tier0_id,
locale_service_id,
l2vpn_id,
l2_vpn,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:type l2_vpn: :class:`com.vmware.nsx_policy.model_client.L2Vpn`
:param l2_vpn: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
'l2_vpn': l2_vpn,
})
def update(self,
tier0_id,
locale_service_id,
l2vpn_id,
l2_vpn,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:type l2_vpn: :class:`com.vmware.nsx_policy.model_client.L2Vpn`
:param l2_vpn: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.L2Vpn`
:return: com.vmware.nsx_policy.model.L2Vpn
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
'l2_vpn': l2_vpn,
})
class _L2vpnsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
HasFieldsOfValidator()
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'cursor': type.OptionalType(type.StringType()),
'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
HasFieldsOfValidator()
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
},
query_parameters={
'cursor': 'cursor',
'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
# properties for patch operation
patch_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
'l2_vpn': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2Vpn'),
})
patch_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
patch_input_value_validator_list = [
HasFieldsOfValidator()
]
patch_output_validator_list = [
]
patch_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}',
request_body_parameter='l2_vpn',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
'l2_vpn': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2Vpn'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
HasFieldsOfValidator()
]
update_output_validator_list = [
HasFieldsOfValidator()
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}',
request_body_parameter='l2_vpn',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2Vpn'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2VpnListResult'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'patch': {
'input_type': patch_input_type,
'output_type': type.VoidType(),
'errors': patch_error_dict,
'input_value_validator_list': patch_input_value_validator_list,
'output_validator_list': patch_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2Vpn'),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'list': list_rest_metadata,
'patch': patch_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'L2vpns': L2vpns,
'l2vpns': 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns_client.StubFactory',
}
|
[
"[email protected]"
] | |
1d542e307d523389d60ab5251e07e5c7fc776881
|
18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9
|
/hun31.py
|
1d7fbd0f0f8cb874ba79e0399716f3e93eb88345
|
[] |
no_license
|
mahakalai/mahak
|
05f96d52880ed7b2e5eb70dd1dbf14fc533236e8
|
613be9df7743ef59b1f0e07b7df987d29bb23ec7
|
refs/heads/master
| 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
n=int(input())
l=[int(x) for x in input().split()]
l1=[]
mul=1
for i in range(1,len(l)):
mul=mul*l[i]
l1.append(mul)
print(max(l1))
|
[
"[email protected]"
] | |
d5689f706ada50f514498638a501965f892b556d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_worded.py
|
b372693f29f593edd5f1838b4d766e0aac3745bb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
from xai.brain.wordbase.verbs._word import _WORD
# class header
class _WORDED(_WORD):
def __init__(self,):
_WORD.__init__(self)
self.name = "WORDED"
self.specie = 'verbs'
self.basic = "word"
self.jsondata = {}
|
[
"[email protected]"
] | |
545bbda1adda90ddac80fd555953d3811e4d7e98
|
7a4c6fe3802b740b928136fc7a5497c473386e2b
|
/credentials.py
|
6078e9e0769e2cd330defc5649428f5dca5d6788
|
[] |
no_license
|
sirrotich/Password--Locker
|
4cb882d15a2a1659c48e33227b2b46e5d44a9456
|
b2f4dfd330a7812675b71d6a3311e44139f2ae94
|
refs/heads/master
| 2020-05-05T03:55:41.040277 | 2019-04-08T15:56:22 | 2019-04-08T15:56:22 | 179,690,522 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,938 |
py
|
import pyperclip  # This allows you to copy and paste a given text unit
class Credentials:
"""
Class that generates new instances of users.
"""
credentials_list = [] #empty credentials list
def __init__(self,credentials_name,usr_name,password):
self.credentials_name = credentials_name
self.usr_name = usr_name
self.password = password
def save_credentials(self):
'''
save_credentials method saves credentials objects into credentials_list
'''
Credentials.credentials_list.append(self)
@classmethod
    def delete_credentials(cls, name):
        '''
        delete_credentials method deletes a saved credentials entry from the credentials_list
        '''
        for credentials in cls.credentials_list:
            if credentials.credentials_name == name:
                return cls.credentials_list.remove(credentials)
@classmethod
def find_by_name(cls,name):
for credentials in cls.credentials_list:
if credentials.credentials_name == name:
return credentials
@classmethod
def credentials_exist(cls,name):
'''
Method that checks if a credentials exists from the credentials list.
Args:
name: Acc name to search if it exists
Returns :
Boolean: True or false depending if the credentials exists
'''
for credentials in cls.credentials_list:
if credentials.password == name:
return credentials
return False
@classmethod
def display_credentials(cls): #check this line later
'''
method that returns the credentials list
'''
return cls.credentials_list
@classmethod
    def copy_usr_name(cls, name):
        credentials_found = Credentials.find_by_name(name)
        pyperclip.copy(credentials_found.usr_name)
|
[
"[email protected]"
] | |
a616707b079d8e5fa123ef8a00425c270eaf8257
|
c003c5faf5b442fa4bf3010eae46370ebcf39040
|
/voseq/genbank_fasta/tests/test_utils.py
|
b89cfb300f5c60393db249b3155c9fd3863a95d5
|
[] |
no_license
|
mezarino/VoSeq
|
35d1fb3bb1de85f50bc39bc7ac6aefcad310e120
|
bf75f1f91176a57ee23465e520d27df021576712
|
refs/heads/master
| 2021-01-18T05:28:44.318625 | 2015-02-15T15:40:12 | 2015-02-15T15:40:12 | 29,065,916 | 0 | 0 | null | 2015-01-10T18:04:33 | 2015-01-10T18:04:32 | null |
UTF-8
|
Python
| false | false | 1,722 |
py
|
from django.test import TestCase
from django.core.management import call_command
from core.utils import get_gene_codes
from core.utils import get_voucher_codes
from public_interface.models import TaxonSets
from public_interface.models import GeneSets
from public_interface.models import Genes
from genbank_fasta import utils
class TestGenBankFastaUtils(TestCase):
def setUp(self):
args = []
opts = {'dumpfile': 'test_db_dump.xml', 'verbosity': 0}
cmd = 'migrate_db'
call_command(cmd, *args, **opts)
gs = GeneSets.objects.get(geneset_name='2genes')
g = Genes.objects.get(gene_code='COI')
g2 = Genes.objects.get(gene_code='16S')
ts = TaxonSets.objects.get(taxonset_name='Erebia')
self.cleaned_data = {
'gene_codes': [g, g2],
'taxonset': ts,
'voucher_codes': 'CP200-10\r\nCP100-11',
'geneset': gs,
}
def test_get_gene_codes(self):
expected = 3
result = get_gene_codes(self.cleaned_data)
self.assertEqual(expected, len(result))
def test_dataset_reading_frame_2(self):
res = utils.Results(['CP100-10', 'CP100-11'], ['COI'])
res.get_datasets()
self.assertEqual('WAGMIGTSLSLIIRTELGNP', res.protein.splitlines()[1][0:20])
def test_get_voucher_codes(self):
expected = 3
result = get_voucher_codes(self.cleaned_data)
self.assertEqual(expected, len(result))
def test_get_voucher_codes_dropped(self):
self.cleaned_data['voucher_codes'] = 'CP100-10\r\n--CP100-11\r\nCP100-12'
expected = 2
result = get_voucher_codes(self.cleaned_data)
self.assertEqual(expected, len(result))
|
[
"[email protected]"
] | |
6f9362635f191a549b5555c279d7bffca6d697f5
|
11130633fe59b222da0696dc05e72ac30871a573
|
/Problem_Solving/leetcode/Sequential(Non-Linear)_data_structure/Tree/543_Diameter_of_Binary_Tree/diameterOfBinaryTree.py
|
07a99675edeb2bce7241276d6b2a9954688e53f8
|
[] |
no_license
|
Jin-SukKim/Algorithm
|
024aa77c6bf63666910a1eb03407e808a05307ec
|
5f2a14fe1f64032126df55b1eadc1580a32735f3
|
refs/heads/master
| 2023-09-01T20:50:13.150780 | 2023-09-01T07:54:32 | 2023-09-01T07:54:32 | 263,555,962 | 4 | 0 | null | 2023-02-14T14:36:38 | 2020-05-13T07:24:51 |
C++
|
UTF-8
|
Python
| false | false | 576 |
py
|
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def diameterOfBinaryTree(root: TreeNode) -> int:
    longest: int = 0
    def dfs(node: TreeNode) -> int:
        nonlocal longest  # let the nested function update the outer variable
        if not node:
            return -1
        # recurse down to the leaf nodes on the left and on the right
        left = dfs(node.left)
        right = dfs(node.right)
        # longest path through this node: left edges + right edges + 2
        longest = max(longest, left + right + 2)
        # state value: the height of this subtree
        return max(left, right) + 1
    dfs(root)
    return longest
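# Example check (assumed, not part of the original file): the tree
# [1, 2, 3, 4, 5] has diameter 3, via the path 4 -> 2 -> 1 -> 3.
# root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
# assert diameterOfBinaryTree(root) == 3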
|
[
"[email protected]"
] | |
446b8e28e38c08382df586afe3e8b6076aabc31c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/3170.py
|
b6dc51d486002f5afd37165abc48d1430d695198
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 497 |
py
|
#!/usr/bin/python
import sys
N = int(sys.stdin.readline())
for case in xrange(N):
C, F, X = map(float,sys.stdin.readline().split())
n_farms_f = (F*X - 2*C)/(C*F) - 1.
n_farms = int((F*X - 2*C)/(C*F) - 1.)
if n_farms_f > 0:
t1 = 0.
for i in xrange(n_farms+1):
t1 += 1./(2.+i*F) # 2 comes from initial rate
t = C * t1 + (X / (2. + ((n_farms+1) * F)))
else:
t = X / 2.
print "Case #%d: %.7f" % (case+1, t)
|
[
"[email protected]"
] | |
8ed4bb03243b16d9453a9c48d83128a0f7695c57
|
e63ab09f227459380c317aa1694cffd04255c807
|
/cheshire3/graph/selector.py
|
1fdbfff0492435f2acdc4cbfc817d76c3192b8b3
|
[
"ICU",
"X11"
] |
permissive
|
bitwhite/cheshire3
|
91a0d2f8d2e79ac277ac4f7a3bea9efa911ce3d6
|
ca27bc2600d217e36a429ccfe064f11d9b200193
|
refs/heads/master
| 2021-05-27T03:50:09.456813 | 2013-10-10T13:47:16 | 2013-10-10T13:47:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 862 |
py
|
from cheshire3.selector import SimpleSelector
from cheshire3.exceptions import C3ObjectTypeError
from cheshire3.graph.record import GraphRecord
class SparqlSelector(SimpleSelector):
def __init__(self, session, config, parent):
SimpleSelector.__init__(self, session, config, parent)
def process_record(self, session, record):
if not isinstance(record, GraphRecord):
raise C3ObjectTypeError("{0.__class__.__name__} can only process GraphRecords; {1.__class__.__name__} supplied".format(self, record))
else:
vals = []
for src in self.sources:
for xp in src:
# this will be a SparqlQueryResult object
mv = record.process_sparql(session, xp['string'], xp['maps'])
vals.append(mv.selected)
return vals
|
[
"[email protected]"
] | |
c9aee74c7e8a5fbe9602ecf231180839e4630013
|
d17bfe4c8e16ed0727ce5bf893db6d287045a6ec
|
/M3/kcb/db/__init__.py
|
52d0428c6210fc981d00752dafb4aa1f4f416803
|
[] |
no_license
|
248808194/python
|
de81da3779399c4647a8bc7d803b63cd2eb59dea
|
da44c4949ab921a7822f891a2901c08b487b3de6
|
refs/heads/master
| 2020-03-08T01:42:19.838894 | 2018-04-19T01:05:46 | 2018-04-19T01:05:46 | 127,838,082 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 696 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-02-14-13:49
# Python 3.5
#
# import pickle
#
# import os,sys,time,datetime,pickle,json
# # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# # from core.schools import School
# # # with open('/home/zt/PycharmProjects/51cto_python_homework/M3/课程表/db/sh_school', 'rb') as f:
# # # shcool_obj = pickle.load(f)
# #
# #
# #
# #
# # with open('/home/zt/PycharmProjects/51cto_python_homework/M3/kcb/db/bj_school','rb') as f:
# # shcool_obj = pickle.load(f)
# # print(shcool_obj.SCHOOL_TECHER )
# # print(shcool_obj.SCHOOL_CLASS )
# # print(shcool_obj.SCHOOL_LESSON )
|
[
"[email protected]"
] | |
9d6f29cf29bdbe519853ad91f64c4a882a7ba5a5
|
7b6377050fba4d30f00e9fb5d56dfacb22d388e1
|
/brownies/LLNL/bin/bdflsToFluxes.py
|
58e8cbb093f0893f40ddcd99f615430519929bc6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/fudge
|
0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370
|
6ba80855ae47cb32c37f635d065b228fadb03412
|
refs/heads/master
| 2023-08-16T21:05:31.111098 | 2023-08-01T22:09:32 | 2023-08-01T22:09:32 | 203,678,373 | 21 | 4 |
NOASSERTION
| 2023-06-28T20:51:02 | 2019-08-21T23:22:20 |
Python
|
UTF-8
|
Python
| false | false | 2,604 |
py
|
#! /usr/bin/env python3
# <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import os
import argparse
from brownies.legacy.endl import bdfls as bdflsModule
from xData import XYs1d as XYs1dModule
addFluxExec = os.path.join( os.path.dirname( os.path.dirname( os.path.dirname( os.path.dirname( os.path.realpath( __file__ ) ) ) ) ), "bin", "addFlux.py" )
parser = argparse.ArgumentParser( description = "Converts one or more bdfls fluxes in a bdfls file into the new GNDS flux structure and outputs results to a file." )
parser.add_argument( 'bdfls', help = "The name of the bdfls file to extract flux data from." )
parser.add_argument( "output", help = "The name of the outputted flux file." )
parser.add_argument( "input", nargs = "?", default = None, help = "The file to read existing flux data from." )
parser.add_argument( '-f', '--fids', action = 'append', help = "Append the fid to the bdfls flux id to convert. If absent, all are converted (e.g., -f 1)." )
parser.add_argument( "--override", action = "store_true", help = "If label exists and option present, replace flux with new one; otherwise, execute a raise." )
if( __name__ == '__main__' ) :
args = parser.parse_args( )
bdfls = bdflsModule.bdfls( template = args.bdfls )
if( args.fids is None ) :
fids = [ flux.id for flux in bdfls.f ]
else :
fids = [ int( fid ) for fid in args.fids ]
override = ""
if( args.override ) : override = " --override"
input = ''
if( args.input is not None ) : input = args.input
for bdflsFlux in bdfls.f :
if( bdflsFlux.id in fids ) :
orders = []
            grid = XYs1dModule.XYs1d( )  # Only used to get a common energy grid.
for order in bdflsFlux.EF_l :
flux = XYs1dModule.XYs1d( order )
orders.append( flux )
grid += flux
flux = [ ]
for energy, dummy in grid :
energyLegendreCoefficients = '%s' % energy
for order in orders : energyLegendreCoefficients += ' %s' % order.evaluate( energy )
flux.append( energyLegendreCoefficients )
os.system( """%s %s --values %s "%s" %s %s""" % ( addFluxExec, override, bdflsFlux.name, "; ".join( flux ), args.output, input ) )
input = ''
|
[
"[email protected]"
] | |
7aef9623504a9ae338b05888b4715693e16c82be
|
abe0dd7786f8d0731ba871425bf07e3215391b68
|
/part1/LabRab/labrab-02/01.py
|
9c61e9666cb43d3e93adef69f534f58c61ea08df
|
[] |
no_license
|
Alekceyka-1/algopro21
|
aff1cef4f1ac9a80ee6d569ecb6a78d5c9fb1f32
|
f82e4582c1017c8043f399480104b3e7af4867ca
|
refs/heads/main
| 2023-08-25T17:31:36.682000 | 2021-10-19T08:14:57 | 2021-10-19T08:14:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 823 |
py
|
'''
The user enters two integers in the range [-100, +100].
The program prints, in ascending order, every odd number from the smaller
entered value up to the larger one (inclusive).
The user may enter the larger number first and the smaller one second --
the program must still work correctly in that case.
'''
a = int(input('Enter the first number: '))
b = int(input('Enter the second number: '))
if a > b:
    a, b = b, a
for num in range(a, b + 1):  # b + 1 so the maximum is included, as the task requires
    if num % 2 != 0:  # odd check via mod
        print(num)
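# A quick check of the inclusive fix above (assuming the inputs 5 and -3):
# the program prints -3 -1 1 3 5, one number per line.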
|
[
"[email protected]"
] | |
132daf0f45e263b277451e8d817a095e13b03485
|
9fa71d5834dae1c8900b3444f564b11326374d36
|
/packages/tools/compras/rotinas_envio/anl_processos.py
|
7c1b7f954066f2e225850a19e8a5f3f326a2da80
|
[] |
no_license
|
JoaoPauloLeal/toolbox
|
a85e726cfeb74603cb64d73c4af64757a9a60db7
|
924c063ba81395aeddc039a51f8365c02e527963
|
refs/heads/master
| 2023-06-07T02:17:42.069985 | 2021-06-28T19:06:40 | 2021-06-28T19:06:40 | 381,128,045 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 976 |
py
|
import bth.db_connector as db
tipo_registro = 'anl_processo_correcao'
def iniciar_processo_busca(param_exec, ano):
    entidade_dsk = str(input('Desktop entity id: '))
    sql = f'''SELECT i_anl_processo as id , i_anl_processo, i_processo, i_ano_proc
    FROM compras.anl_processos ap
    WHERE i_responsaveis_atos IS NULL
    AND i_entidades = {entidade_dsk}
    ORDER BY i_ano_proc, i_processo, i_anl_processo'''
    # x = db.consulta_sql(sql, index_col='i_anl_processo')
    # print(x)
    for x in db.consulta_sql(sql, index_col='id').to_dict('records'):
        print(f"Annulment {x['i_anl_processo']} of process {x['i_processo']}/{x['i_ano_proc']}")
    correcao = str(input('Run the automatic correction? '))
    # Note: `in` does substring matching here, so any answer contained in the
    # string below (including the empty string) counts as "yes".
    if correcao in 'sSyYSIMsimYESyes1':
        query = db.get_consulta(param_exec, f'{tipo_registro}.sql')
        db.execute_sql(query)
    elif correcao in 'nNnaoNAOnãoNÃO0':
        return 'x'
|
[
"[email protected]"
] | |
41037a8a5daa9ae01366e348de9873ba9e6d665a
|
39b35326534d6efa8a60344ef59eac3d8cea562f
|
/formpj/form/views.py
|
927372f3f8aa02c6898a0501e4b2e54334d1208c
|
[] |
no_license
|
Hyo-gyeong/Django_review
|
8635e8311111cab56066c6b87429c7f57c5e42c3
|
8b59d717c0c8c4404230c8eaa42e6074cacdd712
|
refs/heads/master
| 2021-01-03T08:32:06.706689 | 2020-08-31T04:55:59 | 2020-08-31T04:55:59 | 240,000,924 | 0 | 0 | null | 2020-08-17T19:21:30 | 2020-02-12T11:53:19 |
Python
|
UTF-8
|
Python
| false | false | 3,486 |
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.core.paginator import Paginator
from .models import Blog
from .form import BlogPost
def index(request):
    blog = Blog.objects.all()
    # paginate: 3 posts per page
    paginator = Paginator(blog, 3)
    # fetch the requested page number from the query string
    page = request.GET.get('page')
    posts = paginator.get_page(page)
    return render(request, 'index.html', {'blogs':blog, 'posts':posts})
def detail(request, detail_id):
    blog_detail = get_object_or_404(Blog, pk = detail_id)
    return render(request, 'detail.html', {'detail':blog_detail})
# def new(request):
#     return render(request, 'new.html')
# def create(request):
#     blog = Blog()
#     blog.title = request.POST['title']
#     blog.body = request.POST['body']
#     blog.photo = request.FILES['photo']
#     blog.pub_date = timezone.datetime.now()
#     blog.save()
#     return redirect('/')
def new(request):
    # 1. Process the submitted content: POST
    if request.method == 'POST':
        form = BlogPost(request.POST, request.FILES)  # bind the POSTed data to the form
        if form.is_valid():  # make sure the input is valid
            blogpost = form.save(commit = False)  # get the Blog object without saving it yet
            # blogpost is now a Blog instance
            blogpost.pub_date = timezone.datetime.now()
            blogpost.save()
            return redirect('/detail/' + str(blogpost.id))
    # 2. Show an empty form: GET
    else:
        form = BlogPost()
    return render(request, 'new.html', {'form':form})
# First visit to new.html shows empty input fields : GET : we must hand over a form, hence the if
# When the user submits something, those values are processed : POST : handle the data, hence the else
# We may not want every defined model field to be entered by hand; some should be filled automatically (e.g. the date)
# form.save(commit = False)  # do not save yet; because of commit = False the model object is returned unsaved
# then set the date field on the model object
# and finally save the model object
# def update(request, blog_id):
#     forms = get_object_or_404(Blog, pk=blog_id)
#     if request.method == 'POST':
#         forms.title = request.POST['title']  # store the content of the field named title
#         forms.body = request.POST['body']  # store the content of the field named body
#         forms.save()
#         return redirect('/blog/'+str(blog_id))
#     else:  # when visiting the page to enter edits
#         return render(request, 'new.html', {'forms':forms})
def updateform(request, blog_id):
    blog = get_object_or_404(Blog, pk = blog_id)
    if request.method == 'POST':  # this is how the existing content can be loaded
        form = BlogPost(request.POST, request.FILES, instance = blog)
        if form.is_valid():
            post = form.save(commit = False)
            post.pub_date = timezone.now()  # save with the edit timestamp
            post.save()
            return redirect('/detail/'+str(blog.id))
    else:
        form = BlogPost(instance = blog)
    return render(request, 'new.html', {'form':form})
|
[
"[email protected]"
] | |
6329aed37c3a9bf860eb343334aa54620697c20c
|
caa457f7cfa221ac70eb1c67753d6d1aacc33ede
|
/is_multiple.py
|
ebec9eeae996a440cf9b417434ade23a1cb3d3c2
|
[] |
no_license
|
ibnahmadCoded/how_to_think_like_a_computer_scientist_Chapter_4
|
0b609aedb7581ef5e825b7e8fe5cb5bcf96d522a
|
4de3ea919432dc92a604e7ed2c0ace368c57328c
|
refs/heads/master
| 2021-05-21T05:25:54.616256 | 2020-04-02T21:16:21 | 2020-04-02T21:16:21 | 252,565,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 210 |
py
|
def is_multiple(m, n):
    """Checks if n is a multiple of m"""
    # n is a multiple of m exactly when m is a factor of n,
    # so the arguments must be passed as (m, n), not (n, m).
    return is_factor(m, n)
def is_factor(f, n):
    """checks if f is a factor of n"""
    return (n % f) == 0
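# e.g. is_multiple(3, 12) -> True (12 is a multiple of 3),
# while is_multiple(12, 3) -> False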
|
[
"[email protected]"
] | |
b1e911efb08abf5dc32d96522d6c397ec0742951
|
06e0c89781ae9c07a55090c43d8609e9dfefbb6f
|
/School_13/School_13/wsgi.py
|
efd7f899673f2e8ee018251af6f0faed3d133fcc
|
[] |
no_license
|
mfarzamalam/django
|
d6d4302910301ae3e135a95a9982f3bd01218260
|
935a60d3ac874b7adb4287b4c2d172b89c6551b9
|
refs/heads/master
| 2023-04-10T21:06:11.601436 | 2021-04-27T21:31:47 | 2021-04-27T21:31:47 | 345,129,992 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 395 |
py
|
"""
WSGI config for School_13 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'School_13.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
e206dfba614433ba86c5a71556ab64ab0bdb2fba
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03326/s671553368.py
|
2d00464fe05662acb9aa093915e6109294176329
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
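# This solution maximizes |sum x| + |sum y| + |sum z| over m chosen triples:
# it enumerates all 8 sign combinations, sorts by the signed total, and takes
# the top m for each combination (the standard trick for removing absolute values).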
n, m = map(int, input().split())
XYZ = []
JKL = []
Seihu = [-1, 1]
for i in range(n):
x, y, z = map(int, input().split())
XYZ.append([x,y,z])
for j in Seihu:
for k in Seihu:
for l in Seihu:
tot = x*j + y*k + z*l
XYZ[i].append(tot)
for j in Seihu:
for k in Seihu:
for l in Seihu:
JKL.append([j,k,l])
ans = 0
for i in range(8):
jkl = JKL[i]
XYZ = sorted(XYZ, key=lambda x:x[i+3], reverse=True)
score = 0
for x in range(m):
score += XYZ[x][i+3]
ans = max(ans, score)
print(ans)
|
[
"[email protected]"
] | |
340fc2ac99f6e641b00a094e9af72cbbf3ca0766
|
41a4eeaf62a36d7c57ad55393996787bb55ba6b7
|
/venv/lib/python3.7/site-packages/kubernetes/client/models/v1_object_meta.py
|
16566acc20aef3b19cd8d404cc2679eba03f6b2b
|
[] |
no_license
|
jicowan/group-operator
|
c7a20ff03584da9ace19489bc3d27b9fb22a066c
|
bac6e51aef0d9836679621e3ce7e55f4c1ead402
|
refs/heads/master
| 2021-07-14T11:45:30.062219 | 2019-09-26T15:26:52 | 2019-09-26T15:26:52 | 209,454,861 | 10 | 4 | null | 2021-07-01T17:23:07 | 2019-09-19T03:29:54 |
Python
|
UTF-8
|
Python
| false | false | 29,400 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ObjectMeta(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'annotations': 'dict(str, str)',
'cluster_name': 'str',
'creation_timestamp': 'datetime',
'deletion_grace_period_seconds': 'int',
'deletion_timestamp': 'datetime',
'finalizers': 'list[str]',
'generate_name': 'str',
'generation': 'int',
'initializers': 'V1Initializers',
'labels': 'dict(str, str)',
'managed_fields': 'list[V1ManagedFieldsEntry]',
'name': 'str',
'namespace': 'str',
'owner_references': 'list[V1OwnerReference]',
'resource_version': 'str',
'self_link': 'str',
'uid': 'str'
}
attribute_map = {
'annotations': 'annotations',
'cluster_name': 'clusterName',
'creation_timestamp': 'creationTimestamp',
'deletion_grace_period_seconds': 'deletionGracePeriodSeconds',
'deletion_timestamp': 'deletionTimestamp',
'finalizers': 'finalizers',
'generate_name': 'generateName',
'generation': 'generation',
'initializers': 'initializers',
'labels': 'labels',
'managed_fields': 'managedFields',
'name': 'name',
'namespace': 'namespace',
'owner_references': 'ownerReferences',
'resource_version': 'resourceVersion',
'self_link': 'selfLink',
'uid': 'uid'
}
def __init__(self, annotations=None, cluster_name=None, creation_timestamp=None, deletion_grace_period_seconds=None, deletion_timestamp=None, finalizers=None, generate_name=None, generation=None, initializers=None, labels=None, managed_fields=None, name=None, namespace=None, owner_references=None, resource_version=None, self_link=None, uid=None):
"""
V1ObjectMeta - a model defined in Swagger
"""
self._annotations = None
self._cluster_name = None
self._creation_timestamp = None
self._deletion_grace_period_seconds = None
self._deletion_timestamp = None
self._finalizers = None
self._generate_name = None
self._generation = None
self._initializers = None
self._labels = None
self._managed_fields = None
self._name = None
self._namespace = None
self._owner_references = None
self._resource_version = None
self._self_link = None
self._uid = None
self.discriminator = None
if annotations is not None:
self.annotations = annotations
if cluster_name is not None:
self.cluster_name = cluster_name
if creation_timestamp is not None:
self.creation_timestamp = creation_timestamp
if deletion_grace_period_seconds is not None:
self.deletion_grace_period_seconds = deletion_grace_period_seconds
if deletion_timestamp is not None:
self.deletion_timestamp = deletion_timestamp
if finalizers is not None:
self.finalizers = finalizers
if generate_name is not None:
self.generate_name = generate_name
if generation is not None:
self.generation = generation
if initializers is not None:
self.initializers = initializers
if labels is not None:
self.labels = labels
if managed_fields is not None:
self.managed_fields = managed_fields
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if owner_references is not None:
self.owner_references = owner_references
if resource_version is not None:
self.resource_version = resource_version
if self_link is not None:
self.self_link = self_link
if uid is not None:
self.uid = uid
@property
def annotations(self):
"""
Gets the annotations of this V1ObjectMeta.
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
:return: The annotations of this V1ObjectMeta.
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""
Sets the annotations of this V1ObjectMeta.
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
:param annotations: The annotations of this V1ObjectMeta.
:type: dict(str, str)
"""
self._annotations = annotations
@property
def cluster_name(self):
"""
Gets the cluster_name of this V1ObjectMeta.
The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
:return: The cluster_name of this V1ObjectMeta.
:rtype: str
"""
return self._cluster_name
@cluster_name.setter
def cluster_name(self, cluster_name):
"""
Sets the cluster_name of this V1ObjectMeta.
The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
:param cluster_name: The cluster_name of this V1ObjectMeta.
:type: str
"""
self._cluster_name = cluster_name
@property
def creation_timestamp(self):
"""
Gets the creation_timestamp of this V1ObjectMeta.
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The creation_timestamp of this V1ObjectMeta.
:rtype: datetime
"""
return self._creation_timestamp
@creation_timestamp.setter
def creation_timestamp(self, creation_timestamp):
"""
Sets the creation_timestamp of this V1ObjectMeta.
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param creation_timestamp: The creation_timestamp of this V1ObjectMeta.
:type: datetime
"""
self._creation_timestamp = creation_timestamp
@property
def deletion_grace_period_seconds(self):
"""
Gets the deletion_grace_period_seconds of this V1ObjectMeta.
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.
:return: The deletion_grace_period_seconds of this V1ObjectMeta.
:rtype: int
"""
return self._deletion_grace_period_seconds
@deletion_grace_period_seconds.setter
def deletion_grace_period_seconds(self, deletion_grace_period_seconds):
"""
Sets the deletion_grace_period_seconds of this V1ObjectMeta.
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.
:param deletion_grace_period_seconds: The deletion_grace_period_seconds of this V1ObjectMeta.
:type: int
"""
self._deletion_grace_period_seconds = deletion_grace_period_seconds
@property
def deletion_timestamp(self):
"""
Gets the deletion_timestamp of this V1ObjectMeta.
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The deletion_timestamp of this V1ObjectMeta.
:rtype: datetime
"""
return self._deletion_timestamp
@deletion_timestamp.setter
def deletion_timestamp(self, deletion_timestamp):
"""
Sets the deletion_timestamp of this V1ObjectMeta.
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param deletion_timestamp: The deletion_timestamp of this V1ObjectMeta.
:type: datetime
"""
self._deletion_timestamp = deletion_timestamp
@property
def finalizers(self):
"""
Gets the finalizers of this V1ObjectMeta.
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.
:return: The finalizers of this V1ObjectMeta.
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""
Sets the finalizers of this V1ObjectMeta.
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.
:param finalizers: The finalizers of this V1ObjectMeta.
:type: list[str]
"""
self._finalizers = finalizers
@property
def generate_name(self):
"""
Gets the generate_name of this V1ObjectMeta.
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
:return: The generate_name of this V1ObjectMeta.
:rtype: str
"""
return self._generate_name
@generate_name.setter
def generate_name(self, generate_name):
"""
Sets the generate_name of this V1ObjectMeta.
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
:param generate_name: The generate_name of this V1ObjectMeta.
:type: str
"""
self._generate_name = generate_name
@property
def generation(self):
"""
Gets the generation of this V1ObjectMeta.
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.
:return: The generation of this V1ObjectMeta.
:rtype: int
"""
return self._generation
@generation.setter
def generation(self, generation):
"""
Sets the generation of this V1ObjectMeta.
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.
:param generation: The generation of this V1ObjectMeta.
:type: int
"""
self._generation = generation
@property
def initializers(self):
"""
Gets the initializers of this V1ObjectMeta.
An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects. When an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user. DEPRECATED - initializers are an alpha field and will be removed in v1.15.
:return: The initializers of this V1ObjectMeta.
:rtype: V1Initializers
"""
return self._initializers
@initializers.setter
def initializers(self, initializers):
"""
Sets the initializers of this V1ObjectMeta.
An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects. When an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user. DEPRECATED - initializers are an alpha field and will be removed in v1.15.
:param initializers: The initializers of this V1ObjectMeta.
:type: V1Initializers
"""
self._initializers = initializers
@property
def labels(self):
"""
Gets the labels of this V1ObjectMeta.
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
:return: The labels of this V1ObjectMeta.
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""
Sets the labels of this V1ObjectMeta.
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
:param labels: The labels of this V1ObjectMeta.
:type: dict(str, str)
"""
self._labels = labels
@property
def managed_fields(self):
"""
Gets the managed_fields of this V1ObjectMeta.
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. This field is alpha and can be changed or removed without notice.
:return: The managed_fields of this V1ObjectMeta.
:rtype: list[V1ManagedFieldsEntry]
"""
return self._managed_fields
@managed_fields.setter
def managed_fields(self, managed_fields):
"""
Sets the managed_fields of this V1ObjectMeta.
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. This field is alpha and can be changed or removed without notice.
:param managed_fields: The managed_fields of this V1ObjectMeta.
:type: list[V1ManagedFieldsEntry]
"""
self._managed_fields = managed_fields
@property
def name(self):
"""
Gets the name of this V1ObjectMeta.
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:return: The name of this V1ObjectMeta.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ObjectMeta.
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param name: The name of this V1ObjectMeta.
:type: str
"""
self._name = name
@property
def namespace(self):
"""
Gets the namespace of this V1ObjectMeta.
Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
:return: The namespace of this V1ObjectMeta.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this V1ObjectMeta.
Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
:param namespace: The namespace of this V1ObjectMeta.
:type: str
"""
self._namespace = namespace
@property
def owner_references(self):
"""
Gets the owner_references of this V1ObjectMeta.
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.
:return: The owner_references of this V1ObjectMeta.
:rtype: list[V1OwnerReference]
"""
return self._owner_references
@owner_references.setter
def owner_references(self, owner_references):
"""
Sets the owner_references of this V1ObjectMeta.
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.
:param owner_references: The owner_references of this V1ObjectMeta.
:type: list[V1OwnerReference]
"""
self._owner_references = owner_references
@property
def resource_version(self):
"""
Gets the resource_version of this V1ObjectMeta.
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
:return: The resource_version of this V1ObjectMeta.
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""
Sets the resource_version of this V1ObjectMeta.
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
:param resource_version: The resource_version of this V1ObjectMeta.
:type: str
"""
self._resource_version = resource_version
@property
def self_link(self):
"""
Gets the self_link of this V1ObjectMeta.
SelfLink is a URL representing this object. Populated by the system. Read-only.
:return: The self_link of this V1ObjectMeta.
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""
Sets the self_link of this V1ObjectMeta.
SelfLink is a URL representing this object. Populated by the system. Read-only.
:param self_link: The self_link of this V1ObjectMeta.
:type: str
"""
self._self_link = self_link
@property
def uid(self):
"""
Gets the uid of this V1ObjectMeta.
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
:return: The uid of this V1ObjectMeta.
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""
Sets the uid of this V1ObjectMeta.
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
:param uid: The uid of this V1ObjectMeta.
:type: str
"""
self._uid = uid
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ObjectMeta):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
6b9f29c30d1fd0ae9fe3d68b9873beeff2a97383
|
b26ea6b32e5214c5ae32707ba00f5c441ba32da3
|
/Biblioteca/settings-prod.py
|
716224130602637a23fc30f9ae3f9fc037c2458b
|
[] |
no_license
|
Karlosnat/https-github.com-rctorr-Biblioteca
|
7818486b958998e7515bba140222c8d8da884248
|
828408b7ac4b06815e9e6137854345a74eb0d022
|
refs/heads/master
| 2020-06-18T08:45:57.928152 | 2019-07-04T02:58:08 | 2019-07-04T02:58:08 | 196,239,472 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,281 |
py
|
"""
Django settings for Biblioteca project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd-1qj@c5%d%a+ib=!krcwjr%_x4a0t@rz062pd9=fiqtw^tnrj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["rctorr.pythonanywhere.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalogo',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Biblioteca.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Biblioteca.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-MX'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "/home/rctorr/Biblioteca/static/"
# Define the URL for login
LOGIN_URL = "/login/"
|
[
"[email protected]"
] | |
567f9beb3f0603e65a69f91b2c15d5f1de5f34b4
|
ceeaf1a4c22e82b344fff6f8aaf2f3d4f4ab4521
|
/suppliers/models.py
|
82ee5b34c5d38601f3583e9bc9fe3f22c43686a2
|
[
"MIT"
] |
permissive
|
CzechInvest/ciis
|
a14dc23c87fda473be0b6aaeee9e12251c4ce041
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
refs/heads/master
| 2023-03-22T08:48:35.168956 | 2020-05-15T13:27:31 | 2020-05-15T13:27:31 | 110,870,857 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,046 |
py
|
from django.db import models
from contacts.models import ContactPerson as MyContactPerson
from contacts.models import Organisation as MyOrganisation
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis.db import models as gis_models
# Create your models here.
class Supplier(models.Model):
id = models.IntegerField(default=-1, primary_key=True)
name = models.TextField(
help_text=_("Name"), blank=True)
address = models.TextField(
help_text=_("Adresa"),
blank=True)
ico = models.TextField(
help_text=_("IČO"),
blank=True)
url = models.URLField(
help_text=_("URL"),
blank=True)
core_business = models.TextField(
help_text=_("Core business"),
blank=True)
geom = gis_models.PointField(
help_text=_("Bod"),
blank=True)
def __str__(self):
return self.name
class Meta():
managed = False
db_table = 'domino\".\"suppliers'
|
[
"[email protected]"
] | |
78c60b999513905b844a3b936e14c88757bcbcbc
|
74e13532ba442e3deaa0f8ba41a9287fdf0517b0
|
/test_problems/u_grid_encoding.py
|
e23f56b8b4b5a44477ceaa55f3f16647b3f2b71d
|
[
"MIT"
] |
permissive
|
umautobots/osp
|
09afeddbb917381c8a468b08a3e36e5114937520
|
d055f1c846f907445186b9dea7da2d4dca4790a6
|
refs/heads/master
| 2022-12-03T20:40:51.036215 | 2020-08-17T18:03:27 | 2020-08-17T18:03:27 | 288,250,879 | 6 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,950 |
py
|
import numpy as np
EPS = 1e-3
def xy2mrx_v0(a_xy, b_pv, grid):
"""
Encode xy to activations on a grid
Activation symmetric so that flipping xy about
b's v yields same values
Clip activations that would be outside grid to map to last coordinate
:param a_xy: n, 2 | (x_x, x_y)
:param b_pv: n, 4 | (x_x, x_y, v_x, v_y)
:param grid: (l, m) | symmetric (about 0) grid in 1D
l | upper bound for coordinates
m | number of grid points
grid points are regularly spaced in [0, l]
:return: n, m | encoding for each (of n) agent
"""
n = a_xy.shape[0]
m = int(grid[-1])
mrx = np.zeros((n, m), dtype=np.float)
pos_dif = a_xy - b_pv[:, :2] # n, 2
pos_dif_dot_v = np.einsum('ij, ij -> i', pos_dif, b_pv[:, 2:]) # n
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
perp_ds = (pos_dif ** 2).sum(axis=1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
np.clip(perp_ds, a_min=None, a_max=grid[0]-EPS, out=perp_ds)
d = grid[0] / (grid[1] - 1)
for i in range(n):
a, r = np.divmod(perp_ds[i], d)
th = 1 - r/d
inds = np.array([a, a+1], dtype=np.int)
mrx[i, inds] = np.array([th, 1-th])
return mrx
def xy2mrx_v1(a_xy, b_pv, grid):
"""
Encode xy to activations on a grid
Activation symmetric so that flipping xy about
b's v yields same values
Clip activations that would be outside grid to map to last coordinate
:param a_xy: n, 2 | (x_x, x_y)
:param b_pv: n, 4 | (x_x, x_y, v_x, v_y)
:param grid: (l, m) | symmetric (about 0) grid in 1D
l | upper bound for coordinates
m | number of grid points
grid points are regularly spaced in [0, l]
:return: n, m | encoding for each (of n) agent
"""
n = a_xy.shape[0]
m = int(grid[-1])
mrx = np.zeros((n, m), dtype=np.float)
pos_dif = a_xy - b_pv[:, :2] # n, 2
pos_dif_dot_v = np.einsum('ij, ij -> i', pos_dif, b_pv[:, 2:]) # n
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
perp_ds = (pos_dif ** 2).sum(axis=1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
np.clip(perp_ds, a_min=None, a_max=grid[0]-EPS, out=perp_ds)
d = grid[0] / (grid[1] - 1)
a, r = np.divmod(perp_ds, d)
a = a.astype(np.int)
th = 1 - r/d
row_inds = np.arange(n)
mrx[row_inds, a] = th
mrx[row_inds, a + 1] = 1 - th
return mrx
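# Worked example of the encoding above (grid = [5., 6], so cell width d = 1):
# a perpendicular distance of 2.3 falls between grid points 2 and 3 and is
# split linearly, giving mrx = [0, 0, 0.7, 0.3, 0, 0] for that agent.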
def evaluate_v_v0(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n, | q=0 for using grid encoding, q=1 for not
:param b_inds: n, | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n, 2 | \hat{v} for each (of n) agent
"""
n = a_pv.shape[0]
v_hat = a_pv[:, 2:].copy()
d = grid[0] / (grid[1] - 1)
for i in range(n):
if q[i]:
continue
b_pv_i = b_pv[b_inds[i], :]
pos_dif = a_pv[i, :2] - b_pv_i[:2] # 2,
pos_dif_dot_v = pos_dif.dot(b_pv_i[2:])
v_normsq = (b_pv_i[2:] ** 2).sum(axis=-1)
v_normsq = EPS if v_normsq < EPS else v_normsq
perp_ds = (pos_dif ** 2).sum(axis=-1) - (pos_dif_dot_v ** 2) / v_normsq
perp_ds = np.sqrt(perp_ds)
perp_ds = np.clip(perp_ds, a_min=None, a_max=grid[0] - EPS)
a, r = np.divmod(perp_ds, d)
a = a.astype(np.int)
th = 1 - r / d
scaling = u[a] * th + u[a + 1] * (1 - th)
v_hat[i, :] *= scaling
return v_hat
def evaluate_v_v1(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n, | q=0 for using grid encoding, q=1 for not
:param b_inds: n, | index of b \in [0, k-1]
:return: n, 2 | \hat{v} for each (of n) agent
"""
n = a_pv.shape[0]
v_hat = a_pv[:, 2:].copy()
d = grid[0] / (grid[1] - 1)
# n->k pe dist
pos_dif = a_pv[:, np.newaxis, :2] - b_pv[np.newaxis, :, :2]
# n, k
pos_dif_dot_v = np.einsum('ijk, jk -> ij', pos_dif, b_pv[:, 2:])
# k
v_normsq = (b_pv[:, 2:] ** 2).sum(axis=1)
v_normsq[v_normsq < EPS] = EPS
# n, k
pe_dist_sq = (pos_dif ** 2).sum(axis=-1) - (pos_dif_dot_v ** 2) / v_normsq
pe_dist = np.sqrt(pe_dist_sq)
np.clip(pe_dist, a_min=None, a_max=grid[0] - EPS, out=pe_dist)
# subset n, st. q=0: n_q0,
q0_mask = q == 0
# n_q0,
a, r = np.divmod(pe_dist[q0_mask, b_inds[q0_mask]], d)
a = a.astype(np.int)
th = 1 - r / d
scaling = u[a] * th + u[a + 1] * (1 - th)
v_hat[q0_mask, :] = (v_hat[q0_mask, :].T * scaling).T
return v_hat
def evaluate_v_particles_v0(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n_p, n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n_p, n | q=0 for using grid encoding, q=1 for not
:param b_inds: n_p, n | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n_p, n, 2 | \hat{v} for each (of n) agent
"""
n_p, n = a_pv.shape[:2]
v_hat = np.empty((n_p, n, 2))
for i in range(n_p):
v_hat[i, ...] = evaluate_v_v1(a_pv[i], b_pv, grid, u, q[i], b_inds[i])
return v_hat
def evaluate_v_particles_v1(a_pv, b_pv, grid, u, q, b_inds):
"""
Evaluate velocity as
\hat{v} = vq + v(1-q)[mrx_{b_ind}(x).dot(u)]
:param a_pv: n_p, n, 4
:param b_pv: k, 4
:param grid: (l, m) | 2-tuple
:param u: m, | weights to apply to grid encodings
:param q: n_p, n | q=0 for using grid encoding, q=1 for not
:param b_inds: n_p, n | index of b \in [0, k-1]
(undefined for i st. q[i]=1)
:return: n_p, n, 2 | \hat{v} for each (of n) agent
"""
n_p, n = a_pv.shape[:2]
v_hat = evaluate_v_v1(
a_pv.reshape(-1, 4), b_pv, grid, u, q.reshape(-1), b_inds.reshape(-1)).reshape(n_p, n, 2)
return v_hat
def main_evaluate_v_particles():
from timeit import timeit
seed = np.random.randint(0, 1000)
# seed = 0
np.random.seed(seed)
print('seed: {}'.format(seed))
n_p = 100
n = 20
k = 3
a_pv = np.random.randn(n_p, n, 4)
b_pv = np.random.randn(k, 4) * 2
grid = np.array([5., 6]) # [0, 1, ..., 5]
u = np.arange(grid[1]) / grid[1]
q = np.random.randn(n_p, n) > 0
b_inds = np.random.choice(k, n_p*n).reshape(n_p, n)
print('---------------')
x_true = evaluate_v_particles_v0(a_pv, b_pv, grid, u, q, b_inds)
x_hat = evaluate_v_particles_v1(a_pv, b_pv, grid, u, q, b_inds)
print('diff: {:0.4f}'.format(np.linalg.norm(x_true - x_hat)))
n_tries = 2
args = (a_pv, b_pv, grid, u, q, b_inds)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_particles_v0, args=args))/n_tries)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_particles_v1, args=args))/n_tries)
def main_evaluate_v():
from timeit import timeit
seed = np.random.randint(0, 1000)
# seed = 0
np.random.seed(seed)
print('seed: {}'.format(seed))
n = 200
k = 3
a_pv = np.random.randn(n, 4)
b_pv = np.random.randn(k, 4) * 2
grid = np.array([5., 6]) # [0, 1, ..., 5]
u = np.arange(grid[1]) / grid[1]
q = np.random.randn(n) > 0
b_inds = np.random.choice(k, n)
print('---------------')
x_true = evaluate_v_v0(a_pv, b_pv, grid, u, q, b_inds)
x_hat = evaluate_v_v1(a_pv, b_pv, grid, u, q, b_inds)
print('diff: {:0.4f}'.format(np.linalg.norm(x_true - x_hat)))
n_tries = 2
args = (a_pv, b_pv, grid, u, q, b_inds)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_v0, args=args))/n_tries)
print(timeit('f(*args)', number=n_tries, globals=dict(f=evaluate_v_v1, args=args))/n_tries)
def main():
from timeit import timeit
seed = np.random.randint(0, 1000)
seed = 0
np.random.seed(seed)
print('seed: {}'.format(seed))
n = 300
a_xy = np.random.randn(n, 2) * 5
b_pv = np.random.randn(n, 4) * 2
grid = np.array([5., 6]) # [0, 1, ..., 5]
print('---------------')
x_true = xy2mrx_v0(a_xy, b_pv, grid)
x_hat = xy2mrx_v1(a_xy, b_pv, grid)
print('diff: {:0.4f}'.format(np.linalg.norm(x_true - x_hat)))
n_tries = 2
print(timeit('f(a, b, c)', number=n_tries, globals=dict(f=xy2mrx_v0, a=a_xy, b=b_pv, c=grid))/n_tries)
print(timeit('f(a, b, c)', number=n_tries, globals=dict(f=xy2mrx_v1, a=a_xy, b=b_pv, c=grid))/n_tries)
if __name__ == '__main__':
# main()
main_evaluate_v()
# main_evaluate_v_particles()
|
[
"[email protected]"
] | |
4fd7678e74d8b33f6305ad4d406a856ac171f8e8
|
72af42076bac692f9a42e0a914913e031738cc55
|
/01, 특강_210705_0706/02, source/CookData(2021.01.15)/Ex05-02.py
|
d57ee7fa6f09cf4eacaab57d083ddf0f1fbb88cf
|
[] |
no_license
|
goareum93/Algorithm
|
f0ab0ee7926f89802d851c2a80f98cba08116f6c
|
ec68f2526b1ea2904891b929a7bbc74139a6402e
|
refs/heads/master
| 2023-07-01T07:17:16.987779 | 2021-08-05T14:52:51 | 2021-08-05T14:52:51 | 376,908,264 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,050 |
py
|
## Class and function declarations ##
class Node2() :
    def __init__ (self) :
        self.plink = None  # link to the previous node
        self.data = None
        self.nlink = None  # link to the next node
def printNodes(start):
    current = start
    if current.nlink == None :
        return
    print("Forward --> ", end=' ')
    print(current.data, end=' ')
    while current.nlink != None:
        current = current.nlink
        print(current.data, end=' ')
    print()
    print("Backward --> ", end=' ')
    print(current.data, end=' ')
    while current.plink != None:
        current = current.plink
        print(current.data, end=' ')
## Global variable declarations ##
memory = []
head, current, pre = None, None, None
dataArray = ["다현", "정연", "쯔위", "사나", "지효"]
## Main code ##
if __name__ == "__main__" :
    node = Node2()  # first node
    node.data = dataArray[0]
    head = node
    memory.append(node)
    for data in dataArray[1:] :  # second and subsequent nodes
        pre = node
        node = Node2()
        node.data = data
        pre.nlink = node
        node.plink = pre
        memory.append(node)
    printNodes(head)
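# Expected output for the list above:
#   Forward -->  다현 정연 쯔위 사나 지효
#   Backward -->  지효 사나 쯔위 정연 다현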
|
[
"[email protected]"
] | |
7f67e437dfbb9b5a80dde706cb6652c1645f976b
|
6dcaec1ea2c227eb84bfa02219e5a4ba5553c47c
|
/loja_template/apps/galerias/urls.py
|
6711636ac6a68ddfbf2c0882ff89d85a7928203d
|
[] |
no_license
|
silviolucenajunior/store-template
|
8f5319c178d82142e3a4e179aca5fc12a6622a3b
|
2dd7ffe8dbd894258225fef2b8b1e4b982a36260
|
refs/heads/master
| 2020-12-30T09:26:11.065198 | 2013-08-16T18:53:51 | 2013-08-16T18:53:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,499 |
py
|
#-*- coding: utf-8 -*-
from django.conf.urls.defaults import *
#plugin tiny_mce
urlpatterns = patterns("galerias.views",
(r'^cadastrar-foto-plugin/$', 'cadastrar_foto_plugin_view'),
(r'^cadastrar-foto-plugin-ajax/$', 'cadastrar_foto_plugin_ajax'),
)
urlpatterns += patterns("galerias.views",
(r'^adicionar_foto_galeria/(?P<id_foto>.*)/(?P<id_galeria>.*)/$', 'adicionar_foto_galeria_ajax'),
(r'^cadastrar_galeria/$', 'cadastrar_galeria'),
(r'^cropar_foto/(?P<foto>.*)/$', 'manipular_foto_view'),
(r'^deletar_foto/(?P<id_foto>.*)/$', 'deletar_foto_ajax'),
(r'^fotos_json/$', 'fotos_json'),
(r'^gerenciar/$', 'gerenciador_view'),
(r'^listar_galerias/$', 'listar_galerias_view'),
(r'^manipular_foto/redimensionar/(?P<foto>.*)/$', 'redimensionar_foto_view'),
(r'^manipular_foto/(?P<foto>.*)/$', 'manipular_foto_view'),
(r'^visualizar_galeria/categoria/(?P<categoria_slug>.*)/(?P<autor_username>.*)/$', 'visualizar_galerias_por_categoria_autor_view'),
(r'^visualizar_galeria/categoria/(?P<categoria_slug>.*)/$', 'visualizar_galerias_por_categoria_view'),
(r'^visualizar_galeria/(?P<galeria_id>.*)/$', 'visualizar_galeria_view'),
(r'^visualizar_galeria/autor/(?P<autor_username>.*)/$', 'visualizar_galerias_por_autor_view'),
(r'^previa_galeria/(?P<galeria_id>.*)/$', 'visualizar_galeria_previa_view'),
(r'^setar_ordem_foto/(?P<id_foto>.*)/(?P<nova_ordem>.*)/$', 'setar_ordem_foto_ajax'),
(r'^upload_progress/$', 'upload_progress')
)
|
[
"[email protected]"
] | |
ef79405a7d678f52d4a84fa7839c19a313c6db8b
|
1a573f905f074a2135e82a91acbc9ae1d417d50b
|
/python/netograph/__init__.py
|
0465509828e0e73f1b5758d11578ad7c2dbf3a3b
|
[] |
no_license
|
mhils/netograph-api
|
f72a7d44773f6902f48f82d6a5cd166799cd3478
|
57425919637d8c7237b160561f181b06ab187bb5
|
refs/heads/master
| 2021-07-05T12:48:01.319568 | 2018-09-18T08:45:44 | 2018-09-18T08:45:44 | 152,243,259 | 0 | 0 | null | 2018-10-09T11:58:51 | 2018-10-09T11:58:50 | null |
UTF-8
|
Python
| false | false | 888 |
py
|
import grpc
from netograph.dsetapi import dset_pb2_grpc
from netograph.userapi import user_pb2_grpc
def connect_dset(token):
channel = grpc.secure_channel(
'grpc.netograph.io:443',
grpc.composite_channel_credentials(
grpc.ssl_channel_credentials(),
grpc.access_token_call_credentials(token),
),
options=[
('grpc.ssl_target_name_override', "grpc.netograph.io"),
]
)
return dset_pb2_grpc.DsetStub(channel)
def connect_user(token):
channel = grpc.secure_channel(
'grpc.netograph.io:443',
grpc.composite_channel_credentials(
grpc.ssl_channel_credentials(),
grpc.access_token_call_credentials(token),
),
options=[
('grpc.ssl_target_name_override', "grpc.netograph.io"),
]
)
return user_pb2_grpc.UserStub(channel)
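# Usage sketch (the token value below is a placeholder, not a real credential):
#   dset_stub = connect_dset("NETOGRAPH_API_TOKEN")
#   user_stub = connect_user("NETOGRAPH_API_TOKEN")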
|
[
"[email protected]"
] | |
f03c704be6facd8abf5ad3c327fbff2e43f889a9
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/usr/share/pyshared/ajenti/plugins/dashboard/updater.py
|
7c6e008bcb22774ad8b2cf6f8f28d50f2db594fa
|
[
"Apache-2.0"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 908 |
py
|
import gevent
from ajenti.api import *
from ajenti.plugins.packages.api import PackageManager, PackageInfo
@plugin
class AjentiUpdater (BasePlugin):
AJENTI_PACKAGE_NAME = 'ajenti'
def run_update(self, packages):
packages = packages or [self.AJENTI_PACKAGE_NAME]
actions = []
mgr = PackageManager.get()
for name in packages:
p = PackageInfo()
p.name, p.action = name, 'i'
actions.append(p)
mgr.do(actions)
def check_for_updates(self, callback):
try:
mgr = PackageManager.get()
except NoImplementationsError:
return
def worker():
mgr.refresh()
r = []
for p in mgr.upgradeable:
if p.name.startswith(self.AJENTI_PACKAGE_NAME):
r.append(p.name)
callback(r)
gevent.spawn(worker)
|
[
"[email protected]"
] | |
7bbd018f9a34d91619ca2f6cf87822698dc58c22
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/QdEAMeXNJAivcTMiT_1.py
|
9bdb30c42b07d4edb70a8ec3942a35405ac86063
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
def boxes(weights):
arr = []
rem = 10
t = []
for x in weights:
if rem - x >= 0:
rem -= x
t.append(x)
else :
arr.append(t)
t = []
rem = 10
rem -= x
t.append(x)
if len(t) > 0:
arr.append(t)
print(arr)
return len(arr)
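# Quick check of the greedy packing above (capacity 10 per box):
#   boxes([2, 3, 5, 7]) prints [[2, 3, 5], [7]] and returns 2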
|
[
"[email protected]"
] | |
c3a8d0bc33ebcec0952e8c6bbd6c4036f56842d8
|
26d6c34df00a229dc85ad7326de6cb5672be7acc
|
/msgraph-cli-extensions/beta/education_beta/azext_education_beta/vendored_sdks/education/operations/_education_users_classes_operations.py
|
72c60d3f3410415743d9385b687fc2adff41a49a
|
[
"MIT"
] |
permissive
|
BrianTJackett/msgraph-cli
|
87f92471f68f85e44872939d876b9ff5f0ae6b2c
|
78a4b1c73a23b85c070fed2fbca93758733f620e
|
refs/heads/main
| 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 |
NOASSERTION
| 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null |
UTF-8
|
Python
| false | false | 4,304 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EducationUsersClassesOperations(object):
"""EducationUsersClassesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~education.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def delta(
self,
education_user_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphEducationClass"]
"""Invoke function delta.
Invoke function delta.
:param education_user_id: key: id of educationUser.
:type education_user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphEducationClass, or the result of cls(response)
:rtype: list[~education.models.MicrosoftGraphEducationClass]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphEducationClass"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'educationUser-id': self._serialize.url("education_user_id", education_user_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphEducationClass]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/education/users/{educationUser-id}/classes/microsoft.graph.delta()'} # type: ignore
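# Rough usage sketch (this operations class is normally reached through a
# generated client attribute rather than constructed by hand; '<user-id>' is
# a placeholder):
# ops = EducationUsersClassesOperations(client, config, serializer, deserializer)
# classes = ops.delta(education_user_id='<user-id>')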
|
[
"[email protected]"
] | |
c83a0ea898a3f56450e1df946b05e71b733734e3
|
de7a0984af8ae2bb9706e256c79eb12af65f70f0
|
/contexts/rtscontext.py
|
e155026e0a9acbe9b2a885b98d0a9c0c73e52529
|
[
"MIT"
] |
permissive
|
grecoe/amlsdummy
|
3370eefdb42c6c6b5847d732d991c145384e0c3f
|
f83ade8f75bf972f574834eae2535cfda6e2711b
|
refs/heads/master
| 2021-08-07T15:35:26.997351 | 2021-01-09T15:34:37 | 2021-01-09T15:34:37 | 237,675,078 | 1 | 2 |
MIT
| 2020-02-28T11:13:00 | 2020-02-01T20:42:17 |
Python
|
UTF-8
|
Python
| false | false | 6,324 |
py
|
import shutil
from scripts.azure_utils import *
from contexts.basecontext import BaseContext
class RealTimeScoringContext(BaseContext):
'''
Model file and scoring script. These are constants and
probably do not need to be updated.
The remainder of the needed configuration comes from
the program arguments parsed in general_utils.py
'''
model_file = "model.pkl"
scoring_script_name = "./scoring.py"
scoring_script = "./paths/realtime/scoring/scoring.py"
'''
Contains the context needed to perform the tasks.
'''
def __init__(self, programArgs, userAuthorization, job_log = None):
super().__init__(programArgs, userAuthorization, job_log)
self.containerImage = None
self.computeTarget = None
self.webservice = None
self.webserviceapi = {}
def generateModel(self):
'''
Get an existing model by name or create new
'''
self.model = getOrRegisterModel(
self.workspace,
self.experiment,
self.programArguments.model_name,
RealTimeScoringContext.model_file,
self.job_log
)
if not self.model:
raise Exception("Model Creation Failed")
def generateImage(self):
'''
Generates a docker image; get its name and version using:
print(image.name, image.version)
Logs here:
image.image_build_log_uri
Move the scoring script to the execution directory (which is a requirement for creating an image)
When done, remove the copy.
'''
shutil.copyfile(RealTimeScoringContext.scoring_script, RealTimeScoringContext.scoring_script_name)
self.containerImage = createImage(
self.workspace,
RealTimeScoringContext.scoring_script_name,
self.model,
self.programArguments.image_name,
self.job_log)
if not self.containerImage:
raise Exception("Container Image Creation Failed")
print("Container Creation Log: ", self.containerImage.image_build_log_uri)
def loadImage(self):
'''
In testing, we did NOT want to keep regenerating the model and image;
if the image loads, then we've already done that step.
'''
if not self.containerImage:
self.containerImage = getExistingContainerImage(self.workspace, self.programArguments.image_name, self.job_log )
if self.containerImage != None:
'''
With CMK testing we really need to check this: it's possible an image build
was attempted but failed on ACR. In that case AMLS will record that it has
an image, yet the image state comes back as failed.
'''
if self.containerImage.creation_state == "Failed":
raise Exception("Image exists but state is failed, terminating process...")
return self.containerImage != None
def generateComputeTarget(self, cluster_name = None, resource_group = None):
'''
The caller decides, based on the parameters, whether to attach an existing
cluster or create a new one.
'''
if self.computeTarget:
return self.computeTarget
if cluster_name is None and resource_group is None:
print("Option is to create new compute target....")
self.computeTarget = getOrCreateComputeCluster(
self.workspace,
self.programArguments.region,
self.programArguments.aks_compute_name,
self.programArguments.aks_vm_size,
self.programArguments.aks_node_count,
self.programArguments.aks_non_prod,
self.job_log
)
else:
print("Option is to attach existing compute target....")
self.computeTarget = attachExistingCluster(
self.workspace,
cluster_name,
resource_group,
self.programArguments.aks_compute_name,
self.programArguments.aks_non_prod,
self.job_log
)
if not self.computeTarget:
raise Exception("Cannot create compute target.")
def deleteWebservice(self):
if not self.webservice:
raise Exception("No web service loaded")
print("Deleting web service...")
self.job_log.addInfo("Deleting web service")
self.webservice.delete()
self.webservice = None
self.job_log.addInfo("Web service deleted")
def loadWebservice(self):
'''
Retrieve an existing web service, used for deletion purposes.
'''
if not self.workspace:
raise Exception("You must load the workspace first")
if not self.containerImage:
raise Exception("You must load the conatiner image first")
if not self.webservice:
self.webservice = getExistingWebService(
self.workspace,
self.containerImage,
self.programArguments.aks_service_name,
self.job_log
)
return self.webservice != None
def generateWebService(self):
'''
Generate the web service
'''
if not self.webservice:
self.webservice = getOrCreateWebservice(
self.workspace,
self.containerImage,
self.programArguments.aks_service_name,
self.programArguments.aks_num_replicas,
self.programArguments.aks_cpu_cores,
self.computeTarget,
self.job_log
)
if not self.webservice:
raise Exception("Could not create the web service.")
self.webserviceapi["url"] = self.webservice.scoring_uri
self.webserviceapi["key"] = self.webservice.get_keys()[0]
def testWebService(self):
if self.webservice:
prediction = self.webservice.run(json.dumps({"name": "Dave"}))
print(prediction)
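# Typical end-to-end flow (a sketch; `args`, `auth`, and `log` are assumed to
# come from the program's argument parsing and Azure authorization helpers):
# ctx = RealTimeScoringContext(args, auth, job_log=log)
# ctx.generateModel()
# if not ctx.loadImage():
#     ctx.generateImage()
# ctx.generateComputeTarget()
# ctx.generateWebService()
# ctx.testWebService()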
|
[
"[email protected]"
] | |
8d829cc9490f7662027662a0f9511a6e6e50f951
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster-test/dagster_test/toys/asset_reconciliation/eager_reconciliation.py
|
5a49890cf839aa67d14aa36c0c71124187f55cb5
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 |
Apache-2.0
| 2023-09-14T21:57:37 | 2018-04-30T16:30:04 |
Python
|
UTF-8
|
Python
| false | false | 602 |
py
|
from dagster import (
AutoMaterializePolicy,
Definitions,
asset,
load_assets_from_current_module,
)
@asset
def root1():
...
@asset
def root2():
...
@asset
def diamond_left(root1):
...
@asset
def diamond_right(root1):
...
@asset
def diamond_sink(diamond_left, diamond_right):
...
@asset
def after_both_roots(root1, root2):
...
defs = Definitions(
assets=load_assets_from_current_module(
group_name="eager_reconciliation",
key_prefix="eager_reconciliation",
auto_materialize_policy=AutoMaterializePolicy.eager(),
),
)
|
[
"[email protected]"
] | |
70ae2f6e338d2396feb7a1beeb897ae04eeedd1c
|
2c1429a1bd2d0477fd88119d4d778fc68c82adcf
|
/python/DeepSeaScene/Convert/ConvertContext.py
|
e2f0d6c637e2a6d11432c41f10137e9b1b27bf4a
|
[
"Apache-2.0"
] |
permissive
|
akb825/DeepSea
|
d7ac54f6d8243d43d6ea538159f3067ab7e79880
|
5a909b4f51717bc59682e51ad6aa598a25a9b965
|
refs/heads/master
| 2023-08-31T23:45:19.533393 | 2023-08-29T07:30:36 | 2023-08-29T07:30:43 | 142,716,767 | 10 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,708 |
py
|
# Copyright 2020-2022 Aaron Barany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .FullScreenResolveConvert import convertFullScreenResolve
from .GLTFModel import registerGLTFModelType
from .InstanceTransformDataConvert import convertInstanceTransformData
from .ModelListConvert import convertModelList
from .ModelNodeReconfigConvert import convertModelNodeReconfig
from .ModelNodeRemapConvert import convertModelNodeRemap
from .ModelNodeConvert import convertModelNode
from .NodeChildrenConvert import convertNodeChildren
from .OBJModel import registerOBJModelType
from .SceneNodeRefConvert import convertReferenceNode
from .TransformNodeConvert import convertTransformNode
from .ViewCullListConvert import convertViewCullList
from .ViewMipmapListConvert import convertViewMipmapList
from .ViewTransformDataConvert import convertViewTransformData
from .. import ObjectData
from .. import SceneItemList
class ConvertContext:
"""
Class containing information used when converting scene data.
Builtin types will all be registered with this automatically. Custom types may be registered to
extend scene conversion.
"""
def __init__(self, cuttlefishTool = 'cuttlefish', vfcTool = 'vfc', multithread = True):
"""
Initializes this with the paths to the cuttlefish tool (for texture conversion) and vfc tool
(for vertex format conversion). By default the bare tool names are used, so the tools must be on the PATH.
"""
self.cuttlefish = cuttlefishTool
self.vfc = vfcTool
self.multithread = multithread
self.nodeTypeMap = {
'ModelNode': convertModelNode,
'ModelNodeReconfig': convertModelNodeReconfig,
'ModelNodeRemap': convertModelNodeRemap,
'TransformNode': convertTransformNode,
'ReferenceNode': convertReferenceNode
}
self.itemListTypeMap = {
'FullScreenResolve': convertFullScreenResolve,
'ModelList': convertModelList,
'ViewCullList': convertViewCullList,
'ViewMipmapList': convertViewMipmapList,
'ViewTransformData': convertViewTransformData
}
self.instanceDataTypeMap = {
'InstanceTransformData': convertInstanceTransformData
}
self.customResourceTypeMap = dict()
self.resourceActionTypeMap = {
'NodeChildren': convertNodeChildren
}
# Model types are considered an extension. However, register the builtin model types here
# for convenience similar to the node and item list types.
registerGLTFModelType(self)
registerOBJModelType(self)
def addNodeType(self, typeName, convertFunc):
"""
Adds a node type with the name and the convert function. The function should take the
ConvertContext and dict for the data as parameters and return the flatbuffer bytes.
An exception will be raised if the type is already registered.
"""
if typeName in self.nodeTypeMap:
raise Exception('Node type "' + typeName + '" is already registered.')
self.nodeTypeMap[typeName] = convertFunc
def convertNode(self, builder, typeName, data):
"""
Converts a node based on its type and dict for the data. This will return the offset to the
ObjectData added to the builder.
"""
if typeName not in self.nodeTypeMap:
raise Exception('Node type "' + typeName + '" hasn\'t been registered.')
convertedData = self.nodeTypeMap[typeName](self, data)
typeNameOffset = builder.CreateString(typeName)
dataOffset = builder.CreateByteVector(convertedData)
ObjectData.Start(builder)
ObjectData.AddType(builder, typeNameOffset)
ObjectData.AddData(builder, dataOffset)
return ObjectData.End(builder)
def addItemListType(self, typeName, convertFunc):
"""
Adds an item list type with the name and the convert function. The function should take the
ConvertContext and dict for the data as parameters and return the flatbuffer bytes.
An exception will be raised if the type is already registered.
"""
if typeName in self.itemListTypeMap:
raise Exception('Item list type "' + typeName + '" is already registered.')
self.itemListTypeMap[typeName] = convertFunc
def convertItemList(self, builder, typeName, name, data):
"""
Converts an item list based on its type and dict for the data. This will return the offset
to the ObjectData added to the builder.
"""
if typeName not in self.itemListTypeMap:
raise Exception('Item list type "' + typeName + '" hasn\'t been registered.')
convertedData = self.itemListTypeMap[typeName](self, data)
typeNameOffset = builder.CreateString(typeName)
nameOffset = builder.CreateString(name)
dataOffset = builder.CreateByteVector(convertedData)
SceneItemList.Start(builder)
SceneItemList.AddType(builder, typeNameOffset)
SceneItemList.AddName(builder, nameOffset)
SceneItemList.AddData(builder, dataOffset)
return SceneItemList.End(builder)
def addInstanceDataType(self, typeName, convertFunc):
"""
Adds an instance data type with the name and the convert function. The function should take
the ConvertContext and dict for the data as parameters and return the flatbuffer bytes.
An exception will be raised if the type is already registered.
"""
if typeName in self.instanceDataTypeMap:
raise Exception('Instance data type "' + typeName + '" is already registered.')
self.instanceDataTypeMap[typeName] = convertFunc
def convertInstanceData(self, builder, typeName, data):
"""
Converts an instance based on its type and dict for the data. This will return the offset to
the ObjectData added to the builder.
"""
if typeName not in self.instanceDataTypeMap:
raise Exception('Instance data type "' + typeName + '" hasn\'t been registered.')
convertedData = self.instanceDataTypeMap[typeName](self, data)
typeNameOffset = builder.CreateString(typeName)
dataOffset = builder.CreateByteVector(convertedData)
ObjectData.Start(builder)
ObjectData.AddType(builder, typeNameOffset)
ObjectData.AddData(builder, dataOffset)
return ObjectData.End(builder)
def addCustomResourceType(self, typeName, convertFunc):
"""
Adds a custom resource type with the name and the convert function. The function should
take the ConvertContext and dict for the data as parameters and return the flatbuffer bytes.
An exception will be raised if the type is already registered.
"""
if typeName in self.customResourceTypeMap:
raise Exception('Custom resource type "' + typeName + '" is already registered.')
self.customResourceTypeMap[typeName] = convertFunc
def convertCustomResource(self, builder, typeName, data):
"""
Converts a custom resource based on its type and dict for the data. This will return the
offset to the ObjectData added to the builder.
"""
if typeName not in self.customResourceTypeMap:
raise Exception('Custom resource type "' + typeName + '" hasn\'t been registered.')
convertedData = self.customResourceTypeMap[typeName](self, data)
typeNameOffset = builder.CreateString(typeName)
dataOffset = builder.CreateByteVector(convertedData)
ObjectData.Start(builder)
ObjectData.AddType(builder, typeNameOffset)
ObjectData.AddData(builder, dataOffset)
return ObjectData.End(builder)
def addResourceActionType(self, typeName, convertFunc):
"""
Adds a resource action type with the name and the convert function. The function should
take the ConvertContext and dict for the data as parameters and return the flatbuffer bytes.
An exception will be raised if the type is already registered.
"""
if typeName in self.resourceActionTypeMap:
raise Exception('Resource action type "' + typeName + '" is already registered.')
self.resourceActionTypeMap[typeName] = convertFunc
def convertResourceAction(self, builder, typeName, data):
"""
Converts a resource action based on its type and dict for the data. This will return the
offset to the ObjectData added to the builder.
"""
if typeName not in self.resourceActionTypeMap:
raise Exception('Resource action type "' + typeName + '" hasn\'t been registered.')
convertedData = self.resourceActionTypeMap[typeName](self, data)
typeNameOffset = builder.CreateString(typeName)
dataOffset = builder.CreateByteVector(convertedData)
ObjectData.Start(builder)
ObjectData.AddType(builder, typeNameOffset)
ObjectData.AddData(builder, dataOffset)
return ObjectData.End(builder)
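# Registration sketch for a custom type (hypothetical 'MyNode'; per the
# docstrings above, a convert function takes the ConvertContext and the data
# dict and returns the flatbuffer bytes):
# def convertMyNode(convertContext, data):
#     return bytes()  # build the flatbuffer payload for the node here
# context = ConvertContext()
# context.addNodeType('MyNode', convertMyNode)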
|
[
"[email protected]"
] | |
d9268e78fb09969aa65b9d05a32d31fdddf3ecd1
|
43424192529aa1fb554d26b07c7382ce67604729
|
/src/lib/datasets/dataset/jde.py
|
d5c6763aa95f9e87956135d597ed4cf276317bd0
|
[] |
no_license
|
byq-luo/CL-MOT
|
7e0a61401b0e8f5610a303c71465d06c8b509155
|
c79027e8cf5af1a2ea08fefcdaf55be08b1a5a4b
|
refs/heads/master
| 2022-11-04T22:27:57.614695 | 2020-06-25T22:39:24 | 2020-06-25T22:39:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,449 |
py
|
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import cv2
import json
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms as T
from cython_bbox import bbox_overlaps as bbox_ious
from opts import opts
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.utils import xyxy2xywh, generate_anchors, xywh2xyxy, encode_delta
class LoadImages: # for inference
def __init__(self, path, img_size=(1088, 608)):
if os.path.isdir(path):
image_format = ['.jpg', '.jpeg', '.png', '.tif']
self.files = sorted(glob.glob('%s/*.*' % path))
self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))
elif os.path.isfile(path):
self.files = [path]
self.nF = len(self.files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
assert self.nF > 0, 'No images found in ' + path
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == self.nF:
raise StopIteration
img_path = self.files[self.count]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return img_path, img, img0
def __getitem__(self, idx):
idx = idx % self.nF
img_path = self.files[idx]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img_path, img, img0
def __len__(self):
return self.nF # number of files
class LoadVideo: # for inference
def __init__(self, path, img_size=(1088, 608)):
self.cap = cv2.VideoCapture(path)
self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
self.w, self.h = 1920, 1080
print('Length of the video: {:d} frames'.format(self.vn))
def get_size(self, vw, vh, dw, dh):
wa, ha = float(dw) / vw, float(dh) / vh
a = min(wa, ha)
return int(vw * a), int(vh * a)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == len(self):
raise StopIteration
# Read image
res, img0 = self.cap.read() # BGR
assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
img0 = cv2.resize(img0, (self.w, self.h))
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return self.count, img, img0
def __len__(self):
return self.vn # number of files
class LoadImagesAndLabels: # for training
def __init__(self, path, img_size=(1088, 608), augment=False, transforms=None):
with open(path, 'r') as file:
self.img_files = file.readlines()
self.img_files = [x.replace('\n', '') for x in self.img_files]
self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files]
self.nF = len(self.img_files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
def __getitem__(self, files_index):
img_path = self.img_files[files_index]
label_path = self.label_files[files_index]
return self.get_data(img_path, label_path)
def get_data(self, img_path, label_path, unsup=False):
height = self.height
width = self.width
images = {}
labels = {}
# Load image
images['orig'] = cv2.imread(img_path) # BGR
if images['orig'] is None:
raise ValueError('File corrupt {}'.format(img_path))
h, w, _ = images['orig'].shape
# create horizontally flipped image for contrastive learning
if unsup:
images['flipped'] = cv2.flip(images['orig'], 1)
for key, img in images.items():
# Saturation and brightness augmentation by 50%
if self.augment:
fraction = 0.50
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * fraction + 1
S *= a
if a > 1:
np.clip(S, a_min=0, a_max=255, out=S)
a = (random.random() * 2 - 1) * fraction + 1
V *= a
if a > 1:
np.clip(V, a_min=0, a_max=255, out=V)
img_hsv[:, :, 1] = S.astype(np.uint8)
img_hsv[:, :, 2] = V.astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
img, ratio, padw, padh = letterbox(img, height=height, width=width)
# Load labels
if os.path.isfile(label_path):
labels_ = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
if key == 'flipped' and len(labels_) > 0:
labels_[:, 2] = 1 - labels_[:, 2]
# Normalized xywh to pixel xyxy format
labels[key] = labels_.copy()
labels[key][:, 2] = ratio * w * (labels_[:, 2] - labels_[:, 4] / 2) + padw
labels[key][:, 3] = ratio * h * (labels_[:, 3] - labels_[:, 5] / 2) + padh
labels[key][:, 4] = ratio * w * (labels_[:, 2] + labels_[:, 4] / 2) + padw
labels[key][:, 5] = ratio * h * (labels_[:, 3] + labels_[:, 5] / 2) + padh
else:
labels[key] = np.array([])
# Augment image and labels
if self.augment:
img, labels[key], M = random_affine(img, labels[key], degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
nL = len(labels[key])
if nL > 0:
# convert xyxy to xywh
labels[key][:, 2:6] = xyxy2xywh(labels[key][:, 2:6].copy()) # / height
labels[key][:, 2] /= width
labels[key][:, 3] /= height
labels[key][:, 4] /= width
labels[key][:, 5] /= height
if not unsup and self.augment:
# random left-right flip during supervised learning
if random.random() > 0.5:
img = np.fliplr(img)
if nL > 0:
labels[key][:, 2] = 1 - labels[key][:, 2]
img = np.ascontiguousarray(img[:, :, ::-1]) # BGR to RGB
if self.transforms is not None:
img = self.transforms(img)
images[key] = img
if 'flipped' not in images:
images['flipped'] = None
labels['flipped'] = None
return images, labels, img_path, (h, w)
def format_gt_det(self, gt_det):
if len(gt_det['scores']) == 0:
gt_det = {'bboxes': np.array([[0, 0, 1, 1]], dtype=np.float32),
'scores': np.array([1], dtype=np.float32),
'clses': np.array([0], dtype=np.float32),
'cts': np.array([[0, 0]], dtype=np.float32),
'flipped_bboxes': np.array([[0, 0, 1, 1]], dtype=np.float32),
'flipped_cts': np.array([[0, 0]], dtype=np.float32)}
gt_det = {k: np.array(gt_det[k], dtype=np.float32) for k in gt_det}
return gt_det
def __len__(self):
return self.nF # number of images
def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)):
# resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
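# The returned (ratio, dw, dh) map original pixel coordinates into the padded
# frame, which is exactly the transform get_data applies to the labels:
# x_padded = ratio * x_original + dw
# y_padded = ratio * y_original + dh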
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
height = img.shape[0]
width = img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 2:6].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
np.clip(xy[:, 0], 0, width, out=xy[:, 0])
np.clip(xy[:, 2], 0, width, out=xy[:, 2])
np.clip(xy[:, 1], 0, height, out=xy[:, 1])
np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 2:6] = xy[i]
return imw, targets, M
else:
return imw
def collate_fn(batch):
imgs, labels, paths, sizes = zip(*batch)
batch_size = len(labels)
imgs = torch.stack(imgs, 0)
max_box_len = max([l.shape[0] for l in labels])
labels = [torch.from_numpy(l) for l in labels]
filled_labels = torch.zeros(batch_size, max_box_len, 6)
labels_len = torch.zeros(batch_size)
for i in range(batch_size):
isize = labels[i].shape[0]
if len(labels[i]) > 0:
filled_labels[i, :isize, :] = labels[i]
labels_len[i] = isize
return imgs, filled_labels, paths, sizes, labels_len.unsqueeze(1)
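# collate_fn pads each image's label tensor to the longest list in the batch
# so they stack into a single tensor. Usage sketch with a PyTorch DataLoader:
# loader = torch.utils.data.DataLoader(dataset, batch_size=8,
#                                      collate_fn=collate_fn)
# for imgs, filled_labels, paths, sizes, labels_len in loader:
#     ...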
class JointDataset(LoadImagesAndLabels): # for training
default_resolution = [1088, 608]
mean = None
std = None
num_classes = 1
def __init__(self, opt, root, paths, img_size=(1088, 608), augment=False, transforms=None):
self.opt = opt
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.img_num = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
self.num_classes = 1
# If doing self-supervised training we need to create positive and negative
# examples of each image
self.unsup = opt.unsup
# Get image and annotation file names
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [
x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
# Counting unique identities in each dataset
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:, 1])
if img_max > max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
# Finding the first identity (unique object) in each dataset
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
# Total identities in all datasets
self.nID = int(last_index + 1)
# Count images and find starting file index for each dataset
self.cds = []
self.nF = 0
for i, (ds, img_files) in enumerate(self.img_files.items()):
img_cnt = len(img_files)
self.img_num[ds] = img_cnt
self.nF += img_cnt
self.cds.append(self.nF - img_cnt)
self.width = img_size[0]
self.height = img_size[1]
self.max_objs = opt.K
self.augment = augment
self.transforms = transforms
print('Dataset Summary')
print('=' * 100)
print('Images per dataset: {}'.format(self.img_num))
print('Identities per dataset: {}'.format(self.tid_num))
print('Total images: {}'.format(self.nF))
print('Total identities: {}'.format(self.nID))
print('=' * 100)
def __getitem__(self, files_index):
# Find which dataset this index falls in
ds = None
start_index = 0
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
# Get image and annotation file names
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
img_dict, lbl_dict, img_path, (input_h, input_w) = self.get_data(img_path, label_path, self.unsup)
img, labels = img_dict['orig'], lbl_dict['orig']
flipped_img, flipped_labels = img_dict['flipped'], lbl_dict['flipped']
# Offset object IDs with starting ID index for this dataset
for i, _ in enumerate(labels):
if labels[i, 1] > -1:
labels[i, 1] += self.tid_start_index[ds]
output_h = img.shape[1] // self.opt.down_ratio
output_w = img.shape[2] // self.opt.down_ratio
num_classes = self.num_classes
if labels.shape[0] != flipped_labels.shape[0]:
print(labels.shape[0], flipped_labels.shape[0])
num_objs = labels.shape[0]
# heat map representing object detections
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
# width and height of each object
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
# object center offset due to resizing and decimal error
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
# contains object centers
ind = np.zeros((self.max_objs,), dtype=np.int64)
# mask representing the gt number of objects in this frame
reg_mask = np.zeros((self.max_objs,), dtype=np.uint8)
# object IDs
ids = np.zeros((self.max_objs,), dtype=np.int64)
gt_det = {'bboxes': [], 'scores': [], 'clses': [], 'cts': []}
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
# Build ground truth labels
for k in range(num_objs):
label = labels[k] # [class, identity, x_center, y_center, width, height]
bbox = label[2:]
cls_id = int(label[0])
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
radius = self.opt.hm_gauss if self.opt.mse_loss else radius
ct = np.array([bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
ids[k] = label[1]
gt_det['bboxes'].append(
np.array([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2], dtype=np.float32))
gt_det['scores'].append(1)
gt_det['clses'].append(cls_id)
gt_det['cts'].append(ct)
ret = {'img': img, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids}
if flipped_img is not None and flipped_labels is not None:
flipped_ind = np.zeros((self.max_objs,), dtype=np.int64)
gt_det['flipped_bboxes'] = []
gt_det['flipped_cts'] = []
for k in range(num_objs):
flipped_label = flipped_labels[k]
bbox = flipped_label[2:]
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
if h > 0 and w > 0:
ct = np.array([bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
flipped_ind[k] = ct_int[1] * output_w + ct_int[0]
gt_det['flipped_bboxes'].append(
np.array([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2], dtype=np.float32))
gt_det['flipped_cts'].append(ct)
ret['flipped_img'] = flipped_img
ret['flipped_ind'] = flipped_ind
ret['num_objs'] = torch.tensor([num_objs])
if self.opt.debug > 0:
gt_det = self.format_gt_det(gt_det)
meta = {'gt_det': gt_det, 'img_path': img_path}
ret['meta'] = meta
return ret
class DetDataset(LoadImagesAndLabels): # for training
def __init__(self, root, paths, img_size=(1088, 608), augment=False, transforms=None):
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [
x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:, 1])
if img_max > max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index + 1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
print('=' * 80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('=' * 80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i, 1] > -1:
labels[i, 1] += self.tid_start_index[ds]
return imgs['orig'], labels0, img_path, (h, w)
|
[
"[email protected]"
] | |
e29b726c54adb05c37f2093deda259bec151ea6e
|
30fe7671b60825a909428a30e3793bdf16eaaf29
|
/.metadata/.plugins/org.eclipse.core.resources/.history/63/9010f07eaeea00161174a93fd5908e78
|
392d2088e7c6f05709c2717070f065f8bf4e2b58
|
[] |
no_license
|
abigdream84/PythonStudy
|
0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1
|
059274d3ba6f34b62ff111cda3fb263bd6ca8bcb
|
refs/heads/master
| 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 243 |
#!/usr/bin/env python
#coding:UTF-8
import threading
import time
num = 0
def run():
time.sleep(1)
global num
num += 1
time.sleep(1)
print(num)
for i in range(100):
t = threading.Thread(target=run)
t.start()
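# Note: `num += 1` is a read-modify-write, so the 100 threads above race on
# `num`; the printed values can repeat or arrive out of order. A minimal
# thread-safe variant of the worker (a sketch using the same globals):
# lock = threading.Lock()
# def run_safe():
#     global num
#     time.sleep(1)
#     with lock:
#         num += 1
#     print(num)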
|
[
"[email protected]"
] | ||
ae1e84aa9f819ff4d88b735878bf3a153afc31e3
|
aeeaf40350a652d96a392010071df8a486c6e79f
|
/archive/python/Python/unsorted/235.lowest-common-ancestor-of-a-binary-search-tree.py
|
2fe98c1dd49f5015b1f1abd8ba318f5bf60c1b0b
|
[
"MIT"
] |
permissive
|
linfengzhou/LeetCode
|
11e6c12ce43cf0053d86437b369a2337e6009be3
|
cb2ed3524431aea2b204fe66797f9850bbe506a9
|
refs/heads/master
| 2021-01-23T19:34:37.016755 | 2018-04-30T20:44:40 | 2018-04-30T20:44:40 | 53,916,868 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 721 |
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left and right:
return root
if left:
return left
if right:
return right
return None
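# Since problem 235 is specifically a binary *search* tree, the ordering
# invariant allows a simpler O(h) iterative walk (a sketch, assuming p and q
# both exist in the tree):
# def lowestCommonAncestorBST(root, p, q):
#     while root:
#         if p.val < root.val and q.val < root.val:
#             root = root.left
#         elif p.val > root.val and q.val > root.val:
#             root = root.right
#         else:
#             return root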
|
[
"[email protected]"
] | |
b068bc192b0b532ae5e273f0916abe8531e8e588
|
b7fab13642988c0e6535fb75ef6cb3548671d338
|
/tools/ydk-py-master/cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.py
|
9ec88cc72dcf4d83caeadc1f04d8b6963aa17d73
|
[
"Apache-2.0"
] |
permissive
|
juancsosap/yangtraining
|
6ad1b8cf89ecdebeef094e4238d1ee95f8eb0824
|
09d8bcc3827575a45cb8d5d27186042bf13ea451
|
refs/heads/master
| 2022-08-05T01:59:22.007845 | 2019-08-01T15:53:08 | 2019-08-01T15:53:08 | 200,079,665 | 0 | 1 | null | 2021-12-13T20:06:17 | 2019-08-01T15:54:15 |
Python
|
UTF-8
|
Python
| false | false | 31,508 |
py
|
""" Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR fretta\-bcm\-dpa\-drop\-stats package operational data.
This module contains definitions
for the following management objects\:
drop\: Drop stats data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
from ydk.entity_utils import get_relative_entity_path as _get_relative_entity_path
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YPYError, YPYModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Drop(Entity):
"""
Drop stats data
.. attribute:: nodes
Drop data per node
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.Drop.Nodes>`
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop, self).__init__()
self._top_entity = None
self.yang_name = "drop"
self.yang_parent_name = "Cisco-IOS-XR-fretta-bcm-dpa-drop-stats-oper"
self.nodes = Drop.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
self._children_yang_names.add("nodes")
class Nodes(Entity):
"""
Drop data per node
.. attribute:: node
Drop stats data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.Drop.Nodes.Node>`
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop.Nodes, self).__init__()
self.yang_name = "nodes"
self.yang_parent_name = "drop"
self.node = YList(self)
def __setattr__(self, name, value):
self._check_monkey_patching_error(name, value)
with _handle_type_error():
if name in self.__dict__ and isinstance(self.__dict__[name], YList):
raise YPYModelError("Attempt to assign value of '{}' to YList ldata. "
"Please use list append or extend method."
.format(value))
if isinstance(value, Enum.YLeaf):
value = value.name
if name in () and name in self.__dict__:
if isinstance(value, YLeaf):
self.__dict__[name].set(value.get())
elif isinstance(value, YLeafList):
super(Drop.Nodes, self).__setattr__(name, value)
else:
self.__dict__[name].set(value)
else:
if hasattr(value, "parent") and name != "parent":
if hasattr(value, "is_presence_container") and value.is_presence_container:
value.parent = self
elif value.parent is None and value.yang_name in self._children_yang_names:
value.parent = self
super(Drop.Nodes, self).__setattr__(name, value)
class Node(Entity):
"""
Drop stats data for a particular node
.. attribute:: node_name <key>
Node ID
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: npu_number_for_drop_stats
NPU drop stats
**type**\: :py:class:`NpuNumberForDropStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.Drop.Nodes.Node.NpuNumberForDropStats>`
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop.Nodes.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "nodes"
self.node_name = YLeaf(YType.str, "node-name")
self.npu_number_for_drop_stats = Drop.Nodes.Node.NpuNumberForDropStats()
self.npu_number_for_drop_stats.parent = self
self._children_name_map["npu_number_for_drop_stats"] = "npu-number-for-drop-stats"
self._children_yang_names.add("npu-number-for-drop-stats")
def __setattr__(self, name, value):
self._check_monkey_patching_error(name, value)
with _handle_type_error():
if name in self.__dict__ and isinstance(self.__dict__[name], YList):
raise YPYModelError("Attempt to assign value of '{}' to YList ldata. "
"Please use list append or extend method."
.format(value))
if isinstance(value, Enum.YLeaf):
value = value.name
if name in ("node_name") and name in self.__dict__:
if isinstance(value, YLeaf):
self.__dict__[name].set(value.get())
elif isinstance(value, YLeafList):
super(Drop.Nodes.Node, self).__setattr__(name, value)
else:
self.__dict__[name].set(value)
else:
if hasattr(value, "parent") and name != "parent":
if hasattr(value, "is_presence_container") and value.is_presence_container:
value.parent = self
elif value.parent is None and value.yang_name in self._children_yang_names:
value.parent = self
super(Drop.Nodes.Node, self).__setattr__(name, value)
class NpuNumberForDropStats(Entity):
"""
NPU drop stats
.. attribute:: npu_number_for_drop_stat
All drop stats for a particular NPU
**type**\: list of :py:class:`NpuNumberForDropStat <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat>`
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop.Nodes.Node.NpuNumberForDropStats, self).__init__()
self.yang_name = "npu-number-for-drop-stats"
self.yang_parent_name = "node"
self.npu_number_for_drop_stat = YList(self)
def __setattr__(self, name, value):
self._check_monkey_patching_error(name, value)
with _handle_type_error():
if name in self.__dict__ and isinstance(self.__dict__[name], YList):
raise YPYModelError("Attempt to assign value of '{}' to YList ldata. "
"Please use list append or extend method."
.format(value))
if isinstance(value, Enum.YLeaf):
value = value.name
if name in () and name in self.__dict__:
if isinstance(value, YLeaf):
self.__dict__[name].set(value.get())
elif isinstance(value, YLeafList):
super(Drop.Nodes.Node.NpuNumberForDropStats, self).__setattr__(name, value)
else:
self.__dict__[name].set(value)
else:
if hasattr(value, "parent") and name != "parent":
if hasattr(value, "is_presence_container") and value.is_presence_container:
value.parent = self
elif value.parent is None and value.yang_name in self._children_yang_names:
value.parent = self
super(Drop.Nodes.Node.NpuNumberForDropStats, self).__setattr__(name, value)
class NpuNumberForDropStat(Entity):
"""
All drop stats for a particular NPU
.. attribute:: npu_id <key>
NPU number
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: drop_specific_stats_data
Second argument to the module
**type**\: list of :py:class:`DropSpecificStatsData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fretta_bcm_dpa_drop_stats_oper.Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat.DropSpecificStatsData>`
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat, self).__init__()
self.yang_name = "npu-number-for-drop-stat"
self.yang_parent_name = "npu-number-for-drop-stats"
self.npu_id = YLeaf(YType.int32, "npu-id")
self.drop_specific_stats_data = YList(self)
def __setattr__(self, name, value):
self._check_monkey_patching_error(name, value)
with _handle_type_error():
if name in self.__dict__ and isinstance(self.__dict__[name], YList):
raise YPYModelError("Attempt to assign value of '{}' to YList ldata. "
"Please use list append or extend method."
.format(value))
if isinstance(value, Enum.YLeaf):
value = value.name
if name in ("npu_id") and name in self.__dict__:
if isinstance(value, YLeaf):
self.__dict__[name].set(value.get())
elif isinstance(value, YLeafList):
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat, self).__setattr__(name, value)
else:
self.__dict__[name].set(value)
else:
if hasattr(value, "parent") and name != "parent":
if hasattr(value, "is_presence_container") and value.is_presence_container:
value.parent = self
elif value.parent is None and value.yang_name in self._children_yang_names:
value.parent = self
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat, self).__setattr__(name, value)
class DropSpecificStatsData(Entity):
"""
Second argument to the module
.. attribute:: drop_data <key>
Drop ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: count
count
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: id
id
**type**\: int
**range:** 0..4294967295
.. attribute:: name
name
**type**\: str
"""
_prefix = 'fretta-bcm-dpa-drop-stats-oper'
_revision = '2015-11-09'
def __init__(self):
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat.DropSpecificStatsData, self).__init__()
self.yang_name = "drop-specific-stats-data"
self.yang_parent_name = "npu-number-for-drop-stat"
self.drop_data = YLeaf(YType.int32, "drop-data")
self.count = YLeaf(YType.uint64, "count")
self.id = YLeaf(YType.uint32, "id")
self.name = YLeaf(YType.str, "name")
def __setattr__(self, name, value):
self._check_monkey_patching_error(name, value)
with _handle_type_error():
if name in self.__dict__ and isinstance(self.__dict__[name], YList):
raise YPYModelError("Attempt to assign value of '{}' to YList ldata. "
"Please use list append or extend method."
.format(value))
if isinstance(value, Enum.YLeaf):
value = value.name
if name in ("drop_data",
"count",
"id",
"name") and name in self.__dict__:
if isinstance(value, YLeaf):
self.__dict__[name].set(value.get())
elif isinstance(value, YLeafList):
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat.DropSpecificStatsData, self).__setattr__(name, value)
else:
self.__dict__[name].set(value)
else:
if hasattr(value, "parent") and name != "parent":
if hasattr(value, "is_presence_container") and value.is_presence_container:
value.parent = self
elif value.parent is None and value.yang_name in self._children_yang_names:
value.parent = self
super(Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat.DropSpecificStatsData, self).__setattr__(name, value)
def has_data(self):
return (
self.drop_data.is_set or
self.count.is_set or
self.id.is_set or
self.name.is_set)
def has_operation(self):
return (
self.yfilter != YFilter.not_set or
self.drop_data.yfilter != YFilter.not_set or
self.count.yfilter != YFilter.not_set or
self.id.yfilter != YFilter.not_set or
self.name.yfilter != YFilter.not_set)
def get_segment_path(self):
path_buffer = ""
path_buffer = "drop-specific-stats-data" + "[drop-data='" + self.drop_data.get() + "']" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (ancestor is None):
raise YPYModelError("ancestor cannot be None as one of the ancestors is a list")
else:
path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)
leaf_name_data = LeafDataList()
if (self.drop_data.is_set or self.drop_data.yfilter != YFilter.not_set):
leaf_name_data.append(self.drop_data.get_name_leafdata())
if (self.count.is_set or self.count.yfilter != YFilter.not_set):
leaf_name_data.append(self.count.get_name_leafdata())
if (self.id.is_set or self.id.yfilter != YFilter.not_set):
leaf_name_data.append(self.id.get_name_leafdata())
if (self.name.is_set or self.name.yfilter != YFilter.not_set):
leaf_name_data.append(self.name.get_name_leafdata())
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
return None
def has_leaf_or_child_of_name(self, name):
if(name == "drop-data" or name == "count" or name == "id" or name == "name"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
if(value_path == "drop-data"):
self.drop_data = value
self.drop_data.value_namespace = name_space
self.drop_data.value_namespace_prefix = name_space_prefix
if(value_path == "count"):
self.count = value
self.count.value_namespace = name_space
self.count.value_namespace_prefix = name_space_prefix
if(value_path == "id"):
self.id = value
self.id.value_namespace = name_space
self.id.value_namespace_prefix = name_space_prefix
if(value_path == "name"):
self.name = value
self.name.value_namespace = name_space
self.name.value_namespace_prefix = name_space_prefix
def has_data(self):
for c in self.drop_specific_stats_data:
if (c.has_data()):
return True
return self.npu_id.is_set
def has_operation(self):
for c in self.drop_specific_stats_data:
if (c.has_operation()):
return True
return (
self.yfilter != YFilter.not_set or
self.npu_id.yfilter != YFilter.not_set)
def get_segment_path(self):
path_buffer = ""
path_buffer = "npu-number-for-drop-stat" + "[npu-id='" + self.npu_id.get() + "']" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (ancestor is None):
raise YPYModelError("ancestor cannot be None as one of the ancestors is a list")
else:
path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)
leaf_name_data = LeafDataList()
if (self.npu_id.is_set or self.npu_id.yfilter != YFilter.not_set):
leaf_name_data.append(self.npu_id.get_name_leafdata())
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
if (child_yang_name == "drop-specific-stats-data"):
for c in self.drop_specific_stats_data:
segment = c.get_segment_path()
if (segment_path == segment):
return c
c = Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat.DropSpecificStatsData()
c.parent = self
local_reference_key = "ydk::seg::%s" % segment_path
self._local_refs[local_reference_key] = c
self.drop_specific_stats_data.append(c)
return c
return None
def has_leaf_or_child_of_name(self, name):
if(name == "drop-specific-stats-data" or name == "npu-id"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
if(value_path == "npu-id"):
self.npu_id = value
self.npu_id.value_namespace = name_space
self.npu_id.value_namespace_prefix = name_space_prefix
def has_data(self):
for c in self.npu_number_for_drop_stat:
if (c.has_data()):
return True
return False
def has_operation(self):
for c in self.npu_number_for_drop_stat:
if (c.has_operation()):
return True
return self.yfilter != YFilter.not_set
def get_segment_path(self):
path_buffer = ""
path_buffer = "npu-number-for-drop-stats" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (ancestor is None):
raise YPYModelError("ancestor cannot be None as one of the ancestors is a list")
else:
path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)
leaf_name_data = LeafDataList()
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
if (child_yang_name == "npu-number-for-drop-stat"):
for c in self.npu_number_for_drop_stat:
segment = c.get_segment_path()
if (segment_path == segment):
return c
c = Drop.Nodes.Node.NpuNumberForDropStats.NpuNumberForDropStat()
c.parent = self
local_reference_key = "ydk::seg::%s" % segment_path
self._local_refs[local_reference_key] = c
self.npu_number_for_drop_stat.append(c)
return c
return None
def has_leaf_or_child_of_name(self, name):
if(name == "npu-number-for-drop-stat"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
pass
def has_data(self):
return (
self.node_name.is_set or
(self.npu_number_for_drop_stats is not None and self.npu_number_for_drop_stats.has_data()))
def has_operation(self):
return (
self.yfilter != YFilter.not_set or
self.node_name.yfilter != YFilter.not_set or
(self.npu_number_for_drop_stats is not None and self.npu_number_for_drop_stats.has_operation()))
def get_segment_path(self):
path_buffer = ""
path_buffer = "node" + "[node-name='" + self.node_name.get() + "']" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (ancestor is None):
path_buffer = "Cisco-IOS-XR-fretta-bcm-dpa-drop-stats-oper:drop/nodes/%s" % self.get_segment_path()
else:
path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)
leaf_name_data = LeafDataList()
if (self.node_name.is_set or self.node_name.yfilter != YFilter.not_set):
leaf_name_data.append(self.node_name.get_name_leafdata())
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
if (child_yang_name == "npu-number-for-drop-stats"):
if (self.npu_number_for_drop_stats is None):
self.npu_number_for_drop_stats = Drop.Nodes.Node.NpuNumberForDropStats()
self.npu_number_for_drop_stats.parent = self
self._children_name_map["npu_number_for_drop_stats"] = "npu-number-for-drop-stats"
return self.npu_number_for_drop_stats
return None
def has_leaf_or_child_of_name(self, name):
if(name == "npu-number-for-drop-stats" or name == "node-name"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
if(value_path == "node-name"):
self.node_name = value
self.node_name.value_namespace = name_space
self.node_name.value_namespace_prefix = name_space_prefix
def has_data(self):
for c in self.node:
if (c.has_data()):
return True
return False
def has_operation(self):
for c in self.node:
if (c.has_operation()):
return True
return self.yfilter != YFilter.not_set
def get_segment_path(self):
path_buffer = ""
path_buffer = "nodes" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (ancestor is None):
path_buffer = "Cisco-IOS-XR-fretta-bcm-dpa-drop-stats-oper:drop/%s" % self.get_segment_path()
else:
path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)
leaf_name_data = LeafDataList()
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
if (child_yang_name == "node"):
for c in self.node:
segment = c.get_segment_path()
if (segment_path == segment):
return c
c = Drop.Nodes.Node()
c.parent = self
local_reference_key = "ydk::seg::%s" % segment_path
self._local_refs[local_reference_key] = c
self.node.append(c)
return c
return None
def has_leaf_or_child_of_name(self, name):
if(name == "node"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
pass
def has_data(self):
return (self.nodes is not None and self.nodes.has_data())
def has_operation(self):
return (
self.yfilter != YFilter.not_set or
(self.nodes is not None and self.nodes.has_operation()))
def get_segment_path(self):
path_buffer = ""
path_buffer = "Cisco-IOS-XR-fretta-bcm-dpa-drop-stats-oper:drop" + path_buffer
return path_buffer
def get_entity_path(self, ancestor):
path_buffer = ""
if (not ancestor is None):
raise YPYModelError("ancestor has to be None for top-level node")
path_buffer = self.get_segment_path()
leaf_name_data = LeafDataList()
entity_path = EntityPath(path_buffer, leaf_name_data)
return entity_path
def get_child_by_name(self, child_yang_name, segment_path):
child = self._get_child_by_seg_name([child_yang_name, segment_path])
if child is not None:
return child
if (child_yang_name == "nodes"):
if (self.nodes is None):
self.nodes = Drop.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
return self.nodes
return None
def has_leaf_or_child_of_name(self, name):
if(name == "nodes"):
return True
return False
def set_value(self, value_path, value, name_space, name_space_prefix):
pass
def clone_ptr(self):
self._top_entity = Drop()
return self._top_entity
|
[
"[email protected]"
] | |
aabc884e84fcfc59e35fc4a13e9012ad096792c4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03464/s278760514.py
|
08616e0b58be34c3cb585dbb67b7ff087e21dabd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 322 |
py
|
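# Reverse simulation (inferred from the code): each round maps the count
# x to val*floor(x/val); working backwards from the required final count
# of 2, keep the interval [l, r] of counts that can still reach it,
# replacing [l, r] with its preimage at every step (or report -1 if the
# interval contains no multiple of val).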
n=int(input())
arr=list(map(int,input().split()))
arr=arr[::-1]
if arr[0]!=2:
print(-1)
else:
arr=arr[1:]
l=2
r=3
for val in arr:
if (l<=val*(l//val)<=r) or (l<=val*(r//val)<=r):
if l%val!=0:
l=val*(l//val+1)
r=val*(r//val+1)-1
else:
print(-1)
break
else:
print(l,r)
|
[
"[email protected]"
] | |
09202c09b7522bbd53167ab063440db35dd0fec8
|
44eb40bf7bbd006f441b22d149dbb06eebe97506
|
/src/chap02/04_XOR_gate.py
|
566ded8dc57e75ea16bfa6a035fade75e11729bb
|
[] |
no_license
|
hoonest/Deep_Learning
|
56939f983c81e75b79d5474c11649dd57bf7107b
|
dd94f46ff886f20a47b09a54593e5fd2d53f0ed4
|
refs/heads/master
| 2020-04-19T22:52:03.640247 | 2019-02-19T03:34:16 | 2019-02-19T03:34:16 | 168,481,590 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 944 |
py
|
import numpy as np
# from 02_AND_bias import AND
# AND gate
def AND(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
# NAND Gate
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
bias = 0.7
tmp = np.sum(w * x) + bias
if tmp <= 0:
return 0
else:
return 1
# OR Gate
def OR(x1, x2):
    w = np.array([0.5, 0.5])  # a perceptron with the same structure as AND; only the bias differs
x = np.array([x1, x2])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
# XOR Gate
def XOR(x1, x2):
s1 = NAND(x1, x2)
s2 = OR(x1, x2)
y = AND(s1, s2)
return y
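# Note: XOR is not linearly separable, so no single-layer perceptron can
# compute it; the two-layer composition AND(NAND(x1, x2), OR(x1, x2)) can.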
if __name__ == "__main__":
x = XOR(0, 0) # 0
print(x)
x = XOR(0, 1) # 1
print(x)
x = XOR(1, 0) # 1
print(x)
x = XOR(1, 1) # 0
print(x)
|
[
"[email protected]"
] | |
ca42e2d3e0a4b22c42b77adcacf1ad86043a4836
|
ca33d58a472233f8a55b6a0e5571448694b8398a
|
/typeCooker/TypeCooker.roboFontExt/lib/typeCookerData.py
|
9c6fcfeb335e423572e4f73514ca45dd64df3a5f
|
[
"MIT"
] |
permissive
|
BlackFoundry/RoboFontExtensions
|
bd1ee80a9c9d5b6bb00d05d758cfc3b581db9483
|
e4a163fbd5e174a8e1270a2331f02337ce980afe
|
refs/heads/master
| 2021-04-03T08:37:31.325520 | 2018-03-11T20:35:09 | 2018-03-11T20:35:09 | 124,796,892 | 0 | 0 |
MIT
| 2018-03-11T20:33:18 | 2018-03-11T20:33:18 | null |
UTF-8
|
Python
| false | false | 30,876 |
py
|
parametersData = {
'keys': [
'width',
'weight',
'construction',
'stroke endings',
'ascender',
'descender',
'contrast type',
'contrast amount',
'stems',
'intended application',
'intended size',
'special'
],
'construction': [
{
'weight': 10,
'name': "roman",
'url': "parameters.html#construction",
'level': 1
},
{
'weight': 10,
'name': "capitals",
'url': "parameters.html#construction",
'level': 1
},
{
'weight': 10,
'name': "italic",
'url': "parameters.html#construction",
'level': 2
},
{
'weight': 2,
'name': "caps and smallcaps",
'url': "parameters.html#construction",
'level': 4
},
{
'weight': 5,
'name': "roman + capitals",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 5,
'name': "italic + capitals",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 2,
'name': "proportional oldstyle figures",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 2,
'name': "tabular oldstyle figures",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 2,
'name': "proportional lining figures",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 2,
'name': "tabular lining figures",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 2,
'name': "smallcaps figures",
'url': "parameters.html#construction",
'level': 3
},
{
'weight': 1,
'name': "nothing special",
'url': "parameters.html#construction",
'level': 1
}
],
'ascender': [
{
'weight': 5,
'name': "longer than normal",
'url': "parameters.html#ascender",
'level': 3
},
{
'weight': 5,
'name': "shorter than normal",
'url': "parameters.html#ascender",
'level': 3
},
{
'weight': 2,
'name': "much shorter than normal",
'url': "parameters.html#ascender",
'level': 4
},
{
'weight': 2,
'name': "much longer than normal",
'url': "parameters.html#ascender",
'level': 4
},
{
'weight': 1,
'name': "none at all",
'url': "parameters.html#ascender",
'level': 5
}
],
'descender': [
{
'weight': 5,
'name': "longer than normal",
'url': "parameters.html#descender",
'level': 3
},
{
'weight': 5,
'name': "shorter than normal",
'url': "parameters.html#descender",
'level': 3
},
{
'weight': 2,
'name': "much shorter than normal",
'url': "parameters.html#descender",
'level': 4
},
{
'weight': 1,
'name': "none",
'url': "parameters.html#descender",
'level': 5
}
],
'width': [
{
'weight': 2,
'name': "compressed",
'url': "parameters.html#width",
'level': 4
},
{
'weight': 3,
'name': "extra condensed",
'url': "parameters.html#width",
'level': 3
},
{
'weight': 3,
'name': "condensed",
'url': "parameters.html#width",
'level': 2
},
{
'weight': 4,
'name': "narrow",
'url': "parameters.html#width",
'level': 1
},
{
'weight': 5,
'name': "normal",
'url': "parameters.html#width",
'level': 1
},
{
'weight': 4,
'name': "extended",
'url': "parameters.html#width",
'level': 1
},
{
'weight': 3,
'name': "wide",
'url': "parameters.html#width",
'level': 2
},
{
'weight': 2,
'name': "very wide",
'url': "parameters.html#width",
'level': 2
},
{
'weight': 2,
'name': "monospaced",
'url': "parameters.html#width",
'level': 4
},
{
'weight': 1,
'name': "extremely wide",
'url': "parameters.html#width",
'level': 3
}
],
'contrast type': [
{
'weight': 5,
'name': "translation (broad nib)",
'url': "parameters.html#contrasttype",
'level': 2
},
{
'weight': 5,
'name': "expansion (pointed nib)",
'url': "parameters.html#contrasttype",
'level': 2
},
{
'weight': 5,
'name': "transitional",
'url': "parameters.html#contrasttype",
'level': 4
},
{
'weight': 5,
'name': "between translation and transitional",
'url': "parameters.html#contrasttype",
'level': 4
},
{
'weight': 5,
'name': "between expansion and transitional",
'url': "parameters.html#contrasttype",
'level': 4
},
{
'weight': 2,
'name': "speedball",
'url': "parameters.html#contrasttype",
'level': 4
},
{
'weight': 2,
'name': "brush",
'url': "parameters.html#contrasttype",
'level': 4
},
{
'weight': 2,
'name': "can't be determined",
'url': "parameters.html#contrasttype",
'level': 5
}
],
'contrast amount': [
{
'weight': 10,
'name': "inverted contrast",
'url': "parameters.html#contrastamount",
'level': 5
},
{
'weight': 10,
'name': "slightly inverted contrast",
'url': "parameters.html#contrastamount",
'level': 5
},
{
'weight': 10,
'name': "no contrast at all (thick == thin)",
'url': "parameters.html#contrastamount",
'level': 4
},
{
'weight': 10,
'name': "no visible contrast",
'url': "parameters.html#contrastamount",
'level': 3
},
{
'weight': 10,
'name': "very low contrast",
'url': "parameters.html#contrastamount",
'level': 3
},
{
'weight': 10,
'name': "low contrast",
'url': "parameters.html#contrastamount",
'level': 1
},
{
'weight': 10,
'name': "some contrast",
'url': "parameters.html#contrastamount",
'level': 1
},
{
'weight': 10,
'name': "visible contrast",
'url': "parameters.html#contrastamount",
'level': 2
},
{
'weight': 10,
'name': "quite some contrast",
'url': "parameters.html#contrastamount",
'level': 2
},
{
'weight': 10,
'name': "a lot of contrast",
'url': "parameters.html#contrastamount",
'level': 1
},
{
'weight': 10,
'name': "high contrast",
'url': "parameters.html#contrastamount",
'level': 2
},
{
'weight': 10,
'name': "very high contrast",
'url': "parameters.html#contrastamount",
'level': 4
},
{
'weight': 10,
'name': "extreme contrast",
'url': "parameters.html#contrastamount",
'level': 5
}
],
'stems': [
{
'weight': 10,
'name': "straight",
'url': "parameters.html#stems",
'level': 3
},
{
'weight': 10,
'name': "slightly concave",
'url': "parameters.html#stems",
'level': 3
},
{
'weight': 10,
'name': "visibly concave",
'url': "parameters.html#stems",
'level': 4
},
{
'weight': 10,
'name': "flaring",
'url': "parameters.html#stems",
'level': 4
},
{
'weight': 10,
'name': "convex",
'url': "parameters.html#stems",
'level': 5
}
],
'stroke endings': [
{
'weight': 10,
'name': "straight, no serif",
'url': "parameters.html#strokeendings",
'level': 1
},
{
'weight': 10,
'name': "a serif",
'url': "parameters.html#strokeendings",
'level': 1
},
{
'weight': 5,
'name': "rounded, no serif",
'url': "parameters.html#strokeendings",
'level': 3
},
{
'weight': 5,
'name': "serif with bracketing",
'url': "parameters.html#strokeendings",
'level': 3
},
{
'weight': 5,
'name': "asymmetric serif",
'url': "parameters.html#strokeendings",
'level': 4
},
{
'weight': 10,
'name': "wedge shaped serif",
'url': "parameters.html#strokeendings",
'level': 4
},
{
'weight': 5,
'name': "slab shaped serif",
'url': "parameters.html#strokeendings",
'level': 3
}
],
'weight': [
{
'weight': 2,
'name': "hairline",
'url': "parameters.html#strokeweight",
'level': 4
},
{
'weight': 3,
'name': "very thin",
'url': "parameters.html#strokeweight",
'level': 4
},
{
'weight': 4,
'name': "thin",
'url': "parameters.html#strokeweight",
'level': 3
},
{
'weight': 5,
'name': "extra light",
'url': "parameters.html#strokeweight",
'level': 3
},
{
'weight': 5,
'name': "light",
'url': "parameters.html#strokeweight",
'level': 1
},
{
'weight': 6,
'name': "book",
'url': "parameters.html#strokeweight",
'level': 2
},
{
'weight': 7,
'name': "plain",
'url': "parameters.html#strokeweight",
'level': 1
},
{
'weight': 6,
'name': "medium",
'url': "parameters.html#strokeweight",
'level': 3
},
{
'weight': 5,
'name': "semi bold",
'url': "parameters.html#strokeweight",
'level': 3
},
{
'weight': 4,
'name': "bold",
'url': "parameters.html#strokeweight",
'level': 1
},
{
'weight': 3,
'name': "extra bold",
'url': "parameters.html#strokeweight",
'level': 3
},
{
'weight': 2,
'name': "black",
'url': "parameters.html#strokeweight",
'level': 4
},
],
'intended application': [
{
'weight': 2,
'name': "unknown",
'url': "parameters.html#intendedapplication",
'level': 3
},
{
'weight': 10,
'name': "multi-purpose",
'url': "parameters.html#intendedapplication",
'level': 3
},
{
'weight': 10,
'name': "newsprint",
'url': "parameters.html#intendedapplication",
'level': 3
},
{
'weight': 10,
'name': "smooth offset printing",
'url': "parameters.html#intendedapplication",
'level': 3
},
{
'weight': 5,
'name': "engraving",
'url': "parameters.html#intendedapplication",
'level': 4
},
{
'weight': 10,
'name': "signage",
'url': "parameters.html#intendedapplication",
'level': 3
},
{
'weight': 5,
'name': "packaging",
'url': "parameters.html#intendedapplication",
'level': 4
},
{
'weight': 2,
'name': "subtitles on television",
'url': "parameters.html#intendedapplication",
'level': 5
},
{
'weight': 2,
'name': "antialiased bitmaps",
'url': "parameters.html#intendedapplication",
'level': 5
},
{
'weight': 2,
'name': "rubber stamps",
'url': "parameters.html#intendedapplication",
'level': 5
}
],
'intended size': [
{
'weight': 2,
'name': "use very small",
'url': "parameters.html#intendedsize",
'level': 4
},
{
'weight': 4,
'name': "reading sizes",
'url': "parameters.html#intendedsize",
'level': 4
},
{
'weight': 8,
'name': "display sizes",
'url': "parameters.html#intendedsize",
'level': 4
},
{
'weight': 4,
'name': "very large sizes",
'url': "parameters.html#intendedsize",
'level': 4
},
{
'weight': 4,
'name': "most sizes",
'url': "parameters.html#intendedsize",
'level': 4
}
],
'special': [
{
'weight': 5,
'name': "use only straight lines",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 5,
'name': "draw curves as octagonals",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 2,
'name': "rough contours",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 2,
'name': "casual",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 2,
'name': "sketchy",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 5,
'name': "cut as a stencil",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 8,
'name': "must contains at least 1 ligature",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 8,
'name': "must contains at least 2 ligatures",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 4,
'name': "inktraps for inside corners (white)",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 4,
'name': "inktraps for outside corners (black)",
'url': "parameters.html#special",
'level': 4
},
{
'weight': 5,
'name': "initial and terminal swashes",
'url': "parameters.html#special",
'level': 4
}
]
}
stylesData = {
'name':"The one with Styles",
'description': "Draw key glyphs of the typeface, as if they were drawn by a different designer.",
'keys': [
'typeface',
'as drawn by',
'but as',
'and'
],
'typeface': [
{
'weight': 10,
'name': "Times",
"url": "http://www.identifont.com/find?font=times&q=Go",
'level': 1
},
{
'weight': 10,
'name': "Helvetica",
"url": "http://www.identifont.com/find?font=Helvetica&q=Go",
'level': 1
},
{
'weight': 10,
'name': "Akzidenz Grotesk",
"url": "http://www.identifont.com/find?font=Akzidenz+Grotesk&q=Go",
'level': 2
},
{
'weight': 10,
'name': "Garamond",
"url": "http://www.identifont.com/find?font=Garamond&q=Go",
'level': 3
},
{
'weight': 10,
'name': "Palatino",
'url': "http://www.identifont.com/find?font=palatino&q=Go",
'level': 2
},
{
'weight': 10,
'name': "Univers",
'url': "http://www.identifont.com/find?font=univers&q=Go",
'level': 1
},
{
'weight': 10,
'name': "Meta",
'url': "http://www.identifont.com/find?font=meta&q=Go",
'level': 2
},
{
'weight': 10,
'name': "a modern",
'url': "http://www.identifont.com/find?font=century&q=Go",
'level': 3
},
{
'weight': 10,
'name': "an oldstyle",
'url': "http://www.identifont.com/find?font=oldstyle&q=Go",
'level': 3
},
{
'weight': 10,
'name': "a gothic",
'url': "http://www.identifont.com/find?font=gothic&q=Go",
'level': 3
},
{
'weight': 10,
'name': "a blackletter",
'url': "http://www.identifont.com/find?font=blackletter&q=Go",
'level': 3
},
{
'weight': 10,
'name': "Courier",
'url': "http://www.identifont.com/find?font=courier&q=Go",
'level': 1
}
],
'as drawn by': [
{
'weight': 10,
'name': "Robert Granjon",
'url': "http://en.wikipedia.org/wiki/Granjon",
'level': 1
},
{
'weight': 10,
'name': "Erik Spiekermann",
'url': "http://en.wikipedia.org/wiki/Erik_Spiekermann",
'level': 1
},
{
'weight': 10,
'name': "Miedinger & Hoffman",
'url': "http://en.wikipedia.org/wiki/Max_Miedinger",
'level': 1
},
{
'weight': 10,
'name': "Roger Excoffon",
'url': "http://en.wikipedia.org/wiki/Roger_Excoffon",
'level': 1
},
{
'weight': 10,
'name': "Hermann Zapf",
'url':"http://en.wikipedia.org/wiki/Hermann_Zapf",
'level': 2
},
{
'weight': 10,
'name': "Giambattista Bodoni",
'url': "http://en.wikipedia.org/wiki/Giambattista_Bodoni",
'level': 1
},
{
'weight': 10,
'name': "Jan van Krimpen",
'url': "http://en.wikipedia.org/wiki/Jan_van_Krimpen",
'level': 1
},
{
'weight': 10,
'name': "Gerrit Noordzij",
'url': "http://letterror.com/noordzij/",
'level': 1
},
{
'weight': 10,
'name': "Adrian Frutiger",
'url': "http://en.wikipedia.org/wiki/Frutiger",
'level': 2
},
{
'weight': 10,
'name': "Paul Renner",
'url': "http://en.wikipedia.org/wiki/Paul_Renner",
'level': 2
},
{
'weight': 10,
'name': "Wim Crouwel",
'url': "http://en.wikipedia.org/wiki/Wim_Crouwel",
'level': 4
},
{
'weight': 10,
'name': "William Caslon",
'url': "http://en.wikipedia.org/wiki/William_Caslon",
'level': 4
},
{
'weight': 10,
'name': "Gerard Unger",
'url': "http://en.wikipedia.org/wiki/Gerard_Unger",
'level': 4
},
{
'weight': 10,
'name': "Matthew Carter",
'url': "http://en.wikipedia.org/wiki/Matthew_Carter",
'level': 5
},
{
'weight': 10,
'name': "Luc(as) de Groot",
'url': "http://en.wikipedia.org/wiki/Lucas_de_Groot",
'level': 4
},
],
'but as': [
{
'weight': 10,
'name': "a geometric sans",
'level': 3
},
{
'weight': 10,
'name': "a humanist sans",
'level': 3
},
{
'weight': 10,
'name': "an antiqua",
'level': 4
},
{
'weight': 10,
'name': "a grunge face",
'level': 4
},
{
'weight': 10,
'name': "a titling face",
'level': 4
},
{
'weight': 10,
'name': "a display face",
'level': 4
},
{
'weight': 10,
'name': "a script",
'level': 4
},
{
'weight': 10,
'name': "a typewriter face",
'level': 4
},
{
'weight': 10,
'name': "a bitmap",
'level': 4
},
{
'weight': 10,
'name': "a smallcap face",
'level': 4
}
],
'and': [
{
'weight': 10,
'name': "monospaced",
'level': 4
},
{
'weight': 10,
'name': "swash",
'level': 4
}
]
}
|
[
"[email protected]"
] | |
ae349ac80acca792276ae618b437905cce9acb03
|
945d957bde025c0aa96df08d151252c3d2b682cb
|
/dynamicNetworkConfig/transport/wsgi/middleware/__init__.py
|
ffd513f3f042ae197e107ffc17b10c682bbcccae
|
[
"Apache-2.0"
] |
permissive
|
ClockwerksSoftware/dynamicNetworkConfig
|
85d8c16d44dbcd8361dba17fe01d5474d2e3a7c5
|
c785f437edb912be0915822184d3967c71225849
|
refs/heads/master
| 2021-01-19T12:51:56.336000 | 2017-02-25T19:36:37 | 2017-02-25T19:36:37 | 82,368,281 | 0 | 1 | null | 2017-02-25T19:36:38 | 2017-02-18T07:00:42 | null |
UTF-8
|
Python
| false | false | 190 |
py
|
from dynamicNetworkConfig.transport.wsgi.middleware.auth import (
AuthenticationMiddleware
)
from dynamicNetworkConfig.transport.wsgi.middleware.context import (
ContextMiddleware
)
|
[
"[email protected]"
] | |
b4b1aae7471ab0b781047b9e07c70747713c5a23
|
2635edb96afa8117d4584a470061e447b79adc6e
|
/mybook/urls.py
|
e264e1f09a359e34ec1c24f8104f5d3d73414430
|
[] |
no_license
|
Mark-Seaman/Sensei-2018
|
673609731ecb5ebb782dab94b2cf3d7c22940424
|
06b02892cfe1bf1d25cb4224e86eb693c82b0f29
|
refs/heads/master
| 2022-02-18T19:14:10.343093 | 2022-01-15T20:06:21 | 2022-01-15T20:06:21 | 158,728,468 | 0 | 0 | null | 2022-01-16T21:06:09 | 2018-11-22T16:51:55 |
HTML
|
UTF-8
|
Python
| false | false | 1,778 |
py
|
from django.conf.urls import url
from django.contrib.auth import login, logout
from .seaman import DocFileIndex, DocList, Leverage, MarkSeaman, PrivateDoc, SeamansLog
from .guide import SeamansGuide
from .views import *
from .spiritual import SpiritualDoc, SpiritualSelect
urlpatterns = [
# Documents
url(r'^$', DocRoot.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Missing$', DocMissing.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Random$', DocRandom.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/List$', DocList.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Files$', DocFileIndex.as_view()),
# Authentication
# url(r'^login', login, name='login'),
# url(r'^logout$', logout, {'next_page': '/login'}),
# MarkSeaman
#url(r'^MarkSeaman/booknotes/(?P<title>[\w/\-.]*)$', BookNotes.as_view()),
url(r'MarkSeaman/(?P<title>[\w/\-.]*)$', MarkSeaman.as_view()),
# Guide
url(r'^guide/(?P<title>[\w/\-_.]*)$', SeamansGuide.as_view()),
# Private Pages
url(r'^info/(?P<title>[\w/\-_.]*)$', PrivateDoc.as_view()),
# Seaman's Log
url(r'^seamanslog$', SeamansLog.as_view()),
url(r'^seamanslog/(?P<title>[\w/\-_.]*)$', SeamansLog.as_view()),
# Shrinking World
url(r'shrinkingworld/Leverage/(?P<title>[\w/\-.]*)$', Leverage.as_view()),
# Spiritual
url(r'^spiritual/Index$', SpiritualDoc.as_view()),
url(r'^spiritual/(?P<title>[\w\-_.]*)$', SpiritualSelect.as_view()),
url(r'^spiritual/(?P<title>[\w/\-_.]*)$', SpiritualDoc.as_view()),
# Documents
url(r'^(?P<title>[\w/\-_.]*)$', DocDisplay.as_view()),
]
|
[
"[email protected]"
] | |
e19a2f8a101c73150cc1964bcf8808cb147c50a7
|
99d17ddba93db1105e8941b1b592d9d9e22864fb
|
/tests/integration_tests/dashboards/commands_tests.py
|
d382a5f50d1b27f4e29c826805ac43d2ef198793
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
apache-superset/incubator-superset
|
f376dc15d6e2187d0b65a0dc5476d6c6c3378f21
|
0945d4a2f46667aebb9b93d0d7685215627ad237
|
refs/heads/master
| 2023-03-15T04:12:40.478792 | 2022-07-25T14:44:43 | 2022-07-25T14:44:43 | 146,225,581 | 21 | 20 |
Apache-2.0
| 2023-03-13T16:00:14 | 2018-08-26T23:56:08 |
TypeScript
|
UTF-8
|
Python
| false | false | 26,088 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import json
from unittest.mock import MagicMock, patch
import pytest
import yaml
from werkzeug.utils import secure_filename
from superset import db, security_manager
from superset.commands.exceptions import CommandInvalidError
from superset.commands.importers.exceptions import IncorrectVersionError
from superset.connectors.sqla.models import SqlaTable
from superset.dashboards.commands.exceptions import DashboardNotFoundError
from superset.dashboards.commands.export import (
append_charts,
ExportDashboardsCommand,
get_default_position,
)
from superset.dashboards.commands.importers import v0, v1
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.importexport import (
chart_config,
dashboard_config,
dashboard_export,
dashboard_metadata_config,
database_config,
dataset_config,
dataset_metadata_config,
)
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
class TestExportDashboardsCommand(SupersetTestCase):
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.security.manager.g")
@patch("superset.views.base.g")
def test_export_dashboard_command(self, mock_g1, mock_g2):
mock_g1.user = security_manager.find_user("admin")
mock_g2.user = security_manager.find_user("admin")
example_dashboard = (
db.session.query(Dashboard).filter_by(slug="world_health").one()
)
command = ExportDashboardsCommand([example_dashboard.id])
contents = dict(command.run())
expected_paths = {
"metadata.yaml",
f"dashboards/World_Banks_Data_{example_dashboard.id}.yaml",
"datasets/examples/wb_health_population.yaml",
"databases/examples.yaml",
}
for chart in example_dashboard.slices:
chart_slug = secure_filename(chart.slice_name)
expected_paths.add(f"charts/{chart_slug}_{chart.id}.yaml")
assert expected_paths == set(contents.keys())
metadata = yaml.safe_load(
contents[f"dashboards/World_Banks_Data_{example_dashboard.id}.yaml"]
)
        # remove chart IDs and UUIDs from metadata so we can compare
for chart_info in metadata["position"].values():
if isinstance(chart_info, dict) and "uuid" in chart_info.get("meta", {}):
del chart_info["meta"]["chartId"]
del chart_info["meta"]["uuid"]
assert metadata == {
"dashboard_title": "World Bank's Data",
"description": None,
"css": None,
"slug": "world_health",
"uuid": str(example_dashboard.uuid),
"position": {
"CHART-36bfc934": {
"children": [],
"id": "CHART-36bfc934",
"meta": {"height": 25, "sliceName": "Region Filter", "width": 2},
"type": "CHART",
},
"CHART-37982887": {
"children": [],
"id": "CHART-37982887",
"meta": {
"height": 25,
"sliceName": "World's Population",
"width": 2,
},
"type": "CHART",
},
"CHART-17e0f8d8": {
"children": [],
"id": "CHART-17e0f8d8",
"meta": {
"height": 92,
"sliceName": "Most Populated Countries",
"width": 3,
},
"type": "CHART",
},
"CHART-2ee52f30": {
"children": [],
"id": "CHART-2ee52f30",
"meta": {"height": 38, "sliceName": "Growth Rate", "width": 6},
"type": "CHART",
},
"CHART-2d5b6871": {
"children": [],
"id": "CHART-2d5b6871",
"meta": {"height": 52, "sliceName": "% Rural", "width": 7},
"type": "CHART",
},
"CHART-0fd0d252": {
"children": [],
"id": "CHART-0fd0d252",
"meta": {
"height": 50,
"sliceName": "Life Expectancy VS Rural %",
"width": 8,
},
"type": "CHART",
},
"CHART-97f4cb48": {
"children": [],
"id": "CHART-97f4cb48",
"meta": {"height": 38, "sliceName": "Rural Breakdown", "width": 3},
"type": "CHART",
},
"CHART-b5e05d6f": {
"children": [],
"id": "CHART-b5e05d6f",
"meta": {
"height": 50,
"sliceName": "World's Pop Growth",
"width": 4,
},
"type": "CHART",
},
"CHART-e76e9f5f": {
"children": [],
"id": "CHART-e76e9f5f",
"meta": {"height": 50, "sliceName": "Box plot", "width": 4},
"type": "CHART",
},
"CHART-a4808bba": {
"children": [],
"id": "CHART-a4808bba",
"meta": {"height": 50, "sliceName": "Treemap", "width": 8},
"type": "CHART",
},
"COLUMN-071bbbad": {
"children": ["ROW-1e064e3c", "ROW-afdefba9"],
"id": "COLUMN-071bbbad",
"meta": {"background": "BACKGROUND_TRANSPARENT", "width": 9},
"type": "COLUMN",
},
"COLUMN-fe3914b8": {
"children": ["CHART-36bfc934", "CHART-37982887"],
"id": "COLUMN-fe3914b8",
"meta": {"background": "BACKGROUND_TRANSPARENT", "width": 2},
"type": "COLUMN",
},
"GRID_ID": {
"children": ["ROW-46632bc2", "ROW-3fa26c5d", "ROW-812b3f13"],
"id": "GRID_ID",
"type": "GRID",
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {"text": "World's Bank Data"},
"type": "HEADER",
},
"ROOT_ID": {"children": ["GRID_ID"], "id": "ROOT_ID", "type": "ROOT"},
"ROW-1e064e3c": {
"children": ["COLUMN-fe3914b8", "CHART-2d5b6871"],
"id": "ROW-1e064e3c",
"meta": {"background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
},
"ROW-3fa26c5d": {
"children": ["CHART-b5e05d6f", "CHART-0fd0d252"],
"id": "ROW-3fa26c5d",
"meta": {"background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
},
"ROW-46632bc2": {
"children": ["COLUMN-071bbbad", "CHART-17e0f8d8"],
"id": "ROW-46632bc2",
"meta": {"background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
},
"ROW-812b3f13": {
"children": ["CHART-a4808bba", "CHART-e76e9f5f"],
"id": "ROW-812b3f13",
"meta": {"background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
},
"ROW-afdefba9": {
"children": ["CHART-2ee52f30", "CHART-97f4cb48"],
"id": "ROW-afdefba9",
"meta": {"background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
},
"DASHBOARD_VERSION_KEY": "v2",
},
"metadata": {"mock_key": "mock_value"},
"version": "1.0.0",
}
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.security.manager.g")
@patch("superset.views.base.g")
def test_export_dashboard_command_no_access(self, mock_g1, mock_g2):
"""Test that users can't export datasets they don't have access to"""
mock_g1.user = security_manager.find_user("gamma")
mock_g2.user = security_manager.find_user("gamma")
example_dashboard = (
db.session.query(Dashboard).filter_by(slug="world_health").one()
)
command = ExportDashboardsCommand([example_dashboard.id])
contents = command.run()
with self.assertRaises(DashboardNotFoundError):
next(contents)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.security.manager.g")
@patch("superset.views.base.g")
def test_export_dashboard_command_invalid_dataset(self, mock_g1, mock_g2):
"""Test that an error is raised when exporting an invalid dataset"""
mock_g1.user = security_manager.find_user("admin")
mock_g2.user = security_manager.find_user("admin")
command = ExportDashboardsCommand([-1])
contents = command.run()
with self.assertRaises(DashboardNotFoundError):
next(contents)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.security.manager.g")
@patch("superset.views.base.g")
def test_export_dashboard_command_key_order(self, mock_g1, mock_g2):
"""Test that they keys in the YAML have the same order as export_fields"""
mock_g1.user = security_manager.find_user("admin")
mock_g2.user = security_manager.find_user("admin")
example_dashboard = (
db.session.query(Dashboard).filter_by(slug="world_health").one()
)
command = ExportDashboardsCommand([example_dashboard.id])
contents = dict(command.run())
metadata = yaml.safe_load(
contents[f"dashboards/World_Banks_Data_{example_dashboard.id}.yaml"]
)
assert list(metadata.keys()) == [
"dashboard_title",
"description",
"css",
"slug",
"uuid",
"position",
"metadata",
"version",
]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.dashboards.commands.export.suffix")
def test_append_charts(self, mock_suffix):
"""Test that orphaned charts are added to the dashboard position"""
# return deterministic IDs
mock_suffix.side_effect = (str(i) for i in itertools.count(1))
position = get_default_position("example")
chart_1 = db.session.query(Slice).filter_by(slice_name="Region Filter").one()
new_position = append_charts(position, {chart_1})
assert new_position == {
"DASHBOARD_VERSION_KEY": "v2",
"ROOT_ID": {"children": ["GRID_ID"], "id": "ROOT_ID", "type": "ROOT"},
"GRID_ID": {
"children": ["ROW-N-2"],
"id": "GRID_ID",
"parents": ["ROOT_ID"],
"type": "GRID",
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {"text": "example"},
"type": "HEADER",
},
"ROW-N-2": {
"children": ["CHART-1"],
"id": "ROW-N-2",
"meta": {"0": "ROOT_ID", "background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
"parents": ["ROOT_ID", "GRID_ID"],
},
"CHART-1": {
"children": [],
"id": "CHART-1",
"meta": {
"chartId": chart_1.id,
"height": 50,
"sliceName": "Region Filter",
"uuid": str(chart_1.uuid),
"width": 4,
},
"type": "CHART",
"parents": ["ROOT_ID", "GRID_ID", "ROW-N-2"],
},
}
chart_2 = (
db.session.query(Slice).filter_by(slice_name="World's Population").one()
)
new_position = append_charts(new_position, {chart_2})
assert new_position == {
"DASHBOARD_VERSION_KEY": "v2",
"ROOT_ID": {"children": ["GRID_ID"], "id": "ROOT_ID", "type": "ROOT"},
"GRID_ID": {
"children": ["ROW-N-2", "ROW-N-4"],
"id": "GRID_ID",
"parents": ["ROOT_ID"],
"type": "GRID",
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {"text": "example"},
"type": "HEADER",
},
"ROW-N-2": {
"children": ["CHART-1"],
"id": "ROW-N-2",
"meta": {"0": "ROOT_ID", "background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
"parents": ["ROOT_ID", "GRID_ID"],
},
"ROW-N-4": {
"children": ["CHART-3"],
"id": "ROW-N-4",
"meta": {"0": "ROOT_ID", "background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
"parents": ["ROOT_ID", "GRID_ID"],
},
"CHART-1": {
"children": [],
"id": "CHART-1",
"meta": {
"chartId": chart_1.id,
"height": 50,
"sliceName": "Region Filter",
"uuid": str(chart_1.uuid),
"width": 4,
},
"type": "CHART",
"parents": ["ROOT_ID", "GRID_ID", "ROW-N-2"],
},
"CHART-3": {
"children": [],
"id": "CHART-3",
"meta": {
"chartId": chart_2.id,
"height": 50,
"sliceName": "World's Population",
"uuid": str(chart_2.uuid),
"width": 4,
},
"type": "CHART",
"parents": ["ROOT_ID", "GRID_ID", "ROW-N-4"],
},
}
position = {"DASHBOARD_VERSION_KEY": "v2"}
new_position = append_charts(position, [chart_1, chart_2])
assert new_position == {
"CHART-5": {
"children": [],
"id": "CHART-5",
"meta": {
"chartId": chart_1.id,
"height": 50,
"sliceName": "Region Filter",
"uuid": str(chart_1.uuid),
"width": 4,
},
"type": "CHART",
},
"CHART-6": {
"children": [],
"id": "CHART-6",
"meta": {
"chartId": chart_2.id,
"height": 50,
"sliceName": "World's Population",
"uuid": str(chart_2.uuid),
"width": 4,
},
"type": "CHART",
},
"DASHBOARD_VERSION_KEY": "v2",
}
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.security.manager.g")
@patch("superset.views.base.g")
def test_export_dashboard_command_no_related(self, mock_g1, mock_g2):
"""
Test that only the dashboard is exported when export_related=False.
"""
mock_g1.user = security_manager.find_user("admin")
mock_g2.user = security_manager.find_user("admin")
example_dashboard = (
db.session.query(Dashboard).filter_by(slug="world_health").one()
)
command = ExportDashboardsCommand([example_dashboard.id], export_related=False)
contents = dict(command.run())
expected_paths = {
"metadata.yaml",
f"dashboards/World_Banks_Data_{example_dashboard.id}.yaml",
}
assert expected_paths == set(contents.keys())
class TestImportDashboardsCommand(SupersetTestCase):
def test_import_v0_dashboard_cli_export(self):
num_dashboards = db.session.query(Dashboard).count()
num_charts = db.session.query(Slice).count()
num_datasets = db.session.query(SqlaTable).count()
num_databases = db.session.query(Database).count()
contents = {
"20201119_181105.json": json.dumps(dashboard_export),
}
command = v0.ImportDashboardsCommand(contents)
command.run()
new_num_dashboards = db.session.query(Dashboard).count()
new_num_charts = db.session.query(Slice).count()
new_num_datasets = db.session.query(SqlaTable).count()
new_num_databases = db.session.query(Database).count()
assert new_num_dashboards == num_dashboards + 1
assert new_num_charts == num_charts + 1
assert new_num_datasets == num_datasets + 1
assert new_num_databases == num_databases
dashboard = (
db.session.query(Dashboard).filter_by(dashboard_title="Births 2").one()
)
assert len(dashboard.slices) == 1
chart = dashboard.slices[0]
assert chart.slice_name == "Number of California Births"
dataset = chart.table
assert dataset.table_name == "birth_names_2"
database = dataset.database
assert database.database_name == "examples"
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.commit()
@patch("superset.dashboards.commands.importers.v1.utils.g")
def test_import_v1_dashboard(self, mock_g):
"""Test that we can import a dashboard"""
mock_g.user = security_manager.find_user("admin")
contents = {
"metadata.yaml": yaml.safe_dump(dashboard_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
"charts/imported_chart.yaml": yaml.safe_dump(chart_config),
"dashboards/imported_dashboard.yaml": yaml.safe_dump(dashboard_config),
}
command = v1.ImportDashboardsCommand(contents)
command.run()
dashboard = (
db.session.query(Dashboard).filter_by(uuid=dashboard_config["uuid"]).one()
)
assert len(dashboard.slices) == 1
chart = dashboard.slices[0]
assert str(chart.uuid) == chart_config["uuid"]
new_chart_id = chart.id
assert dashboard.dashboard_title == "Test dash"
assert dashboard.description is None
assert dashboard.css == ""
assert dashboard.slug is None
assert json.loads(dashboard.position_json) == {
"CHART-SVAlICPOSJ": {
"children": [],
"id": "CHART-SVAlICPOSJ",
"meta": {
"chartId": new_chart_id,
"height": 50,
"sliceName": "Number of California Births",
"uuid": "0c23747a-6528-4629-97bf-e4b78d3b9df1",
"width": 4,
},
"parents": ["ROOT_ID", "GRID_ID", "ROW-dP_CHaK2q"],
"type": "CHART",
},
"DASHBOARD_VERSION_KEY": "v2",
"GRID_ID": {
"children": ["ROW-dP_CHaK2q"],
"id": "GRID_ID",
"parents": ["ROOT_ID"],
"type": "GRID",
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {"text": "Test dash"},
"type": "HEADER",
},
"ROOT_ID": {"children": ["GRID_ID"], "id": "ROOT_ID", "type": "ROOT"},
"ROW-dP_CHaK2q": {
"children": ["CHART-SVAlICPOSJ"],
"id": "ROW-dP_CHaK2q",
"meta": {"0": "ROOT_ID", "background": "BACKGROUND_TRANSPARENT"},
"parents": ["ROOT_ID", "GRID_ID"],
"type": "ROW",
},
}
assert json.loads(dashboard.json_metadata) == {
"color_scheme": None,
"default_filters": "{}",
"expanded_slices": {str(new_chart_id): True},
"filter_scopes": {
str(new_chart_id): {
"region": {"scope": ["ROOT_ID"], "immune": [new_chart_id]}
},
},
"import_time": 1604342885,
"refresh_frequency": 0,
"remote_id": 7,
"timed_refresh_immune_slices": [new_chart_id],
}
dataset = chart.table
assert str(dataset.uuid) == dataset_config["uuid"]
database = dataset.database
assert str(database.uuid) == database_config["uuid"]
assert dashboard.owners == [mock_g.user]
dashboard.owners = []
chart.owners = []
dataset.owners = []
database.owners = []
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_v1_dashboard_multiple(self):
"""Test that a dashboard can be imported multiple times"""
num_dashboards = db.session.query(Dashboard).count()
contents = {
"metadata.yaml": yaml.safe_dump(dashboard_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
"charts/imported_chart.yaml": yaml.safe_dump(chart_config),
"dashboards/imported_dashboard.yaml": yaml.safe_dump(dashboard_config),
}
command = v1.ImportDashboardsCommand(contents, overwrite=True)
command.run()
command.run()
new_num_dashboards = db.session.query(Dashboard).count()
assert new_num_dashboards == num_dashboards + 1
dashboard = (
db.session.query(Dashboard).filter_by(uuid=dashboard_config["uuid"]).one()
)
chart = dashboard.slices[0]
dataset = chart.table
database = dataset.database
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_v1_dashboard_validation(self):
"""Test different validations applied when importing a dashboard"""
# metadata.yaml must be present
contents = {
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
"charts/imported_chart.yaml": yaml.safe_dump(chart_config),
"dashboards/imported_dashboard.yaml": yaml.safe_dump(dashboard_config),
}
command = v1.ImportDashboardsCommand(contents)
with pytest.raises(IncorrectVersionError) as excinfo:
command.run()
assert str(excinfo.value) == "Missing metadata.yaml"
# version should be 1.0.0
contents["metadata.yaml"] = yaml.safe_dump(
{
"version": "2.0.0",
"type": "Database",
"timestamp": "2020-11-04T21:27:44.423819+00:00",
}
)
command = v1.ImportDashboardsCommand(contents)
with pytest.raises(IncorrectVersionError) as excinfo:
command.run()
assert str(excinfo.value) == "Must be equal to 1.0.0."
# type should be Database
contents["metadata.yaml"] = yaml.safe_dump(dataset_metadata_config)
command = v1.ImportDashboardsCommand(contents)
with pytest.raises(CommandInvalidError) as excinfo:
command.run()
assert str(excinfo.value) == "Error importing dashboard"
assert excinfo.value.normalized_messages() == {
"metadata.yaml": {"type": ["Must be equal to Dashboard."]}
}
# must also validate datasets
broken_config = dataset_config.copy()
del broken_config["table_name"]
contents["metadata.yaml"] = yaml.safe_dump(dashboard_metadata_config)
contents["datasets/imported_dataset.yaml"] = yaml.safe_dump(broken_config)
command = v1.ImportDashboardsCommand(contents)
with pytest.raises(CommandInvalidError) as excinfo:
command.run()
assert str(excinfo.value) == "Error importing dashboard"
assert excinfo.value.normalized_messages() == {
"datasets/imported_dataset.yaml": {
"table_name": ["Missing data for required field."],
}
}
|
[
"[email protected]"
] | |
8d2fd166695b224a0c4a3383603705671eaf20b8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03767/s405465871.py
|
e340c69019eb1ab19c7c4ffec2a97b3f548737f3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 166 |
py
|
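# Greedy sketch (assuming the second line holds 3*n values): sort in
# descending order and sum every second element starting at index 1,
# n times -- i.e. the second-largest value of each top group.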
n = int(input())
a = sorted(map(int,input().split()),reverse=True)
ans = 0
t = n  # (n*3)//3 simplifies to n
cnt = 0
i = 1
while cnt<t:
cnt += 1
ans += a[i]
i += 2
print(ans)
|
[
"[email protected]"
] | |
7f93026775a2e9e95f6b22976a0c2f2e247b946a
|
930309163b930559929323647b8d82238724f392
|
/DSL_3_C.py
|
12248c6e6506b9376befc0c7bcdb09205d16bb8f
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,398 |
py
|
class Segtree:
n = 1
size = 1
log = 2
d = [0]
op = None
e = 10 ** 15
    def __init__(self, V: "List", OP: "function", E: "identity element"):
self.n = len(V)
self.op = OP
self.e = E
self.log = (self.n - 1).bit_length()
self.size = 1 << self.log
self.d = [E for i in range(2 * self.size)]
for i in range(self.n):
self.d[self.size + i] = V[i]
for i in range(self.size - 1, 0, -1):
self.update(i)
def set(self, p, x): # 1
assert 0 <= p and p < self.n
p += self.size
self.d[p] = x
        for i in range(1, self.log + 1):
            self.update(p >> i)
def get(self, p): # 2
assert 0 <= p and p < self.n
return self.d[p + self.size]
def prod(self, l, r): # 3
assert 0 <= l and l <= r and r <= self.n
sml = smr = self.e
l += self.size; r += self.size
while l < r:
if l & 1:
sml = self.op(sml, self.d[l])
l += 1
if r & 1:
smr = self.op(self.d[r - 1], smr)
r -= 1
l >>= 1; r >>= 1
return self.op(sml, smr)
def all_prod(self): # 4
return self.d[1]
def max_right(self, l, f): # 5
assert 0 <= l and l <= self.n
assert f(self.e)
if l == self.n:
return self.n
l += self.size
sm = self.e
while 1:
while l % 2 == 0:
l >>= 1
if not (f(self.op(sm, self.d[l]))):
while l < self.size:
l = 2 * l
if f(self.op(sm, self.d[l])):
sm = self.op(sm, self.d[l])
l += 1
return l - self.size
sm = self.op(sm, self.d[l])
l += 1
if (l & -l) == l:
break
return self.n
def min_left(self, r, f): # 6
assert 0 <= r and r < self.n
assert f(self.e)
if r == 0:
return 0
r += self.size
sm = self.e
while 1:
r -= 1
            while r > 1 and (r % 2):  # 'and', not bitwise '&', which binds tighter than '>'
r >>= 1
if not (f(self.op(self.d[r], sm))):
while r < self.size:
r = 2 * r + 1
if f(self.op(self.d[r], sm)):
sm = self.op(self.d[r], sm)
r -= 1
return r + 1 - self.size
sm = self.op(self.d[r], sm)
if (r & -r) == r:
break
return 0
def update(self, k): # 7
self.d[k] = self.op(self.d[2 * k], self.d[2 * k + 1])
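# Minimal usage sketch (added; not part of the original submission):
#   st = Segtree([5, 2, 7], OP=lambda a, b: a + b, E=0)
#   st.prod(0, 2)    # -> 7, sum over the half-open range [0, 2)
#   st.set(1, 10)
#   st.all_prod()    # -> 22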
N,Q=map(int,input().split())
*A,=map(int,input().split())
*X,=map(int,input().split())
'''
stree = Segtree(V=A, OP=lambda x,y:x+y, E=0)
import collections
cnt = collections.defaultdict(int)
for left in range(N):
for right in range(left, N):
p = stree.prod(left, right+1)
cnt[p] += 1
for x in X:
print(sum([f for p, f in cnt.items() if p <= x]))
'''
def shakutori(X):
    left, right = 0, 0  # two-pointer ("shakutori") indices
ans = 0
for left in range(N):
tmp = 0
right = left
while right < N and tmp + A[right] <= X:
tmp += A[right]
right += 1
        ans += right - left  # number of non-empty subarrays starting at left with sum <= X
#ans += right - left + 1
print(ans)
for x in X:
shakutori(x)
|
[
"[email protected]"
] | |
853e439c56d563a22f2a948eb8b7b9eed9488bda
|
b6a84594f8c29d968014faaddd49abeb7537a5fc
|
/python/349.intersection-of-two-arrays.py
|
7a451d2011ff6765e416ab3f944dd25ec1210da3
|
[] |
no_license
|
nickyfoto/lc
|
8a6af3df114e693e265d0ede03f4d4e1283e010e
|
3633b4df3e24968057c7d684689b931c5a8032d3
|
refs/heads/master
| 2020-09-16T19:23:07.765917 | 2020-06-07T17:18:06 | 2020-06-07T17:18:06 | 223,866,098 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,496 |
py
|
#
# @lc app=leetcode id=349 lang=python3
#
# [349] Intersection of Two Arrays
#
# https://leetcode.com/problems/intersection-of-two-arrays/description/
#
# algorithms
# Easy (54.07%)
# Total Accepted: 216.7K
# Total Submissions: 398.2K
# Testcase Example: '[1,2,2,1]\n[2,2]'
#
# Given two arrays, write a function to compute their intersection.
#
# Example 1:
#
#
# Input: nums1 = [1,2,2,1], nums2 = [2,2]
# Output: [2]
#
#
#
# Example 2:
#
#
# Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
# Output: [9,4]
#
#
# Note:
#
#
# Each element in the result must be unique.
# The result can be in any order.
#
#
#
#
#
class Solution:
# def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
def intersection(self, nums1, nums2):
d1 = {}
d2 = {}
for i in nums1:
if i not in d1:
d1[i] = 0
for i in nums2:
if i not in d2:
d2[i] = 0
l1, l2 = len(d1), len(d2)
if l1 < l2:
for k in d1:
if k in d2:
d1[k] = 1
            return [k for (k, v) in d1.items() if v]
else:
for k in d2:
if k in d1:
d2[k] = 1
            return [k for (k, v) in d2.items() if v]
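
    # Equivalent one-liner using built-in sets (added for reference):
    # def intersection(self, nums1, nums2):
    #     return list(set(nums1) & set(nums2))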
# s = Solution()
# nums1 = [1,2,2,1]
# nums2 = [2,2]
# print(s.intersection(nums1, nums2))
# nums1 = [4,9,5]
# nums2 = [9,4,9,8,4]
# print(s.intersection(nums1, nums2))
|
[
"[email protected]"
] | |
c1ebd8b8d228c003d0a4407c67ebe600f881f249
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02733/s121858438.py
|
185f83ced0156e8f9333a88aeced5f9f6a733be6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
h,*s=open(0)
h,w,k,*m=map(int,h.split())
b=w
while b:
b-=1;r=t=j=0;d=[0]*h
while w-j:
i=c=0;j+=1
while h-i:
d[c]+=s[i][~j]>'0'
if d[c]>k:d=[0]*h;f=t<j;r-=h*w*~-f-1;t=j;j-=f;break
c+=b>>i&1;i+=1
m+=r+c,
print(min(m))
|
[
"[email protected]"
] | |
bab41f705343005511627b25159e282df8e0bf4c
|
466ac2f617422572ee37a1b0eac9825a141e26fa
|
/thormang3/display_result.py
|
0e9c2b648da72fbd5bbeef54c99eecfdfc421d9a
|
[] |
no_license
|
ahanjaya/Classifying-3D-objects-with-CNN
|
d667bd15dbbdbb5533033d33e93ccc24899788e6
|
03a5edd1ef5aca759e49c49af476470d9e2e37cb
|
refs/heads/master
| 2020-09-28T10:41:24.090638 | 2020-04-17T07:38:19 | 2020-04-17T07:38:19 | 226,761,422 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,884 |
py
|
#!/usr/bin/env python3
import os
import sys
import yaml
import pickle
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
class Results:
def __init__(self):
self.n_folder = 9
self.res_folder = "result/{}".format(self.n_folder)
plt.style.use('seaborn-deep')
plt.rcParams.update({'font.size': 22})
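        # NOTE (added): plot_single_graph/plot_double_graph read self.epochs,
        # which is never assigned in this class; set it to the number of
        # training epochs before calling them. run() only plots the
        # confusion matrix, so it works without it.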
def plot_single_graph(self, mode, vcnn1, voxnet, mvcnn):
plt.style.use('seaborn-deep')
plt.rcParams.update({'font.size': 22})
fig, ax = plt.subplots(1, 1, figsize=(12,8))
        # mode = 'val_loss' or 'val_accuracy' or 'accuracy' or 'loss'
if 'loss' in mode:
val_vcnn1 = np.min( np.array(vcnn1.history[mode]) )
val_voxnet = np.min( np.array(voxnet.history[mode]) )
val_mvcnn1 = np.min( np.array(mvcnn.history[mode]) )
else:
val_vcnn1 = np.max( np.array(vcnn1.history[mode]) )
val_voxnet = np.max( np.array(voxnet.history[mode]) )
val_mvcnn1 = np.max( np.array(mvcnn.history[mode]) )
epochs = range(1,self.epochs + 1)
ax.plot(epochs, vcnn1.history [mode], 'r', label='VCNN1 - {0:.2f}' .format(val_vcnn1))
ax.plot(epochs, voxnet.history[mode], 'b', label='VoxNet - {0:.2f}'.format(val_voxnet))
ax.plot(epochs, mvcnn.history[mode], 'g', label='MVCNN - {0:.2f}'.format(val_mvcnn1))
ax.legend()
ax.grid()
ax.set_xlabel('Epochs')
ax.set_ylabel(mode)
return fig
def plot_double_graph(self, mode, vcnn1, voxnet, mvcnn):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12,8))
        # mode = 'val_loss' or 'val_accuracy' or 'accuracy' or 'loss'
if 'loss' in mode:
train_vcnn1 = np.min( np.array(vcnn1. history['loss']) )
train_voxnet = np.min( np.array(voxnet.history['loss']) )
train_mvcnn1 = np.min( np.array(mvcnn.history['loss']) )
val_vcnn1 = np.min( np.array(vcnn1. history['val_loss']) )
val_voxnet = np.min( np.array(voxnet.history['val_loss']) )
val_mvcnn1 = np.min( np.array(mvcnn.history['val_loss']) )
ax1.set_ylabel('Training Loss')
ax2.set_ylabel('Validation Loss')
else:
train_vcnn1 = np.max( np.array(vcnn1. history['accuracy']) )
train_voxnet = np.max( np.array(voxnet.history['accuracy']) )
train_mvcnn1 = np.max( np.array(mvcnn.history['accuracy']) )
val_vcnn1 = np.max( np.array(vcnn1. history['val_accuracy']) )
val_voxnet = np.max( np.array(voxnet.history['val_accuracy']) )
val_mvcnn1 = np.max( np.array(mvcnn.history['val_accuracy']) )
ax1.set_ylabel('Training Accuracy')
ax2.set_ylabel('Validation Accuracy')
epochs = range(1,self.epochs + 1)
ax1.plot(epochs, vcnn1.history [mode], 'r', label='VCNN1 - {0:.2f}' .format(train_vcnn1))
ax1.plot(epochs, voxnet.history[mode], 'b', label='VoxNet - {0:.2f}'.format(train_voxnet))
ax1.plot(epochs, mvcnn.history[mode], 'g', label='MVCNN - {0:.2f}'.format(train_mvcnn1))
ax2.plot(epochs, vcnn1.history ['val_'+mode], 'r', label='VCNN1 - {0:.2f}' .format(val_vcnn1))
ax2.plot(epochs, voxnet.history['val_'+mode], 'b', label='VoxNet - {0:.2f}'.format(val_voxnet))
ax2.plot(epochs, mvcnn.history['val_'+mode], 'g', label='MVCNN - {0:.2f}'.format(val_mvcnn1))
ax1.legend()
ax2.legend()
ax1.grid()
ax2.grid()
ax1.set_xlabel('Epochs')
ax2.set_xlabel('Epochs')
fig.tight_layout()
def plot_confusion_matrix(self, name, cm, classes, normalize=False, cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]  # row-normalize so each true-label row sums to 1
title='Normalized confusion matrix'
else:
title='Confusion matrix'
# plt.figure(self.plt_num, figsize=(7.5, 6))
# plt.figure(plt_num, figsize=(12, 8))
fig, ax = plt.subplots(1, 1, figsize=(12,8))
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
fig.colorbar(im, ax=ax)
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes)
plt.xticks(rotation=45)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
# fig_name = '{}/{}_{}.png'.format(self.res_folder, self.n_folder, name)
# fig.savefig(fig_name, dpi=fig.dpi)
def run(self):
# load pickle
self.pickle_file = "{}/{}_history.p".format(self.res_folder, self.n_folder)
with open(self.pickle_file, 'rb') as filehandle:
data = pickle.load(filehandle)
cm = data['cm']
# classes = [ 'big_suitcase', 'black_chair', 'blue_chair', 'small_suitcase', 'table']
classes = [ 'big_suitcase', 'black_chair', 'blue_chair', 'small_suitcase']
self.plot_confusion_matrix('MVCNN', cm, classes=classes)
plt.show(block=False)
input('Close: ')
plt.close('all')
if __name__ == '__main__':
res = Results()
res.run()
|
[
"[email protected]"
] | |
1b8b05482735ac92647346a3e0e9bdb226651375
|
d7a68c636e6128533b17975655bd6b46ed222916
|
/adapter-transformers-adapters3.1.0/tests_adapters/test_adapter_trainer.py
|
5849bc7bfe0fb582570626343ee39662971a6b11
|
[
"Apache-2.0"
] |
permissive
|
cambridgeltl/autopeft
|
69179f8faf2cc4d2164ff78e544dc3fe2d39c331
|
d8ad6bea93aa413a54d0e09fe25bdd62b46cfcf5
|
refs/heads/main
| 2023-05-23T09:21:59.912941 | 2023-04-25T14:35:31 | 2023-04-25T14:35:31 | 594,316,585 | 26 | 4 |
Apache-2.0
| 2023-04-25T14:35:32 | 2023-01-28T06:39:25 |
Python
|
UTF-8
|
Python
| false | false | 14,876 |
py
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
from transformers import (
AutoAdapterModel,
AutoModelForSequenceClassification,
AutoTokenizer,
BertConfig,
BertForSequenceClassification,
GlueDataset,
GlueDataTrainingArguments,
Trainer,
TrainingArguments,
)
from transformers.adapters.composition import Fuse, Stack
from transformers.adapters.trainer import AdapterTrainer, logger
from transformers.testing_utils import slow
class TestAdapterTrainer(unittest.TestCase):
def test_resume_training(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.set_active_adapters("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./output",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = AdapterTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
trainer.train()
# create second model that should resume the training of the first
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.set_active_adapters("adapter")
model_resume.train_adapter("adapter")
trainer_resume = AdapterTrainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./output"),
train_dataset=train_dataset,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_resume_training_with_fusion(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model.set_active_adapters(Fuse("adapter", "additional_adapter"))
model.train_fusion(Fuse("adapter", "additional_adapter"))
training_args = TrainingArguments(
output_dir="./output",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = AdapterTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
trainer.train()
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model_resume.set_active_adapters(Fuse("adapter", "additional_adapter"))
model_resume.train_fusion(Fuse("adapter", "additional_adapter"))
trainer_resume = AdapterTrainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./output"),
train_dataset=train_dataset,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(
trainer.model.to("cpu").state_dict().items(), trainer_resume.model.to("cpu").state_dict().items()
):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_auto_set_save_adapters(self):
model = BertForSequenceClassification(
BertConfig(
hidden_size=32,
num_hidden_layers=4,
num_attention_heads=4,
intermediate_size=37,
)
)
model.add_adapter("adapter1")
model.add_adapter("adapter2")
model.add_adapter_fusion(Fuse("adapter1", "adapter2"))
model.train_adapter_fusion(Fuse("adapter1", "adapter2"))
training_args = TrainingArguments(
output_dir="./output",
)
trainer = AdapterTrainer(
model=model,
args=training_args,
)
self.assertTrue(trainer.train_adapter_fusion)
@slow
def test_training_load_best_model_at_end_full_model(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./output",
do_train=True,
learning_rate=0.001,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
load_best_model_at_end=True,
evaluation_strategy="epoch",
save_strategy="epoch",
num_train_epochs=2,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train()
self.assertIsNotNone(trainer.model.active_adapters)
def test_training_load_best_model_at_end_adapter(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./output",
do_train=True,
learning_rate=0.001,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
load_best_model_at_end=True,
evaluation_strategy="epoch",
save_strategy="epoch",
num_train_epochs=2,
)
trainer = AdapterTrainer(
model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset
)
with self.assertLogs(logger) as cm:
trainer.train()
self.assertTrue(any("Loading best adapter(s) from" in line for line in cm.output))
self.assertEqual(Stack("adapter"), trainer.model.active_adapters)
def test_training_load_best_model_at_end_fusion(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("fuse_adapter_1")
model.add_adapter("fuse_adapter_2")
model.add_adapter_fusion(Fuse("fuse_adapter_1", "fuse_adapter_2"))
model.train_adapter_fusion(Fuse("fuse_adapter_1", "fuse_adapter_2"))
training_args = TrainingArguments(
output_dir="./output",
do_train=True,
learning_rate=0.001,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
load_best_model_at_end=True,
evaluation_strategy="epoch",
save_strategy="epoch",
num_train_epochs=2,
)
trainer = AdapterTrainer(
model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset
)
with self.assertLogs(logger) as cm:
trainer.train()
self.assertTrue(any("Loading best adapter fusion(s) from" in line for line in cm.output))
self.assertEqual(Fuse("fuse_adapter_1", "fuse_adapter_2"), trainer.model.active_adapters)
def test_reloading_prediction_head(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoAdapterModel.from_pretrained("bert-base-uncased")
model.add_classification_head("adapter", num_labels=3)
model.add_classification_head("dummy", num_labels=2)
# add the adapters to be fused
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
# setup fusion
adapter_setup = Fuse("adapter", "additional_adapter")
model.add_adapter_fusion(adapter_setup)
model.train_adapter_fusion(adapter_setup)
model.set_active_adapters(adapter_setup)
self.assertEqual(adapter_setup, model.active_adapters)
self.assertEqual("dummy", model.active_head)
with TemporaryDirectory() as tempdir:
training_args = TrainingArguments(
output_dir=tempdir,
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = AdapterTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
trainer.train()
# create second model that should resume the training of the first
model_resume = AutoAdapterModel.from_pretrained("bert-base-uncased")
model_resume.add_classification_head("adapter", num_labels=3)
model_resume.add_classification_head("dummy", num_labels=2)
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
# setup fusion
adapter_setup = Fuse("adapter", "additional_adapter")
model_resume.add_adapter_fusion(adapter_setup)
model_resume.train_adapter_fusion(adapter_setup)
model_resume.set_active_adapters(adapter_setup)
trainer_resume = AdapterTrainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir=tempdir),
train_dataset=train_dataset,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual("dummy", model.active_head)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(
trainer.model.to("cpu").state_dict().items(), trainer_resume.model.to("cpu").state_dict().items()
):
self.assertEqual(k1, k2)
if "adapter" in k1 or "dummy" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_general(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoAdapterModel.from_pretrained("bert-base-uncased")
model.add_classification_head("task", num_labels=3)
# add the adapters to be fused
model.add_adapter("task")
model.add_adapter("additional_adapter")
model.train_adapter("task")
self.assertEqual("task", model.active_head)
self.assertEqual(Stack("task"), model.active_adapters)
with TemporaryDirectory() as tempdir:
training_args = TrainingArguments(
output_dir=tempdir,
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = AdapterTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
trainer.train()
# Check that adapters are actually saved but the full model is not
            files_dir_checkpoint = os.listdir(os.path.join(tempdir, "checkpoint-1"))
self.assertTrue("task" in files_dir_checkpoint)
self.assertTrue("additional_adapter" in files_dir_checkpoint)
# Check that full model weights are not stored
self.assertFalse("pytorch_model.bin" in files_dir_checkpoint)
# this should always be false in the adapter trainer
self.assertFalse(trainer.args.remove_unused_columns)
self.assertEqual("task", model.active_head)
self.assertEqual(Stack("task"), model.active_adapters)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
bd4492e32b8c961efd74ceda5ee7197a84dbcbcc
|
2ae24d0c6d91df960a2ca68a0b7a754a69d4fe18
|
/web/exmr/apps/merchant_tools/migrations/0011_multipayment.py
|
d15d79ad3a8e90ae65584c71692fb62d2c618350
|
[] |
no_license
|
exmrcoin/project-gcps.io
|
2fc2a0a207ce1282d616a8a680aef938fbcf5352
|
c0071e63406845a5f3dbbe33ae65673cacc271f8
|
refs/heads/master
| 2023-01-04T02:08:47.893847 | 2020-10-29T17:03:34 | 2020-10-29T17:03:34 | 121,253,614 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 962 |
py
|
# Generated by Django 2.0.2 on 2018-07-19 09:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coins', '0007_auto_20180718_1131'),
('merchant_tools', '0010_posqrmaker'),
]
operations = [
migrations.CreateModel(
name='MultiPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paid_amount', models.CharField(max_length=512)),
('eq_usd', models.CharField(blank=True, max_length=512, null=True)),
('paid_unique_id', models.CharField(blank=True, max_length=512, null=True)),
('transaction_id', models.CharField(max_length=64)),
('paid_in', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coins.Coin')),
],
),
]
|
[
"[email protected]"
] | |
64bc6044c4ab411bede2d052e9166ee92b5e82f2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02633/s580079860.py
|
d8cc6fb47910c5df1bc706994a741571fb146b87
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 85 |
py
|
from math import gcd
x = int(input())
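# Number of x-degree rotations needed to return to the start: lcm(x, 360) / x.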
lcm = (x * 360) // gcd(x, 360)
print(lcm // x)
|
[
"[email protected]"
] | |
859b839c18cb19d795cf2a8c7cc2205a86b061a6
|
e22e03d9761f5c6d581b5af2e77343e8ee4b201d
|
/edk2/BaseTools/Scripts/PackageDocumentTools/packagedocapp.pyw
|
df3b46c49fd23477f3507593a06a26bafcbc9d5a
|
[
"OpenSSL",
"BSD-2-Clause"
] |
permissive
|
SamuelTulach/SecureFakePkg
|
759975fcc84d62f05ac577da48353752e5334878
|
f34080a6c0efb6ca3dd755365778d0bcdca6b991
|
refs/heads/main
| 2023-08-17T07:51:22.175924 | 2021-10-01T10:46:14 | 2021-10-01T10:46:14 | 410,938,306 | 94 | 14 | null | null | null | null |
UTF-8
|
Python
| false | false | 46,029 |
pyw
|
## @file
# This file implements the GUI application for generating doxygen-based
# documentation for an EDK II package
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os, sys, wx, logging
import wx.stc
import wx.lib.newevent
import wx.lib.agw.genericmessagedialog as GMD
from plugins.EdkPlugins.edk2.model import baseobject
from plugins.EdkPlugins.edk2.model import doxygengen
if hasattr(sys, "frozen"):
appPath = os.path.abspath(os.path.dirname(sys.executable))
else:
appPath = os.path.abspath(os.path.dirname(__file__))
AppCallBackEvent, EVT_APP_CALLBACK = wx.lib.newevent.NewEvent()
LogEvent, EVT_LOG = wx.lib.newevent.NewEvent()
class PackageDocApp(wx.App):
def OnInit(self):
logfile = os.path.join(appPath, 'log.txt')
logging.basicConfig(format='%(name)-8s %(levelname)-8s %(message)s',
filename=logfile, level=logging.ERROR)
self.SetAppName('Package Doxygen Generate Application')
frame = PackageDocMainFrame(None, "Package Document Generation Application!")
self.SetTopWindow(frame)
frame.Show(True)
EVT_APP_CALLBACK( self, self.OnAppCallBack)
return True
def GetLogger(self):
return logging.getLogger('')
def ForegroundProcess(self, function, args):
wx.PostEvent(self, AppCallBackEvent(callback=function, args=args))
def OnAppCallBack(self, event):
try:
event.callback(*event.args)
        except:
            self.GetLogger().exception('OnAppCallBack<%s.%s>\n' %
                                       (event.callback.__module__, event.callback.__name__))
class PackageDocMainFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title, size=(550, 290), style=wx.MINIMIZE_BOX|wx.SYSTEM_MENU|wx.CAPTION|wx.CLOSE_BOX )
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
subsizer = wx.GridBagSizer(5, 10)
subsizer.AddGrowableCol(1)
subsizer.Add(wx.StaticText(panel, -1, "Workspace Location : "), (0, 0), flag=wx.ALIGN_CENTER_VERTICAL)
self._workspacePathCtrl = wx.ComboBox(panel, -1)
list = self.GetConfigure("WorkspacePath")
if len(list) != 0:
for item in list:
self._workspacePathCtrl.Append(item)
self._workspacePathCtrl.SetValue(list[len(list) - 1])
subsizer.Add(self._workspacePathCtrl, (0, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
self._workspacePathBt = wx.BitmapButton(panel, -1, bitmap=wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN))
subsizer.Add(self._workspacePathBt, (0, 2), flag=wx.ALIGN_CENTER_VERTICAL)
wx.EVT_BUTTON(self._workspacePathBt, self._workspacePathBt.GetId(), self.OnBrowsePath)
subsizer.Add(wx.StaticText(panel, -1, "Package DEC Location : "), (1, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
self._packagePathCtrl = wx.ComboBox(panel, -1)
list = self.GetConfigure("PackagePath")
if len(list) != 0:
for item in list:
self._packagePathCtrl.Append(item)
self._packagePathCtrl.SetValue(list[len(list) - 1])
subsizer.Add(self._packagePathCtrl, (1, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
self._packagePathBt = wx.BitmapButton(panel, -1, bitmap=wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN))
subsizer.Add(self._packagePathBt, (1, 2), flag=wx.ALIGN_CENTER_VERTICAL)
wx.EVT_BUTTON(self._packagePathBt, self._packagePathBt.GetId(), self.OnBrowsePath)
subsizer.Add(wx.StaticText(panel, -1, "Doxygen Tool Location : "), (2, 0), flag=wx.ALIGN_CENTER_VERTICAL)
self._doxygenPathCtrl = wx.TextCtrl(panel, -1)
list = self.GetConfigure('DoxygenPath')
if len(list) != 0:
self._doxygenPathCtrl.SetValue(list[0])
else:
if wx.Platform == '__WXMSW__':
self._doxygenPathCtrl.SetValue('C:\\Program Files\\Doxygen\\bin\\doxygen.exe')
else:
self._doxygenPathCtrl.SetValue('/usr/bin/doxygen')
self._doxygenPathBt = wx.BitmapButton(panel, -1, bitmap=wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN))
subsizer.Add(self._doxygenPathCtrl, (2, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
subsizer.Add(self._doxygenPathBt, (2, 2), flag=wx.ALIGN_CENTER_VERTICAL)
wx.EVT_BUTTON(self._doxygenPathBt, self._doxygenPathBt.GetId(), self.OnBrowsePath)
subsizer.Add(wx.StaticText(panel, -1, "CHM Tool Location : "), (3, 0), flag=wx.ALIGN_CENTER_VERTICAL)
self._chmPathCtrl = wx.TextCtrl(panel, -1)
list = self.GetConfigure('CHMPath')
if len(list) != 0:
self._chmPathCtrl.SetValue(list[0])
else:
self._chmPathCtrl.SetValue('C:\\Program Files\\HTML Help Workshop\\hhc.exe')
self._chmPathBt = wx.BitmapButton(panel, -1, bitmap=wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN))
subsizer.Add(self._chmPathCtrl, (3, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
subsizer.Add(self._chmPathBt, (3, 2), flag=wx.ALIGN_CENTER_VERTICAL)
wx.EVT_BUTTON(self._chmPathBt, self._chmPathBt.GetId(), self.OnBrowsePath)
subsizer.Add(wx.StaticText(panel, -1, "Output Location : "), (4, 0), flag=wx.ALIGN_CENTER_VERTICAL)
self._outputPathCtrl = wx.ComboBox(panel, -1)
list = self.GetConfigure("OutputPath")
if len(list) != 0:
for item in list:
self._outputPathCtrl.Append(item)
self._outputPathCtrl.SetValue(list[len(list) - 1])
subsizer.Add(self._outputPathCtrl, (4, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
self._outputPathBt = wx.BitmapButton(panel, -1, bitmap=wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN))
subsizer.Add(self._outputPathBt, (4, 2), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
wx.EVT_BUTTON(self._outputPathBt, self._outputPathBt.GetId(), self.OnBrowsePath)
subsizer.Add(wx.StaticText(panel, -1, "Architecture Specified : "), (5, 0), flag=wx.ALIGN_CENTER_VERTICAL)
self._archCtrl = wx.ComboBox(panel, -1, value='ALL', choices=['ALL', 'IA32/MSFT', 'IA32/GNU', 'X64/INTEL', 'X64/GNU', 'IPF/MSFT', 'IPF/GNU', 'EBC/INTEL'],
style=wx.CB_READONLY)
self._archCtrl.Bind(wx.EVT_COMBOBOX, self.OnArchtectureSelectChanged)
subsizer.Add(self._archCtrl, (5, 1), (1, 2), flag=wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
sizer.Add(subsizer, 0, wx.EXPAND|wx.TOP|wx.BOTTOM|wx.LEFT|wx.RIGHT, 5)
sizer6 = wx.BoxSizer(wx.HORIZONTAL)
self._modesel = wx.RadioBox(panel, -1, 'Generated Document Mode', majorDimension=2, choices=['CHM', 'HTML'], style=wx.RA_SPECIFY_COLS)
self._modesel.SetStringSelection('HTML')
self._includeonlysel = wx.CheckBox(panel, -1, 'Only document public include')
sizer6.Add(self._modesel, 0 , wx.EXPAND)
sizer6.Add(self._includeonlysel, 0, wx.EXPAND|wx.LEFT, 5)
sizer.Add(sizer6, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 5)
self._generateBt = wx.Button(panel, -1, "Generate Package Document!")
self._generateBt.Bind(wx.EVT_BUTTON, self.OnGenerate)
sizer.Add(self._generateBt, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 5)
panel.SetSizer(sizer)
panel.Layout()
panel.SetAutoLayout(True)
self.CenterOnScreen()
def SaveConfigure(self, name, value):
        if value is None or len(value) == 0:
return
config = wx.ConfigBase_Get()
oldvalues = config.Read(name, '').split(';')
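        # Keep a most-recently-used list of at most 10 entries; re-saving an
        # existing value moves it to the end of the list.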
if len(oldvalues) >= 10:
oldvalues.remove(oldvalues[0])
if value not in oldvalues:
oldvalues.append(value)
else:
oldvalues.remove(value)
oldvalues.append(value)
config.Write(name, ';'.join(oldvalues))
def GetConfigure(self, name):
config = wx.ConfigBase_Get()
values = config.Read(name, '').split(';')
list = []
for item in values:
if len(item) != 0:
list.append(item)
return list
def OnBrowsePath(self, event):
id = event.GetId()
editctrl = None
startdir = ''
isFile = False
if id == self._packagePathBt.GetId():
dlgTitle = "Choose package path:"
editctrl = self._packagePathCtrl
isFile = True
if os.path.exists(self.GetWorkspace()):
startdir = self.GetWorkspace()
elif id == self._workspacePathBt.GetId():
dlgTitle = "Choose workspace path:"
editctrl = self._workspacePathCtrl
startdir = editctrl.GetValue()
elif id == self._doxygenPathBt.GetId():
isFile = True
dlgTitle = "Choose doxygen installation path:"
editctrl = self._doxygenPathCtrl
startdir = editctrl.GetValue()
elif id == self._outputPathBt.GetId():
dlgTitle = "Choose document output path:"
editctrl = self._outputPathCtrl
            if os.path.exists(self.GetWorkspace()):
                startdir = self.GetWorkspace()
            # prefer a previously entered output path over the workspace default
            if editctrl.GetValue():
                startdir = editctrl.GetValue()
elif id == self._chmPathBt.GetId():
isFile = True
dlgTitle = "Choose installation path for Microsoft HTML workshop software"
editctrl = self._chmPathCtrl
startdir = editctrl.GetValue()
else:
return
if not isFile:
dlg = wx.DirDialog(self, dlgTitle, defaultPath=startdir)
else:
dlg = wx.FileDialog(self, dlgTitle, defaultDir=startdir)
if dlg.ShowModal() == wx.ID_OK:
editctrl.SetValue(dlg.GetPath())
dlg.Destroy()
def OnArchtectureSelectChanged(self, event):
str = ''
selarch = self._archCtrl.GetValue()
if selarch == 'ALL':
str += 'MDE_CPU_IA32 MDE_CPU_X64 MDE_CPU_EBC MDE_CPU_IPF _MSC_EXTENSIONS __GNUC__ __INTEL_COMPILER'
elif selarch == 'IA32/MSFT':
str += 'MDE_CPU_IA32 _MSC_EXTENSIONS'
elif selarch == 'IA32/GNU':
str += 'MDE_CPU_IA32 __GNUC__'
elif selarch == 'X64/MSFT':
str += 'MDE_CPU_X64 _MSC_EXTENSIONS'
elif selarch == 'X64/GNU':
str += 'MDE_CPU_X64 __GNUC__'
elif selarch == 'IPF/MSFT':
str += 'MDE_CPU_IPF _MSC_EXTENSIONS'
elif selarch == 'IPF/GNU':
str += 'MDE_CPU_IPF __GNUC__'
elif selarch == 'EBC/INTEL':
str += 'MDE_CPU_EBC __INTEL_COMPILER'
str += ' ASM_PFX= OPTIONAL= '
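        # NOTE: the predefined-macro string assembled above is never stored or
        # applied; OnMacroText below repeats the same construction. The
        # 'X64/INTEL' combobox choice also never matches the 'X64/MSFT' branch.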
def OnMacroText(self, event):
str = ''
selarch = self._archCtrl.GetValue()
if selarch == 'ALL':
str += 'MDE_CPU_IA32 MDE_CPU_X64 MDE_CPU_EBC MDE_CPU_IPF _MSC_EXTENSIONS __GNUC__ __INTEL_COMPILER'
elif selarch == 'IA32/MSFT':
str += 'MDE_CPU_IA32 _MSC_EXTENSIONS'
elif selarch == 'IA32/GNU':
str += 'MDE_CPU_IA32 __GNUC__'
elif selarch == 'X64/MSFT':
str += 'MDE_CPU_X64 _MSC_EXTENSIONS'
elif selarch == 'X64/GNU':
str += 'MDE_CPU_X64 __GNUC__'
elif selarch == 'IPF/MSFT':
str += 'MDE_CPU_IPF _MSC_EXTENSIONS'
elif selarch == 'IPF/GNU':
str += 'MDE_CPU_IPF __GNUC__'
elif selarch == 'EBC/INTEL':
str += 'MDE_CPU_EBC __INTEL_COMPILER'
str += ' ASM_PFX= OPTIONAL= '
def OnGenerate(self, event):
if not self.CheckInput(): return
dlg = ProgressDialog(self)
dlg.ShowModal()
dlg.Destroy()
def CheckInput(self):
pPath = self.GetPackagePath()
wPath = self.GetWorkspace()
dPath = self.GetDoxygenToolPath()
cPath = self.GetChmToolPath()
oPath = self.GetOutputPath()
if len(wPath) == 0 or not os.path.exists(wPath):
self._Error("Please input existing workspace path!")
return False
else:
self.SaveConfigure('WorkspacePath', wPath)
if len(pPath) == 0 or not os.path.exists(pPath) or not pPath.lower().endswith('.dec'):
self._Error("Please input existing package file location!")
return False
elif pPath.lower().find(wPath.lower()) == -1:
self._Error("Package patch should starts with workspace path, such as if workspace path is c:\\edk2, package patch could be c:\\edk2\MdePkg")
return False
else:
self.SaveConfigure('PackagePath', pPath)
if len(dPath) == 0 or not os.path.exists(dPath):
self._Error("Can not find doxygen tool from path %s! Please download it from www.stack.nl/~dimitri/doxygen/download.html" % dPath)
return False
else:
self.SaveConfigure('DoxygenPath', dPath)
if self._modesel.GetStringSelection() == 'CHM':
if (len(cPath) == 0 or not os.path.exists(cPath)):
self._Error("You select CHM mode to generate document, but can not find software of Microsoft HTML Help Workshop.\nPlease\
download it from http://www.microsoft.com/downloads/details.aspx?FamilyID=00535334-c8a6-452f-9aa0-d597d16580cc&displaylang=en\n\
and install!")
return False
else:
self.SaveConfigure('CHMPath', cPath)
if len(oPath) == 0:
self._Error("You must specific document output path")
return False
else:
self.SaveConfigure('OutputPath', oPath)
if os.path.exists(oPath):
# add checking whether there is old doxygen config file here
files = os.listdir(oPath)
for file in files:
if os.path.isfile(os.path.join(oPath,file)):
basename, ext = os.path.splitext(file)
if ext.lower() == '.doxygen_config':
dlg = GMD.GenericMessageDialog(self, "Existing doxygen document in output directory will be overwritten\n, Are you sure?",
"Info", wx.ICON_WARNING|wx.YES_NO)
if dlg.ShowModal() == wx.ID_YES:
break
else:
return False
else:
try:
os.makedirs(oPath)
except:
self._Error("Fail to create output directory, please select another output directory!")
return False
return True
def _Error(self, message):
dlg = GMD.GenericMessageDialog(self, message,
"Error", wx.ICON_ERROR|wx.OK)
dlg.ShowModal()
dlg.Destroy()
def GetWorkspace(self):
return os.path.normpath(self._workspacePathCtrl.GetValue())
def GetPackagePath(self):
return os.path.normpath(self._packagePathCtrl.GetValue())
def GetOutputPath(self):
return os.path.normpath(self._outputPathCtrl.GetValue())
def GetDoxygenToolPath(self):
return os.path.normpath(self._doxygenPathCtrl.GetValue())
def GetChmToolPath(self):
return os.path.normpath(self._chmPathCtrl.GetValue())
def GetDocumentMode(self):
return self._modesel.GetStringSelection()
def GetArchitecture(self):
value = self._archCtrl.GetValue()
return value.split('/')[0]
def GetToolTag(self):
value = self._archCtrl.GetValue()
if value == 'ALL':
return 'ALL'
return value.split('/')[1]
def GetIsOnlyDocumentInclude(self):
return self._includeonlysel.IsChecked()
class ProgressDialog(wx.Dialog):
def __init__(self, parent, id=wx.ID_ANY):
title = "Generate Document for " + parent.GetPackagePath()
wx.Dialog.__init__(self, parent, id, title=title, style=wx.CAPTION, size=(600, 300))
self.Freeze()
sizer = wx.BoxSizer(wx.VERTICAL)
self._textCtrl = wx.StaticText(self, -1, "Start launching!")
self._gaugeCtrl = wx.Gauge(self, -1, 100, size=(-1, 10))
self._resultCtrl = wx.stc.StyledTextCtrl(self, -1)
self._closeBt = wx.Button(self, -1, "Close")
self._gotoOuputBt = wx.Button(self, -1, "Goto Output")
# clear all margin
self._resultCtrl.SetMarginWidth(0, 0)
self._resultCtrl.SetMarginWidth(1, 0)
self._resultCtrl.SetMarginWidth(2, 0)
sizer.Add(self._textCtrl, 0, wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT, 5)
sizer.Add(self._gaugeCtrl, 0, wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT, 5)
sizer.Add(self._resultCtrl, 1, wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT, 5)
btsizer = wx.BoxSizer(wx.HORIZONTAL)
btsizer.Add(self._gotoOuputBt, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.TOP|wx.LEFT|wx.BOTTOM, 5)
btsizer.Add(self._closeBt, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.TOP|wx.LEFT|wx.BOTTOM, 5)
sizer.Add(btsizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
self.CenterOnScreen()
self.Thaw()
self._logger = logging.getLogger('')
self._loghandle = ResultHandler(self)
logging.getLogger('edk').addHandler(self._loghandle)
logging.getLogger('').addHandler(self._loghandle)
logging.getLogger('app').addHandler(self._loghandle)
wx.EVT_BUTTON(self._closeBt, self._closeBt.GetId(), self.OnButtonClose)
wx.EVT_UPDATE_UI(self, self._closeBt.GetId(), self.OnUpdateCloseButton)
wx.EVT_BUTTON(self._gotoOuputBt, self._gotoOuputBt.GetId(), self.OnGotoOutput)
EVT_LOG(self, self.OnPostLog)
self._process = None
self._pid = None
self._input = None
self._output = None
self._error = None
self._inputThread = None
self._errorThread = None
self._isBusy = True
self._pObj = None
wx.CallAfter(self.GenerateAction)
def OnUpdateCloseButton(self, event):
self._closeBt.Enable(not self._isBusy)
return True
def OnButtonClose(self, event):
if self._isBusy:
self._InfoDialog("Please don't close in progressing...")
return
if self._process != None:
self._process.CloseOutput()
if self._inputThread:
self._inputThread.Terminate()
if self._errorThread:
self._errorThread.Terminate()
if self._pid != None:
wx.Process.Kill(self._pid, wx.SIGKILL, wx.KILL_CHILDREN)
logging.getLogger('edk').removeHandler(self._loghandle)
logging.getLogger('').removeHandler(self._loghandle)
logging.getLogger('app').removeHandler(self._loghandle)
if self._pObj != None:
self._pObj.Destroy()
self.EndModal(0)
def OnGotoOutput(self, event):
output = self.GetParent().GetOutputPath()
if os.path.exists(output):
if wx.Platform == '__WXMSW__':
os.startfile(self.GetParent().GetOutputPath())
else:
import webbrowser
webbrowser.open(self.GetParent().GetOutputPath())
else:
self._ErrorDialog("Output directory does not exist!")
def _ErrorDialog(self, message):
dlg = GMD.GenericMessageDialog(self, message,
"Error", wx.ICON_ERROR|wx.OK)
dlg.ShowModal()
dlg.Destroy()
def _InfoDialog(self, message):
dlg = GMD.GenericMessageDialog(self, message,
"Info", wx.ICON_INFORMATION|wx.OK)
dlg.ShowModal()
dlg.Destroy()
def _LogStep(self, index, message):
stepstr = "Step %d: %s" % (index, message)
self._textCtrl.SetLabel(stepstr)
self.LogMessage(os.linesep + stepstr + os.linesep)
        self._gaugeCtrl.SetValue(index * 100 / 6)
def OnPostLog(self, event):
self.LogMessage(event.message)
def GenerateAction(self):
self._LogStep(1, "Create Package Object Model")
wsPath = self.GetParent().GetWorkspace()
pkPath = self.GetParent().GetPackagePath()[len(wsPath) + 1:]
try:
pObj = baseobject.Package(None, self.GetParent().GetWorkspace())
pObj.Load(pkPath)
except:
self._ErrorDialog("Fail to create package object model! Please check log.txt under this application folder!")
self._isBusy = False
return
self._pObj = pObj
self.LogMessage(str(pObj.GetPcds()))
self._LogStep(2, "Preprocess and Generate Doxygen Config File")
try:
action = doxygengen.PackageDocumentAction(self.GetParent().GetDoxygenToolPath(),
self.GetParent().GetChmToolPath(),
self.GetParent().GetOutputPath(),
pObj,
self.GetParent().GetDocumentMode(),
self.LogMessage,
self.GetParent().GetArchitecture(),
self.GetParent().GetToolTag(),
self.GetParent().GetIsOnlyDocumentInclude(),
True)
except:
self._ErrorDialog("Fail to preprocess! Please check log.txt under this application folder!")
self._isBusy = False
return
action.RegisterCallbackDoxygenProcess(self.CreateDoxygeProcess)
try:
if not action.Generate():
self._isBusy = False
self.LogMessage("Fail to generate package document! Please check log.txt under this application folder!", 'error')
except:
import traceback
message = traceback.format_exception(*sys.exc_info())
logging.getLogger('').error(''.join(message))
self._isBusy = False
self._ErrorDialog("Fail to generate package document! Please check log.txt under this application folder!")
def LogMessage(self, message, level='info'):
self._resultCtrl.DocumentEnd()
self._resultCtrl.SetReadOnly(False)
self._resultCtrl.AppendText(message)
self._resultCtrl.Home()
self._resultCtrl.Home()
self._resultCtrl.SetReadOnly(True)
if level == 'error':
wx.GetApp().GetLogger().error(message)
def CreateDoxygeProcess(self, doxPath, configFile):
self._LogStep(3, "Launch Doxygen Tool and Generate Package Document")
cmd = '"%s" %s' % (doxPath, configFile)
try:
self._process = DoxygenProcess()
self._process.SetParent(self)
self._process.Redirect()
self._pid = wx.Execute(cmd, wx.EXEC_ASYNC, self._process)
self._input = self._process.GetInputStream()
self._output = self._process.GetOutputStream()
self._error = self._process.GetErrorStream()
except:
            self._ErrorDialog('Failed to launch doxygen cmd %s! Please check log.txt under this application folder!' % cmd)
self._isBusy = False
return False
self._inputThread = MonitorThread(self._input, self.LogMessage)
self._errorThread = MonitorThread(self._error, self.LogMessage)
self._inputThread.start()
self._errorThread.start()
return True
def OnTerminateDoxygenProcess(self):
if self._inputThread:
self._inputThread.Terminate()
self._inputThread = None
if self._errorThread:
self._errorThread.Terminate()
self._errorThread = None
if self._error:
while self._error.CanRead():
text = self._error.read()
self.LogMessage(text)
if self._input:
while self._input.CanRead():
text = self._input.read()
self.LogMessage(text)
self._process.Detach()
self._process.CloseOutput()
self._process = None
self._pid = None
self.DocumentFixup()
if self.GetParent().GetDocumentMode().lower() == 'chm':
hhcfile = os.path.join(self.GetParent().GetOutputPath(), 'html', 'index.hhc')
hhpfile = os.path.join(self.GetParent().GetOutputPath(), 'html', 'index.hhp')
self.FixDecDoxygenFileLink(hhcfile, None)
if not self.CreateCHMProcess(self.GetParent().GetChmToolPath(), hhpfile):
self._ErrorDialog("Fail to Create %s process for %s" % (self.GetParent().GetChmToolPath(), hhpfile))
self._isBusy = False
else:
self._LogStep(6, "Finished Document Generation!")
self._isBusy = False
indexpath = os.path.realpath(os.path.join(self.GetParent().GetOutputPath(), 'html', 'index.html'))
if wx.Platform == '__WXMSW__':
os.startfile(indexpath)
else:
import webbrowser
webbrowser.open(indexpath)
            self._InfoDialog('Successfully created HTML doxygen document %s' % indexpath)
def CreateCHMProcess(self, chmPath, hhpfile):
self.LogMessage(" >>>>>> Start Microsoft HTML workshop process...Zzz...\n")
cmd = '"%s" %s' % (chmPath, hhpfile)
try:
self._process = CHMProcess()
self._process.SetParent(self)
self._process.Redirect()
self._pid = wx.Execute(cmd, wx.EXEC_ASYNC, self._process)
self._input = self._process.GetInputStream()
self._output = self._process.GetOutputStream()
self._error = self._process.GetErrorStream()
except:
            self.LogMessage('\nFailed to launch hhp cmd %s!\n' % cmd)
self._isBusy = False
return False
self._inputThread = MonitorThread(self._input, self.LogMessage)
self._errorThread = MonitorThread(self._error, self.LogMessage)
self._inputThread.start()
self._errorThread.start()
return True
def OnTerminateCHMProcess(self):
if self._inputThread:
self._inputThread.Terminate()
self._inputThread = None
if self._errorThread:
self._errorThread.Terminate()
self._errorThread = None
if self._error:
while self._error.CanRead():
text = self._error.read()
self.LogMessage(text)
if self._input:
while self._input.CanRead():
text = self._input.read()
self.LogMessage(text)
self._process.Detach()
self._process.CloseOutput()
self._process = None
self._pid = None
self._isBusy = False
indexpath = os.path.realpath(os.path.join(self.GetParent().GetOutputPath(), 'html', 'index.chm'))
if os.path.exists(indexpath):
if wx.Platform == '__WXMSW__':
os.startfile(indexpath)
else:
import webbrowser
webbrowser.open(indexpath)
self._LogStep(6, "Finished Document Generation!")
        self.LogMessage('\nSuccessfully created CHM doxygen document %s\n' % indexpath)
        self._InfoDialog('Successfully created CHM doxygen document %s' % indexpath)
def DocumentFixup(self):
# find BASE_LIBRARY_JUMP_BUFFER structure reference page
self._LogStep(4, "Fixup Package Document!")
self.LogMessage('\n >>> Start fixup document \n')
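        # Walk every generated HTML page and apply targeted fixups: arch-specific
        # #defines and duplicated struct links (when documenting all architectures
        # at once), entry-point pages, and links to generated .dox file names.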
for root, dirs, files in os.walk(os.path.join(self.GetParent().GetOutputPath(), 'html')):
for dir in dirs:
if dir.lower() in ['.svn', '_svn', 'cvs']:
dirs.remove(dir)
for file in files:
wx.YieldIfNeeded()
if not file.lower().endswith('.html'): continue
fullpath = os.path.join(self.GetParent().GetOutputPath(), root, file)
try:
f = open(fullpath, 'r')
text = f.read()
f.close()
except:
                    self.LogMessage('\nFailed to open file %s\n' % fullpath)
continue
if text.find('BASE_LIBRARY_JUMP_BUFFER Struct Reference') != -1 and self.GetParent().GetArchitecture() == 'ALL':
self.FixPageBASE_LIBRARY_JUMP_BUFFER(fullpath, text)
if text.find('MdePkg/Include/Library/BaseLib.h File Reference') != -1 and self.GetParent().GetArchitecture() == 'ALL':
self.FixPageBaseLib(fullpath, text)
if text.find('IA32_IDT_GATE_DESCRIPTOR Union Reference') != -1 and self.GetParent().GetArchitecture() == 'ALL':
self.FixPageIA32_IDT_GATE_DESCRIPTOR(fullpath, text)
if text.find('MdePkg/Include/Library/UefiDriverEntryPoint.h File Reference') != -1:
self.FixPageUefiDriverEntryPoint(fullpath, text)
if text.find('MdePkg/Include/Library/UefiApplicationEntryPoint.h File Reference') != -1:
self.FixPageUefiApplicationEntryPoint(fullpath, text)
if text.lower().find('.s.dox') != -1 or \
text.lower().find('.asm.dox') != -1 or \
text.lower().find('.uni.dox') != -1:
self.FixDoxFileLink(fullpath, text)
self.RemoveFileList()
self.LogMessage(' >>> Finish all document fixing up! \n')
def RemoveFileList(self):
path_html = os.path.join(self.GetParent().GetOutputPath(), "html", "tree.html")
path_chm = os.path.join(self.GetParent().GetOutputPath(), "html", "index.hhc")
if os.path.exists(path_html):
            self.LogMessage(' >>>Remove FileList item from generated HTML document.\n')
lines = []
f = open (path_html, "r")
lines = f.readlines()
f.close()
bfound = False
for index in range(len(lines)):
if lines[index].find('<a class="el" href="files.html" target="basefrm">File List</a>') != -1:
lines[index] = "<!-- %s" % lines[index]
bfound = True
continue
if bfound:
if lines[index].find('</div>') != -1:
lines[index] = "%s -->" % lines[index]
break
if bfound:
f = open(path_html, "w")
f.write("".join(lines))
f.close()
else:
                self.LogMessage(' !!!Cannot find FileList item in HTML document!\n')
if os.path.exists(path_chm):
self.LogMessage(" >>>Warning: Can not remove FileList for CHM files!\n");
"""
self.LogMessage(' >>>Remove FileList item from generated CHM document!\n');
lines = []
f = open (path_chm, "r")
lines = f.readlines()
f.close()
bfound = False
for index in xrange(len(lines)):
if not bfound:
if lines[index].find('<param name="Local" value="files.html">') != -1:
lines[index] = '<!-- %s' % lines[index]
bfound = True
continue
if bfound:
if lines[index].find('</UL>') != -1:
lines[index] = '%s -->\n' % lines[index].rstrip()
break
if bfound:
f = open(path_chm, "w")
f.write("".join(lines))
f.close()
import time
time.sleep(2)
else:
self.LogMessage(' !!!Can not found the FileList item in CHM document!')
"""
def FixPageBaseLib(self, path, text):
self.LogMessage(' >>> Fixup BaseLib file page at file %s \n' % path)
lines = text.split('\n')
lastBaseJumpIndex = -1
lastIdtGateDescriptor = -1
for index in range(len(lines) - 1, -1, -1):
line = lines[index]
if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 4 </td>':
lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 4 [IA32] </td>'
if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 0x10 </td>':
lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 0x10 [IPF] </td>'
if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 8 </td>':
lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 9 [EBC, x64] </td>'
if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4') != -1:
lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4',
'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4 [IA32]')
if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10') != -1:
lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10',
'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10 [IPF]')
if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8') != -1:
lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8',
'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8 [x64, EBC]')
if line.find('>BASE_LIBRARY_JUMP_BUFFER</a>') != -1:
if lastBaseJumpIndex != -1:
del lines[lastBaseJumpIndex]
lastBaseJumpIndex = index
if line.find('>IA32_IDT_GATE_DESCRIPTOR</a></td>') != -1:
if lastIdtGateDescriptor != -1:
del lines[lastIdtGateDescriptor]
lastIdtGateDescriptor = index
try:
f = open(path, 'w')
f.write('\n'.join(lines))
f.close()
except:
self._isBusy = False
self.LogMessage(" <<< Fail to fixup file %s\n" % path)
self.LogMessage(" <<< Finish to fixup file %s\n" % path)
def FixPageIA32_IDT_GATE_DESCRIPTOR(self, path, text):
self.LogMessage(' >>> Fixup structure reference IA32_IDT_GATE_DESCRIPTOR at file %s \n' % path)
lines = text.split('\n')
for index in range(len(lines) - 1, -1, -1):
line = lines[index].strip()
if line.find('struct {</td>') != -1 and lines[index - 2].find('>Uint64</a></td>') != -1:
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>')
if line.find('struct {</td>') != -1 and lines[index - 1].find('Data Fields') != -1:
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>')
try:
f = open(path, 'w')
f.write('\n'.join(lines))
f.close()
except:
self._isBusy = False
self.LogMessage(" <<< Fail to fixup file %s\n" % path)
self.LogMessage(" <<< Finish to fixup file %s\n" % path)
def FixPageBASE_LIBRARY_JUMP_BUFFER(self, path, text):
self.LogMessage(' >>> Fixup structure reference BASE_LIBRARY_JUMP_BUFFER at file %s \n' % path)
lines = text.split('\n')
bInDetail = True
bNeedRemove = False
for index in range(len(lines) - 1, -1, -1):
line = lines[index]
if line.find('Detailed Description') != -1:
bInDetail = False
if line.startswith('EBC context buffer used by') and lines[index - 1].startswith('x64 context buffer'):
lines[index] = "IA32/IPF/X64/" + line
bNeedRemove = True
if line.startswith("x64 context buffer") or line.startswith('IPF context buffer used by') or \
line.startswith('IA32 context buffer used by'):
if bNeedRemove:
lines.remove(line)
if line.find('>R0</a>') != -1 and not bInDetail:
if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>':
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>')
if line.find('>Rbx</a>') != -1 and not bInDetail:
if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>':
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>')
if line.find('>F2</a>') != -1 and not bInDetail:
if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>':
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>')
if line.find('>Ebx</a>') != -1 and not bInDetail:
if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>':
lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>')
try:
f = open(path, 'w')
f.write('\n'.join(lines))
f.close()
except:
self._isBusy = False
self.LogMessage(" <<< Fail to fixup file %s" % path)
self.LogMessage(" <<< Finish to fixup file %s\n" % path)
def FixPageUefiDriverEntryPoint(self, path, text):
self.LogMessage(' >>> Fixup file reference MdePkg/Include/Library/UefiDriverEntryPoint.h at file %s \n' % path)
lines = text.split('\n')
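        # Record the span between the first </dl> after each entry point's
        # signature and the line before its "References" paragraph, then delete
        # the spans in reverse order so the earlier indices remain valid.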
bInModuleEntry = False
bInEfiMain = False
ModuleEntryDlCount = 0
ModuleEntryDelStart = 0
ModuleEntryDelEnd = 0
EfiMainDlCount = 0
EfiMainDelStart = 0
EfiMainDelEnd = 0
for index in range(len(lines)):
line = lines[index].strip()
if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1:
bInModuleEntry = True
if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1:
bInEfiMain = True
if line.startswith('<p>References <a'):
if bInModuleEntry:
ModuleEntryDelEnd = index - 1
bInModuleEntry = False
elif bInEfiMain:
EfiMainDelEnd = index - 1
bInEfiMain = False
if bInModuleEntry:
if line.startswith('</dl>'):
ModuleEntryDlCount = ModuleEntryDlCount + 1
if ModuleEntryDlCount == 1:
ModuleEntryDelStart = index + 1
if bInEfiMain:
if line.startswith('</dl>'):
EfiMainDlCount = EfiMainDlCount + 1
if EfiMainDlCount == 1:
EfiMainDelStart = index + 1
if EfiMainDelEnd > EfiMainDelStart:
for index in range(EfiMainDelEnd, EfiMainDelStart, -1):
del lines[index]
if ModuleEntryDelEnd > ModuleEntryDelStart:
for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1):
del lines[index]
try:
f = open(path, 'w')
f.write('\n'.join(lines))
f.close()
except:
self._isBusy = False
self.LogMessage(" <<< Fail to fixup file %s" % path)
self.LogMessage(" <<< Finish to fixup file %s\n" % path)
def FixPageUefiApplicationEntryPoint(self, path, text):
self.LogMessage(' >>> Fixup file reference MdePkg/Include/Library/UefiApplicationEntryPoint.h at file %s \n' % path)
lines = text.split('\n')
bInModuleEntry = False
bInEfiMain = False
ModuleEntryDlCount = 0
ModuleEntryDelStart = 0
ModuleEntryDelEnd = 0
EfiMainDlCount = 0
EfiMainDelStart = 0
EfiMainDelEnd = 0
for index in range(len(lines)):
line = lines[index].strip()
if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1:
bInModuleEntry = True
if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1:
bInEfiMain = True
if line.startswith('<p>References <a'):
if bInModuleEntry:
ModuleEntryDelEnd = index - 1
bInModuleEntry = False
elif bInEfiMain:
EfiMainDelEnd = index - 1
bInEfiMain = False
if bInModuleEntry:
if line.startswith('</dl>'):
ModuleEntryDlCount = ModuleEntryDlCount + 1
if ModuleEntryDlCount == 1:
ModuleEntryDelStart = index + 1
if bInEfiMain:
if line.startswith('</dl>'):
EfiMainDlCount = EfiMainDlCount + 1
if EfiMainDlCount == 1:
EfiMainDelStart = index + 1
if EfiMainDelEnd > EfiMainDelStart:
for index in range(EfiMainDelEnd, EfiMainDelStart, -1):
del lines[index]
if ModuleEntryDelEnd > ModuleEntryDelStart:
for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1):
del lines[index]
try:
f = open(path, 'w')
f.write('\n'.join(lines))
f.close()
except:
self._isBusy = False
self.LogMessage(" <<< Fail to fixup file %s" % path)
self.LogMessage(" <<< Finish to fixup file %s\n" % path)
def FixDoxFileLink(self, path, text):
self.LogMessage(' >>> Fixup .dox postfix for file %s \n' % path)
try:
fd = open(path, 'r')
text = fd.read()
fd.close()
except Exception as e:
self.LogMessage (" <<<Fail to open file %s" % path)
return
text = text.replace ('.s.dox', '.s')
text = text.replace ('.S.dox', '.S')
text = text.replace ('.asm.dox', '.asm')
text = text.replace ('.Asm.dox', '.Asm')
text = text.replace ('.uni.dox', '.uni')
text = text.replace ('.Uni.dox', '.Uni')
try:
fd = open(path, 'w')
fd.write(text)
fd.close()
except Exception as e:
self.LogMessage (" <<<Fail to fixup file %s" % path)
return
        self.LogMessage(' >>> Finished fixing up .dox postfix for file %s \n' % path)
def FixDecDoxygenFileLink(self, path, text):
self.LogMessage(' >>> Fixup .decdoxygen postfix for file %s \n' % path)
try:
fd = open(path, 'r')
lines = fd.readlines()
fd.close()
except Exception as e:
self.LogMessage (" <<<Fail to open file %s" % path)
return
for line in lines:
if line.find('.decdoxygen') != -1:
lines.remove(line)
break
try:
fd = open(path, 'w')
fd.write("".join(lines))
fd.close()
except Exception as e:
self.LogMessage (" <<<Fail to fixup file %s" % path)
return
        self.LogMessage(' >>> Finished fixing up .decdoxygen postfix for file %s \n' % path)
import threading
class MonitorThread(threading.Thread):
def __init__(self, pipe, callback):
threading.Thread.__init__(self)
self._pipe = pipe
self._callback = callback
self._isCancel = False
def run(self):
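        # Poll the process pipe: Peek() primes LastRead(), and a zero result
        # means the stream has closed. Any text read is marshalled back to the
        # GUI thread via ForegroundProcess so wx calls stay on the main thread.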
while (not self._isCancel):
self._pipe.Peek()
if self._pipe.LastRead() == 0:
break
text = self._pipe.read()
if len(text.strip()) != 0:
wx.GetApp().ForegroundProcess(self._callback, (text,))
def Terminate(self):
self._pipe.flush()
self._isCancel = True
class DoxygenProcess(wx.Process):
def OnTerminate(self, id, status):
self._parent.OnTerminateDoxygenProcess()
def SetParent(self, parent):
self._parent = parent
class CHMProcess(wx.Process):
def OnTerminate(self, id, status):
self._parent.OnTerminateCHMProcess()
def SetParent(self, parent):
self._parent = parent
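# ResultHandler is a minimal duck-typed logging handler (handle/emit/
# acquire/release) that forwards log records to the progress dialog via wx
# events, so GUI updates happen on the main thread.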
class ResultHandler:
def __init__(self, parent):
self._parent = parent
self.level = 0
def emit(self, record):
self._parent.LogMessage(record)
def handle(self, record):
wx.PostEvent(self._parent, LogEvent(message=record.getMessage()))
def acquire(self):
pass
def release(self):
pass
if __name__ == '__main__':
app = PackageDocApp(redirect=False)
app.MainLoop()
|
[
"[email protected]"
] | |
133204167552658953dec762fb75f1d33e371f32
|
768058e7f347231e06a28879922690c0b6870ed4
|
/venv/lib/python3.7/site-packages/cytoolz/tests/test_curried_toolzlike.py
|
04da221e7a6c2da495482c4d0ec0f832cf4c4132
|
[] |
no_license
|
jciech/HeisenbergSpinChains
|
58b4238281d8c158b11c6c22dd0da82025fd7284
|
e43942bbd09f6675e7e2ff277f8930dc0518d08e
|
refs/heads/master
| 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,439 |
py
|
import cytoolz
import cytoolz.curried
import types
from dev_skip_test import dev_skip_test
# Note that the tests in this file assume `toolz.curry` is a class, but we
# may some day make `toolz.curry` a function and `toolz.Curry` a class.
@dev_skip_test
def test_toolzcurry_is_class():
import toolz
assert isinstance(toolz.curry, type) is True
assert isinstance(toolz.curry, types.FunctionType) is False
@dev_skip_test
def test_cytoolz_like_toolz():
import toolz
import toolz.curried
for key, val in vars(toolz.curried).items():
if isinstance(val, toolz.curry):
if val.func is toolz.curry: # XXX: Python 3.4 work-around!
continue
assert hasattr(cytoolz.curried, key), (
"cytoolz.curried.%s does not exist" % key
)
assert isinstance(getattr(cytoolz.curried, key), cytoolz.curry), (
"cytoolz.curried.%s should be curried" % key
)
@dev_skip_test
def test_toolz_like_cytoolz():
import toolz
import toolz.curried
for key, val in vars(cytoolz.curried).items():
if isinstance(val, cytoolz.curry):
assert hasattr(toolz.curried, key), (
"cytoolz.curried.%s should not exist" % key
)
assert isinstance(getattr(toolz.curried, key), toolz.curry), (
"cytoolz.curried.%s should not be curried" % key
)
|
[
"[email protected]"
] | |
b97d80c5929e9b8e0c5d063c370f852396e5f312
|
03d7375757324e7ff1d3c9ab187230581d54416d
|
/backend/rent_a_road_test_3250/wsgi.py
|
579cf1810f7da700f3d420b0e011e94c1c08dd8b
|
[] |
no_license
|
crowdbotics-apps/rent-a-road-test-3250
|
6d657e2d68dc1c5d94fa4504b239ff2b5caea1fc
|
847636c17ed56722f64dc3b18765c81364aaf80a
|
refs/heads/master
| 2022-12-08T10:54:58.557243 | 2019-05-13T01:35:42 | 2019-05-13T01:35:42 | 186,325,879 | 0 | 0 | null | 2022-12-06T15:59:27 | 2019-05-13T01:35:39 |
JavaScript
|
UTF-8
|
Python
| false | false | 420 |
py
|
"""
WSGI config for rent_a_road_test_3250 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rent_a_road_test_3250.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
1dfc2fd41a419ec42d4dfca32b8e62d7ead75847
|
8f9ea3f14bdf2187de759939b2bbc87fe68ccfc0
|
/tensorflow/compiler/xla/python/xla_client_test.py
|
375e720f9b433f45ad5adc329104c286184a7510
|
[
"Apache-2.0"
] |
permissive
|
davidstanke/bazel-mvn-demo
|
4ea43f0ba293a28b916a27eab5f0812e9b753c2c
|
cff14dddce15ea7152988da576673bd15bab6c6e
|
refs/heads/master
| 2022-10-20T07:52:29.651851 | 2018-11-22T13:17:51 | 2018-11-22T13:17:51 | 157,782,756 | 2 | 0 |
Apache-2.0
| 2022-10-04T23:47:05 | 2018-11-15T22:54:09 |
C++
|
UTF-8
|
Python
| false | false | 53,532 |
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.Execute(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
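    # For example, np.testing.assert_allclose(4, [[4]]) passes via broadcasting.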
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_allclose, c, arguments,
expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
    self.assertEqual(len(proto.computations), 1)
    self.assertEqual(len(proto.computations[0].instructions), 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.ExecuteWithLocalBuffers(arg_buffers)
return result_buffer.to_py()
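  # NOTE (added): the point of routing arguments through LocalBuffer here is
  # that each argument is transferred to the device once, up front;
  # ExecuteWithLocalBuffers then runs against device-resident data instead of
  # re-transferring Python values on every execution.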
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.ExecuteWithLocalBuffers([arg_buffer])
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As minimal as possible number of additional ops are added
around the op being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().Execute()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
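  # NOTE (added): the dimension_numbers tuple above is
  #   ((lhs_contracting_dims, rhs_contracting_dims),
  #    (lhs_batch_dims,       rhs_batch_dims)),
  # so (([2], [1]), ([0], [0])) contracts lhs dim 2 against rhs dim 1 and
  # batches over dim 0 of both operands — the same contraction np.matmul
  # performs on stacks of matrices (see the proto-based variant below).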
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
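  # NOTE (added): `pads` lists (low, high) edge padding per spatial dimension,
  # and lhs_dilation=(2, 1) inserts one zero row between adjacent input rows
  # (transposed-convolution style) — which is where the all-zero rows in the
  # expected output come from.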
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
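  # NOTE (added): the dimension_numbers strings describe the layouts of the
  # lhs, the rhs (kernel) and the output: N = batch, C = feature, and the
  # remaining letters are spatial dims. ("NHWC", "OIHW", "CWNH") therefore
  # feeds a channels-last input through an OIHW kernel and requests a permuted
  # output, matching the explicit np.transpose calls in this test.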
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
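  # NOTE (added): each padding triple is (edge_padding_low, edge_padding_high,
  # interior_padding). On dim 0, (1, 2, 1) adds one row before, two rows
  # after, and one interior row between the two original rows:
  # 1 + 2 + 1 + 2 = 6 rows, giving the 6x3 result checked above.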
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])  # one expected value per input element
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
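  # NOTE (added; based on XLA's documented semantics, which may have differed
  # at the time this file was written): DynamicSlice/DynamicUpdateSlice start
  # indices are clamped so the accessed window stays in bounds; the hard-coded
  # starts above are already in range, so clamping is not exercised here.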
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().Execute()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def DISABLED_testMapWithStaticOperands(self):
c = self._NewComputation()
factor = c.ConstantF32Scalar(3.0)
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32ByParamComputation(), [0],
static_operands=[factor])
self._ExecuteAndCompareClose(c, expected=[3.0, 6.0, 9.0, 12.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
    _ReduceAndTest(0)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.Execute()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
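  # NOTE (added): Execute() blocks until the infeed supplies data, so the
  # round-trip test above runs it on a worker thread and drives the infeed
  # and outfeed queues from the main thread before joining.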
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
if __name__ == "__main__":
unittest.main()
# [author: [email protected]]
# ==== next file: /saigon/rat/RuckusAutoTest/tests/zd/CB_ZD_Mesh_Recovery_SSID_Testing.py
#      repo: jichunwei/MyGitHub-1 | license: none | Python, UTF-8, 5,930 bytes ====
# Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""
Description:
@author: An Nguyen
@contact: [email protected]
@since: Feb 2012
Prerequisite (Assumptions about the state of the test bed/DUT):
1. Build under test is loaded on the AP and Zone Director
Required components: 'Station', 'RuckusAP', 'ZoneDirector'
Test parameters:
Test procedure:
1. Config:
-
2. Test:
        - Verify that the mesh tree matches the expected topology
3. Cleanup:
-
Result type: PASS/FAIL
Results: PASS: If the mesh tree is not changed
FAIL: If the mesh tree is changed
Messages: If FAIL the test script returns a message related to the criterion that is not satisfied
"""
import time
import logging
from pprint import pformat
from RuckusAutoTest.models import Test
from RuckusAutoTest.components import Helpers as lib
from RuckusAutoTest.tests.zd import libZD_TestMethods as tmethod
from RuckusAutoTest.tests.zd import libZD_TestConfig as tconfig
from RuckusAutoTest.common import lib_Debug as bugme
class CB_ZD_Mesh_Recovery_SSID_Testing(Test):
required_components = ['Station', 'RuckusAP', 'ZoneDirector']
parameter_description = {
}
def config(self, conf):
self._init_test_parameter(conf)
def test(self):
self._get_recovery_ssid_info()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._change_mesh_ssid_and_reboot_ap()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._verify_station_could_scan_the_recovery_ssid()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._verify_station_could_connect_to_recovery_ssid_wlan()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
return self.returnResult('PASS', self.passmsg)
def cleanup(self):
pass
def _init_test_parameter(self, conf):
self.conf = {'timeout': 600}
self.conf.update(conf)
self.errmsg = ''
self.passmsg = ''
self.zd = self.testbed.components['ZoneDirector']
self.active_ap = self.carrierbag[self.conf['ap_tag']]['ap_ins']
self.target_station = self.carrierbag[self.conf['sta_tag']]['sta_ins']
def _get_recovery_ssid_info(self):
self.recovery_ssid_info = lib.apcli.radiogrp.get_recovery_ssid(self.active_ap)
if not self.recovery_ssid_info['timeout'] and not self.recovery_ssid_info['service_wlan']:
self.errmsg = 'The recovery-ssid function is not supported. Please check the system.'
elif self.recovery_ssid_info['service_wlan'].lower() != 'enabled':
self.errmsg = 'The service wlan is not enabled as expected.'
if self.errmsg: return
self.recovery_wlan_cfg = {}
wlan_list_info = lib.apcli.radiogrp.get_wlanlist(self.active_ap)
for wlan_info in wlan_list_info:
if wlan_info['name'] == 'recovery-ssid':
self.recovery_wlan_cfg['ssid'] = wlan_info['ssid']
passphrase = lib.apcli.radiogrp.get_passphrase(self.active_ap, wlan_info['wlanid'])
if passphrase['passphrase'] == 'DID NOT EXIST':
self.errmsg = 'Can not get the recovery-ssid wlan passphrase'
return
self.recovery_wlan_cfg['key_string'] = passphrase['passphrase'][0]
self.recovery_wlan_cfg['encryption'] = 'AES'
self.recovery_wlan_cfg['wpa_ver'] = 'WPA2'
self.recovery_wlan_cfg['auth'] = 'PSK'
return
self.errmsg = 'Can not find out the recovery-ssid wlan settings'
def _change_mesh_ssid_and_reboot_ap(self):
"""
        The mesh AP cannot rejoin the system while the meshu and meshd SSIDs are changed.
"""
test_ssid = 'test-recovery-ssid'
logging.info('Change the mesh ssid to %s and reboot the active ap' % test_ssid)
self.active_ap.cmd('set ssid meshu %s' % test_ssid)
self.active_ap.cmd('set ssid meshd %s' % test_ssid)
self.active_ap.reboot(login=False)
        msg = 'Waiting %s seconds for the recovery-ssid wlan [%s] to come up'
msg = msg % (self.recovery_ssid_info['timeout'], self.recovery_wlan_cfg['ssid'])
logging.info(msg)
time.sleep(int(self.recovery_ssid_info['timeout']))
def _verify_station_could_scan_the_recovery_ssid(self):
"""
"""
msg = tmethod.verify_wlan_in_the_air(self.target_station,
self.recovery_wlan_cfg['ssid'],
self.conf['timeout'])
if "The station didn't see the WLAN" in msg:
self.errmsg = '[SCANNED IN %s SECS] %s' % (self.conf['timeout'], msg)
else:
self.passmsg = 'The recovery-ssid wlan[%s] is up as expected.' % self.recovery_wlan_cfg['ssid']
def _verify_station_could_connect_to_recovery_ssid_wlan(self):
"""
"""
tconfig.remove_all_wlan_from_station(self.target_station,
check_status_timeout = self.conf['timeout'])
self.errmsg = tmethod.assoc_station_with_ssid(self.target_station,
self.recovery_wlan_cfg,
self.conf['timeout'])
if not self.errmsg:
passmsg = '; target station could connect to the recovery-ssid wlan %s'
passmsg = passmsg % str(self.recovery_wlan_cfg)
self.passmsg += passmsg
# [author: [email protected]]
# ==== next file: /bin/env.py
#      repo: ccgeom/ccg-notes | license: CC0-1.0 (permissive) | Python, UTF-8, 1,046 bytes ====
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import sys
import os
import virtualenv as venv
"""
Colorful output
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"
def head(msg):
print(HEADER + msg + ENDC)
def info(msg):
print(msg)
def infog(msg):
print(OKGREEN + msg + ENDC)
def infob(msg):
print(OKBLUE + msg + ENDC)
def warn(msg):
print(WARNING + msg + ENDC)
def err(msg):
print(FAIL + msg + ENDC)
"""
Welcome message
"""
head("Welcome!")
"""
Check python version
"""
info("checking python version...")
req_version = (3, 7)
cur_version = sys.version_info
if cur_version < req_version:
err("Your Python interpreter is too old. Please consider upgrading to 3.6 or above.")
sys.exit(-1)
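# NOTE (added): tuple comparison is lexicographic, so e.g. (3, 6, 9) < (3, 7)
# is True — any 3.6.x interpreter is rejected by the check above.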
"""
Check virtual environment
"""
if not os.path.exists(".py"):
if cur_version >= (3, 7, 7):
sys.argv = ['.py']
venv.cli_run(sys.argv)
#else:
#sys.argv = ['virtualenv', '.py']
#venv.main()
# [author: [email protected]]
# ==== next file: /SoftUni Lessons/Python Development/Python Fundamentals June 2019/Problems and Files/03. PYTHON INTRO, FUNCTIONS, DEBUGGING/Functions-and-Debugging/04. Draw a Filled Square.py
#      repo: SimeonTsvetanov/Coding-Lessons | license: none | Python, UTF-8, 1,563 bytes ====
"""
Functions and Debugging
Judge: https://judge.softuni.bg/Contests/Compete/Index/922#3
04. Draw a Filled Square
Problem statement:
Draw at the console a filled square of size n like in the example:
Examples
Input:
4
Output:
--------
-\/\/\/-
-\/\/\/-
--------
Hints
1. Read the input
2. Create a function which will print the top and the bottom rows (they are the same).
Don’t forget to give it a descriptive name and to give it as a parameter some length
3. Create the function which will print the middle rows.
4. Use the functions that you've just created to draw a square.
"""
num = int(input())
def top_and_bottom():
for i in range(num * 2):
print(f"-", end="")
print()
def filling():
print(f"-", end="")
for i in range(int(((num * 2) - 2) / 2)):
print(f"\/", end="")
print(f"-", end="")
print()
def square():
top_and_bottom()
for i in range(num - 2):
filling()
top_and_bottom()
square()
# """
# Input: 4
#
# Output:
# --------
# -\/\/\/-
# -\/\/\/-
# --------
# """
#
#
# def header_and_footer(number):
# print(number * 2 * "-")
#
#
# def filling(number):
# internal = int((number * 2 - 2) / 2) * "\\/"
# for row in range(number - 2):
# print(f"-{internal}-")
#
#
# def print_square(number):
# header_and_footer(number)
# filling(number)
# header_and_footer(number)
#
#
# if __name__ == '__main__':
# print_square(number=int(input()))
#
#
# [author: [email protected]]
# ==== next file: /kratos/applications/ManagerApplication/test_examples/henry.py
#      repo: UPC-EnricBonet/trunk | license: none | Python, UTF-8, 4,736 bytes ====
#setting the domain size for the problem to be solved
domain_size = 2 # 2D problem
#including kratos path
import sys
from KratosMultiphysics import * #we import the KRATOS
from KratosMultiphysics.ManagerApplication import * #and now our application
from KratosMultiphysics.HenryApplication import * #and now our application
#Create a new empty model part called "ExampleModelPart"
model_part = ModelPart("ExampleModelPart");
print ("Model part defined: ExampleModelPart")
#we import the python file that includes the commands that we need
import fractional_iterative_solver
#import variables that we will need from solver to our recent created model_part
fractional_iterative_solver.AddVariables(model_part)
# (note that our model part does not have nodes or elements yet)
# introducing input & outoput (also postProcess) file name
output_file_name = "result_henry"
input_file_name = "henry"
# Mesh built by GID for the postProcess
gid_mode = GiDPostMode.GiD_PostAscii
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO(output_file_name, gid_mode, multifile, deformed_mesh_flag, write_conditions)
model_part_io_fluid = ModelPartIO(input_file_name)
model_part_io_fluid.ReadModelPart(model_part)
model_part.ProcessInfo.SetValue(IS_FLOW_STATIONARY, 1);
model_part.ProcessInfo.SetValue(IS_BUOYANCY, 0);
model_part.ProcessInfo.SetValue(IS_TRANSPORT_STATIONARY, 0);
model_part.ProcessInfo.SetValue(GRAVITY_X, 0.0);
model_part.ProcessInfo.SetValue(GRAVITY_Y, -9.81);
#Aarchivo = open("MatrixSystem.txt", "r")
#Acontenido = archivo.read()
#Aprint (contenido)
#the buffer size should be set up after the mesh is read for the first time (Transient problem =2,3. Steady problem =1)
model_part.SetBufferSize(2)
# we add the DoFs
fractional_iterative_solver.AddDofs(model_part)
print ("Time steps values on each time for unkown=2 (Buffer size)")
#creating flow solver (custom)
solver = fractional_iterative_solver.FractionalIterativeSolver(model_part,domain_size)
#we import the C++ file that includes the commands that we need
#import fractional_iterative_strategy
## This part is contained is for the FractionalSolver not included at fractional_iterative_solver.py
solver.time_order = 1
solver.echo_level = 3
print ("Iterative solver create succesfully")
solver.Initialize()
Nsteps=40
Dt=2
out=0
out_step=1
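# NOTE (added): with Nsteps = 40 and Dt = 2 the transient loop below covers
# roughly 80 s of simulated time, writing results every out_step steps —
# presumably the classic Henry saltwater-intrusion benchmark, given the
# HenryApplication import and the density/concentration coupling.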
print ("Solver inicializate!")
#solver.calculateDensityNodes()
# Comparison file:
filePost="SystemTest.txt"
#print(model_part.Elements)
#darcyXmatrix=Matrix(Nsteps,300)
solver.ReadFile(filePost,Nsteps)
print("Unitary")
solver.UnitaryTest()
#darcyXmatrix=solver.ReadFile(filePost,Nsteps)
#print ("darcyXmatrix[1,200]")
#print (darcyXmatrix[1,200])
mesh_name = 0.0
gid_io.InitializeMesh( mesh_name );
gid_io.WriteMesh((model_part).GetMesh());
gid_io.FinalizeMesh()
gid_io.InitializeResults(mesh_name,(model_part).GetMesh())
#clean file output for matrix
open('MatrixSystem.txt', 'w').close()
print ("Mesh read for the postProcess")
#if step==0:
#for node in model_part.Nodes:
#node.SetSolutionStepValue(PRESSURE,0,0.0)
#node.SetSolutionStepValue(CONCENTRATION,0,0.0)
#if(node.Y < 0.5):
#node.SetSolutionStepValue(PRESSURE,0,9800.0)
#node.SetSolutionStepValue(CONCENTRATION,0,0.0)
#else:
#node.SetSolutionStepValue(PRESSURE,0,0.0)
#node.SetSolutionStepValue(CONCENTRATION,0,0.0)
gid_io.WriteNodalResults(PRESSURE,model_part.Nodes,0, 0)
gid_io.WriteNodalResults(CONCENTRATION,model_part.Nodes,0,0)
gid_io.WriteNodalResults(DENSITY,model_part.Nodes,0,1000)
gid_io.WriteNodalResults(DARCY_FLOW_BALANCE,model_part.Nodes,0,0)
gid_io.WriteNodalResults(SINKSOURCE_BALANCE,model_part.Nodes,0,0)
import time as timer
t1 = timer.time()
for step in range(1,Nsteps):
out=out+1
time = Dt*step
print ("new step, time:",time)
model_part.CloneTimeStep(time)
solver.Solve()
print ("Assert")
solver.AssertVariables(step,Nsteps)
if out==out_step:
out=0
# print ("printing step:",step)
gid_io.WriteNodalResults(PRESSURE,model_part.Nodes,time,0)
gid_io.WriteNodalResults(CONCENTRATION,model_part.Nodes,time,0)
gid_io.WriteNodalResults(DENSITY,model_part.Nodes,time,0)
gid_io.WriteNodalResults(DARCY_FLOW_BALANCE,model_part.Nodes,time,0)
gid_io.WriteNodalResults(SINKSOURCE_BALANCE,model_part.Nodes,time,0)
gid_io.Flush()
t2=timer.time()
total_time=t2-t1
print ("total_time", total_time)
gid_io.FinalizeResults()
# [author: [email protected]]
# ==== next file: /articles/migrations/0001_initial.py
#      repo: TareqMonwer/CS-Basic-Django-Project | license: none | Python, UTF-8, 782 bytes ====
# Generated by Django 3.2.6 on 2021-08-28 16:38
from django.db import migrations, models
import django.db.models.functions.text
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('content', models.TextField()),
],
options={
'verbose_name': 'Post',
'verbose_name_plural': 'Posts',
'ordering': [django.db.models.functions.text.Lower('title')],
},
),
]
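# NOTE (added): as the app's initial migration this creates (by Django's
# default naming) the articles_article table; it would normally be applied
# with `python manage.py migrate articles`.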
# [author: [email protected]]
# ==== next file: /kvmagent/kvmagent/test/test_nfs_primary_storage_create_empty_volume.py
#      repo: SoftwareKing/zstack-utility | license: Apache-2.0 (permissive) | Python, UTF-8, 2,127 bytes ====
'''
@author: Frank
'''
import unittest
import time
import os.path
from kvmagent import kvmagent
from kvmagent.plugins import nfs_primarystorage_plugin
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import uuidhelper
from zstacklib.utils import linux
logger = log.get_logger(__name__)
class Test(unittest.TestCase):
NFS_URL = 'localhost:/home/primary'
CALLBACK_URL = 'http://localhost:7070/testcallback'
def callback(self, req):
rsp = jsonobject.loads(req[http.REQUEST_BODY])
print jsonobject.dumps(rsp, True)
def setUp(self):
self.service = kvmagent.new_rest_service()
kvmagent.get_http_server().register_sync_uri('/testcallback', self.callback)
self.service.start(True)
time.sleep(1)
def mount(self):
cmd = nfs_primarystorage_plugin.MountCmd()
cmd.url = self.NFS_URL
cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
ret = http.json_dump_post(callurl, cmd)
rsp = jsonobject.loads(ret)
self.assertTrue(rsp.success, rsp.error)
self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))
def testName(self):
self.mount()
cmd = nfs_primarystorage_plugin.CreateEmptyVolumeCmd()
cmd.accountUuid = uuidhelper.uuid()
cmd.hypervisorType = 'KVM'
cmd.installUrl = '/tmp/emptyvolume.qcow2'
cmd.name = 'testEmptyVolume'
cmd.size = '1410400256'
cmd.uuid = uuidhelper.uuid()
url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.CREATE_EMPTY_VOLUME_PATH])
rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
time.sleep(5)
self.service.stop()
linux.umount_by_url(self.NFS_URL)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
# [author: [email protected]]
# ==== next file: /identities/urls.py
#      repo: dynamicguy/treeio | license: MIT, LicenseRef-scancode-warranty-disclaimer (permissive) | Python, UTF-8, 4,876 bytes ====
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Identities module URLs
"""
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('treeio.identities.views',
url(r'^(\.(?P<response_format>\w+))?$', 'index', name='identities'),
url(r'^index(\.(?P<response_format>\w+))?$', 'index', name='identities_index'),
url(r'^users(\.(?P<response_format>\w+))?/?$', 'index_users', name='identities_index_users'),
url(r'^groups(\.(?P<response_format>\w+))?/?$', 'index_groups', name='identities_index_groups'),
url(r'^types/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
'type_view', name='identities_index_by_type'),
# Types
url(r'^type/view/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
'type_view', name='identities_type_view'),
url(r'^type/edit/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
'type_edit', name='identities_type_edit'),
url(r'^type/delete/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
'type_delete', name='identities_type_delete'),
url(r'^type/add(\.(?P<response_format>\w+))?/?$', 'type_add', name='identities_type_add'),
# Fields
url(r'^field/view/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
'field_view', name='identities_field_view'),
url(r'^field/edit/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
'field_edit', name='identities_field_edit'),
url(r'^field/delete/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
'field_delete', name='identities_field_delete'),
url(r'^field/add(\.(?P<response_format>\w+))?/?$', 'field_add', name='identities_field_add'),
# Contacts
url(r'^contact/add(\.(?P<response_format>\w+))?/?$', 'contact_add', name='identities_contact_add'),
url(r'^contact/add/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
'contact_add_typed', name='identities_contact_add_typed'),
url(r'^me(\.(?P<response_format>\w+))?/?$', 'contact_me', name='identities_contact_me'),
url(r'^me/objects/(?P<attribute>[a-z_.]+)/list(\.(?P<response_format>\w+))?/?$',
'contact_me', name='identities_contact_me_objects'),
url(r'^contact/view/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
'contact_view', name='identities_contact_view'),
url(r'^contact/view/(?P<contact_id>\d+)/objects/(?P<attribute>[a-z_.]+)/list(\.(?P<response_format>\w+))?/?$',
'contact_view', name='identities_contact_view_objects'),
url(r'^contact/view/(?P<contact_id>\d+)/picture/?$',
'contact_view_picture', name='identities_contact_view_picture'),
url(r'^contact/edit/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
'contact_edit', name='identities_contact_edit'),
url(r'^contact/delete/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
'contact_delete', name='identities_contact_delete'),
url(r'^user/view/(?P<user_id>\d+)(\.(?P<response_format>\w+))?/?$',
'user_view', name='identities_user_view'),
url(r'^group/view/(?P<group_id>\d+)(\.(?P<response_format>\w+))?/?$',
'group_view', name='identities_group_view'),
url(r'^settings/view(\.(?P<response_format>\w+))?/?$',
'settings_view', name='identities_settings_view'),
# Locations
url(r'^location/index(\.(?P<response_format>\w+))?/?$',
'location_index', name='identities_location_index'),
url(r'^location/view/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
'location_view', name='identities_location_view'),
url(r'^location/edit/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
'location_edit', name='identities_location_edit'),
url(r'^location/delete/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
'location_delete', name='identities_location_delete'),
url(r'^location/add(\.(?P<response_format>\w+))?/?$', 'location_add', name='identities_location_add'),
# AJAX callbacks
url(r'^ajax/users(\.(?P<response_format>\w+))?/?$',
'ajax_user_lookup', name='identities_ajax_user_lookup'),
url(r'^ajax/access(\.(?P<response_format>\w+))?/?$',
'ajax_access_lookup', name='identities_ajax_access_lookup'),
url(r'^ajax/contacts(\.(?P<response_format>\w+))?/?$',
'ajax_contact_lookup', name='identities_ajax_contact_lookup'),
url(r'^ajax/locations(\.(?P<response_format>\w+))?/?$',
'ajax_location_lookup', name='identities_ajax_location_lookup'),
)
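
# Illustrative lookup (not part of the original file): the named patterns
# above can be resolved with Django's reverse(), e.g. (assuming this urlconf
# is mounted at the site root):
#
#   from django.core.urlresolvers import reverse
#   reverse('identities_contact_view', kwargs={'contact_id': 1})  # -> '/contact/view/1'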
|
[
"[email protected]"
] | |
6f1f594771992e3786c0ad60350a49a4d83ed6f5
|
16dbe8b1be0cd360ac1062072430f1f2b7d95bd6
|
/FlightPlanner/RnavTurningSegmentAnalyser/ui_RnavTurningSegmentAnalyser.py
|
1f1f2874de7983ce09a0355697e6af5e90e5c126
|
[] |
no_license
|
developer124320/FlightPlanner
|
4a0d9a450ddddede95512ad76437db2906154536
|
f1e4c762c360c0a00022ae6fa028fc1aee2a467d
|
refs/heads/master
| 2022-08-25T14:00:57.495037 | 2020-05-27T01:26:27 | 2020-05-27T01:26:27 | 267,186,057 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32,104 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'RnavTurningSegmentAnalyser.ui'
#
# Created: Wed Nov 25 17:23:08 2015
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from FlightPlanner.Panels.TrackRadialBoxPanel import TrackRadialBoxPanel
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_RnavTurningSegmentAnalyser(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(435, 580)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gbGeneral = QtGui.QGroupBox(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gbGeneral.sizePolicy().hasHeightForWidth())
self.gbGeneral.setSizePolicy(sizePolicy)
self.gbGeneral.setObjectName(_fromUtf8("gbGeneral"))
self.vl_gbGeneral = QtGui.QVBoxLayout(self.gbGeneral)
self.vl_gbGeneral.setObjectName(_fromUtf8("vl_gbGeneral"))
self.frameRnavSpecification = QtGui.QFrame(self.gbGeneral)
self.frameRnavSpecification.setFrameShape(QtGui.QFrame.NoFrame)
self.frameRnavSpecification.setFrameShadow(QtGui.QFrame.Raised)
self.frameRnavSpecification.setObjectName(_fromUtf8("frameRnavSpecification"))
self.hLayoutRnavSpecification = QtGui.QHBoxLayout(self.frameRnavSpecification)
self.hLayoutRnavSpecification.setSpacing(0)
self.hLayoutRnavSpecification.setMargin(0)
self.hLayoutRnavSpecification.setObjectName(_fromUtf8("hLayoutRnavSpecification"))
self.label_67 = QtGui.QLabel(self.frameRnavSpecification)
self.label_67.setMinimumSize(QtCore.QSize(180, 0))
self.label_67.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_67.setFont(font)
self.label_67.setObjectName(_fromUtf8("label_67"))
self.hLayoutRnavSpecification.addWidget(self.label_67)
self.cmbRnavSpecification = QtGui.QComboBox(self.frameRnavSpecification)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cmbRnavSpecification.sizePolicy().hasHeightForWidth())
self.cmbRnavSpecification.setSizePolicy(sizePolicy)
self.cmbRnavSpecification.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.cmbRnavSpecification.setFont(font)
self.cmbRnavSpecification.setObjectName(_fromUtf8("cmbRnavSpecification"))
self.hLayoutRnavSpecification.addWidget(self.cmbRnavSpecification)
self.vl_gbGeneral.addWidget(self.frameRnavSpecification)
self.framePhaseOfFlight = QtGui.QFrame(self.gbGeneral)
self.framePhaseOfFlight.setFrameShape(QtGui.QFrame.NoFrame)
self.framePhaseOfFlight.setFrameShadow(QtGui.QFrame.Raised)
self.framePhaseOfFlight.setObjectName(_fromUtf8("framePhaseOfFlight"))
self.hLayoutPhaseOfFlight = QtGui.QHBoxLayout(self.framePhaseOfFlight)
self.hLayoutPhaseOfFlight.setSpacing(0)
self.hLayoutPhaseOfFlight.setMargin(0)
self.hLayoutPhaseOfFlight.setObjectName(_fromUtf8("hLayoutPhaseOfFlight"))
self.label_69 = QtGui.QLabel(self.framePhaseOfFlight)
self.label_69.setMinimumSize(QtCore.QSize(180, 0))
self.label_69.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_69.setFont(font)
self.label_69.setObjectName(_fromUtf8("label_69"))
self.hLayoutPhaseOfFlight.addWidget(self.label_69)
self.cmbPhaseOfFlight = QtGui.QComboBox(self.framePhaseOfFlight)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cmbPhaseOfFlight.sizePolicy().hasHeightForWidth())
self.cmbPhaseOfFlight.setSizePolicy(sizePolicy)
self.cmbPhaseOfFlight.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.cmbPhaseOfFlight.setFont(font)
self.cmbPhaseOfFlight.setObjectName(_fromUtf8("cmbPhaseOfFlight"))
self.hLayoutPhaseOfFlight.addWidget(self.cmbPhaseOfFlight)
self.vl_gbGeneral.addWidget(self.framePhaseOfFlight)
self.gbWaypoint = QtGui.QGroupBox(self.gbGeneral)
self.gbWaypoint.setObjectName(_fromUtf8("gbWaypoint"))
self.vLayoutGbWaypoint = QtGui.QVBoxLayout(self.gbWaypoint)
self.vLayoutGbWaypoint.setObjectName(_fromUtf8("vLayoutGbWaypoint"))
self.frameType = QtGui.QFrame(self.gbWaypoint)
self.frameType.setFrameShape(QtGui.QFrame.NoFrame)
self.frameType.setFrameShadow(QtGui.QFrame.Raised)
self.frameType.setObjectName(_fromUtf8("frameType"))
self.hLayoutType = QtGui.QHBoxLayout(self.frameType)
self.hLayoutType.setSpacing(0)
self.hLayoutType.setMargin(0)
self.hLayoutType.setObjectName(_fromUtf8("hLayoutType"))
self.label_70 = QtGui.QLabel(self.frameType)
self.label_70.setMinimumSize(QtCore.QSize(180, 0))
self.label_70.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_70.setFont(font)
self.label_70.setObjectName(_fromUtf8("label_70"))
self.hLayoutType.addWidget(self.label_70)
self.cmbType = QtGui.QComboBox(self.frameType)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cmbType.sizePolicy().hasHeightForWidth())
self.cmbType.setSizePolicy(sizePolicy)
self.cmbType.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.cmbType.setFont(font)
self.cmbType.setObjectName(_fromUtf8("cmbType"))
self.hLayoutType.addWidget(self.cmbType)
self.vLayoutGbWaypoint.addWidget(self.frameType)
self.vl_gbGeneral.addWidget(self.gbWaypoint)
self.chbCatH = QtGui.QCheckBox(self.gbGeneral)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.chbCatH.setFont(font)
self.chbCatH.setObjectName(_fromUtf8("chbCatH"))
self.vl_gbGeneral.addWidget(self.chbCatH)
self.chbCircularArcs = QtGui.QCheckBox(self.gbGeneral)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.chbCircularArcs.setFont(font)
self.chbCircularArcs.setObjectName(_fromUtf8("chbCircularArcs"))
self.vl_gbGeneral.addWidget(self.chbCircularArcs)
self.gbParameters = QtGui.QGroupBox(self.gbGeneral)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gbParameters.sizePolicy().hasHeightForWidth())
self.gbParameters.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setBold(False)
font.setWeight(50)
self.gbParameters.setFont(font)
self.gbParameters.setObjectName(_fromUtf8("gbParameters"))
self.vl_gbParameters = QtGui.QVBoxLayout(self.gbParameters)
self.vl_gbParameters.setObjectName(_fromUtf8("vl_gbParameters"))
self.frameSelectionMode = QtGui.QFrame(self.gbParameters)
self.frameSelectionMode.setFrameShape(QtGui.QFrame.NoFrame)
self.frameSelectionMode.setFrameShadow(QtGui.QFrame.Raised)
self.frameSelectionMode.setObjectName(_fromUtf8("frameSelectionMode"))
self.hLayoutSelectionMode = QtGui.QHBoxLayout(self.frameSelectionMode)
self.hLayoutSelectionMode.setSpacing(0)
self.hLayoutSelectionMode.setMargin(0)
self.hLayoutSelectionMode.setObjectName(_fromUtf8("hLayoutSelectionMode"))
self.label_66 = QtGui.QLabel(self.frameSelectionMode)
self.label_66.setMinimumSize(QtCore.QSize(180, 0))
self.label_66.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_66.setFont(font)
self.label_66.setObjectName(_fromUtf8("label_66"))
self.hLayoutSelectionMode.addWidget(self.label_66)
self.cmbSelectionMode = QtGui.QComboBox(self.frameSelectionMode)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cmbSelectionMode.sizePolicy().hasHeightForWidth())
self.cmbSelectionMode.setSizePolicy(sizePolicy)
self.cmbSelectionMode.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.cmbSelectionMode.setFont(font)
self.cmbSelectionMode.setObjectName(_fromUtf8("cmbSelectionMode"))
self.hLayoutSelectionMode.addWidget(self.cmbSelectionMode)
self.vl_gbParameters.addWidget(self.frameSelectionMode)
self.txtInbound = TrackRadialBoxPanel(self.gbParameters)
self.txtInbound.Caption = "In-bound Track"
self.vl_gbParameters.addWidget(self.txtInbound)
self.txtOutbound = TrackRadialBoxPanel(self.gbParameters)
self.txtOutbound.Caption = "Out-bound Track"
self.vl_gbParameters.addWidget(self.txtOutbound)
# self.frame_Track = QtGui.QFrame(self.gbParameters)
# self.frame_Track.setFrameShape(QtGui.QFrame.NoFrame)
# self.frame_Track.setFrameShadow(QtGui.QFrame.Raised)
# self.frame_Track.setObjectName(_fromUtf8("frame_Track"))
# self.hLayoutTrack = QtGui.QHBoxLayout(self.frame_Track)
# self.hLayoutTrack.setSpacing(0)
# self.hLayoutTrack.setMargin(0)
# self.hLayoutTrack.setObjectName(_fromUtf8("hLayoutTrack"))
# self.label_75 = QtGui.QLabel(self.frame_Track)
# self.label_75.setMinimumSize(QtCore.QSize(180, 0))
# self.label_75.setMaximumSize(QtCore.QSize(180, 16777215))
# font = QtGui.QFont()
# font.setBold(False)
# font.setWeight(50)
# self.label_75.setFont(font)
# self.label_75.setObjectName(_fromUtf8("label_75"))
# self.hLayoutTrack.addWidget(self.label_75)
# self.frame_APV_10 = QtGui.QFrame(self.frame_Track)
# self.frame_APV_10.setFrameShape(QtGui.QFrame.StyledPanel)
# self.frame_APV_10.setFrameShadow(QtGui.QFrame.Raised)
# self.frame_APV_10.setObjectName(_fromUtf8("frame_APV_10"))
# self.horizontalLayout_14 = QtGui.QHBoxLayout(self.frame_APV_10)
# self.horizontalLayout_14.setSpacing(0)
# self.horizontalLayout_14.setMargin(0)
# self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
# self.txtInbound = QtGui.QLineEdit(self.frame_APV_10)
# self.txtInbound.setEnabled(True)
# font = QtGui.QFont()
# font.setBold(False)
# font.setWeight(50)
# self.txtInbound.setFont(font)
# self.txtInbound.setObjectName(_fromUtf8("txtInbound"))
# self.horizontalLayout_14.addWidget(self.txtInbound)
# self.btnCaptureInboundTrack = QtGui.QToolButton(self.frame_APV_10)
# self.btnCaptureInboundTrack.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Resource/coordinate_capture.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# self.btnCaptureInboundTrack.setIcon(icon)
# self.btnCaptureInboundTrack.setObjectName(_fromUtf8("btnCaptureInboundTrack"))
# self.horizontalLayout_14.addWidget(self.btnCaptureInboundTrack)
# self.hLayoutTrack.addWidget(self.frame_APV_10)
# self.vl_gbParameters.addWidget(self.frame_Track)
# self.frameDistance = QtGui.QFrame(self.gbParameters)
# self.frameDistance.setFrameShape(QtGui.QFrame.NoFrame)
# self.frameDistance.setFrameShadow(QtGui.QFrame.Raised)
# self.frameDistance.setObjectName(_fromUtf8("frameDistance"))
# self.hLayoutDistance = QtGui.QHBoxLayout(self.frameDistance)
# self.hLayoutDistance.setSpacing(0)
# self.hLayoutDistance.setMargin(0)
# self.hLayoutDistance.setObjectName(_fromUtf8("hLayoutDistance"))
# self.label_76 = QtGui.QLabel(self.frameDistance)
# self.label_76.setMinimumSize(QtCore.QSize(180, 0))
# self.label_76.setMaximumSize(QtCore.QSize(180, 16777215))
# font = QtGui.QFont()
# font.setBold(False)
# font.setWeight(50)
# self.label_76.setFont(font)
# self.label_76.setObjectName(_fromUtf8("label_76"))
# self.hLayoutDistance.addWidget(self.label_76)
# self.frame_APV_11 = QtGui.QFrame(self.frameDistance)
# self.frame_APV_11.setFrameShape(QtGui.QFrame.StyledPanel)
# self.frame_APV_11.setFrameShadow(QtGui.QFrame.Raised)
# self.frame_APV_11.setObjectName(_fromUtf8("frame_APV_11"))
# self.horizontalLayout_15 = QtGui.QHBoxLayout(self.frame_APV_11)
# self.horizontalLayout_15.setSpacing(0)
# self.horizontalLayout_15.setMargin(0)
# self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
# self.txtOutbound = QtGui.QLineEdit(self.frame_APV_11)
# self.txtOutbound.setEnabled(True)
# font = QtGui.QFont()
# font.setBold(False)
# font.setWeight(50)
# self.txtOutbound.setFont(font)
# self.txtOutbound.setObjectName(_fromUtf8("txtOutbound"))
# self.horizontalLayout_15.addWidget(self.txtOutbound)
# self.btnCaptureOutboundTrack = QtGui.QToolButton(self.frame_APV_11)
# self.btnCaptureOutboundTrack.setText(_fromUtf8(""))
# self.btnCaptureOutboundTrack.setIcon(icon)
# self.btnCaptureOutboundTrack.setObjectName(_fromUtf8("btnCaptureOutboundTrack"))
# self.horizontalLayout_15.addWidget(self.btnCaptureOutboundTrack)
# self.hLayoutDistance.addWidget(self.frame_APV_11)
# self.vl_gbParameters.addWidget(self.frameDistance)
self.framePDG = QtGui.QFrame(self.gbParameters)
self.framePDG.setFrameShape(QtGui.QFrame.NoFrame)
self.framePDG.setFrameShadow(QtGui.QFrame.Raised)
self.framePDG.setObjectName(_fromUtf8("framePDG"))
self.hLayoutPDG = QtGui.QHBoxLayout(self.framePDG)
self.hLayoutPDG.setSpacing(0)
self.hLayoutPDG.setMargin(0)
self.hLayoutPDG.setObjectName(_fromUtf8("hLayoutPDG"))
self.label_71 = QtGui.QLabel(self.framePDG)
self.label_71.setMinimumSize(QtCore.QSize(180, 0))
self.label_71.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_71.setFont(font)
self.label_71.setObjectName(_fromUtf8("label_71"))
self.hLayoutPDG.addWidget(self.label_71)
self.txtIas = QtGui.QLineEdit(self.framePDG)
self.txtIas.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtIas.setFont(font)
self.txtIas.setObjectName(_fromUtf8("txtIas"))
self.hLayoutPDG.addWidget(self.txtIas)
self.vl_gbParameters.addWidget(self.framePDG)
self.frameAltitude = QtGui.QFrame(self.gbParameters)
self.frameAltitude.setFrameShape(QtGui.QFrame.NoFrame)
self.frameAltitude.setFrameShadow(QtGui.QFrame.Raised)
self.frameAltitude.setObjectName(_fromUtf8("frameAltitude"))
self.hLayoutAltitude = QtGui.QHBoxLayout(self.frameAltitude)
self.hLayoutAltitude.setSpacing(0)
self.hLayoutAltitude.setMargin(0)
self.hLayoutAltitude.setObjectName(_fromUtf8("hLayoutAltitude"))
self.label_77 = QtGui.QLabel(self.frameAltitude)
self.label_77.setMinimumSize(QtCore.QSize(180, 0))
self.label_77.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_77.setFont(font)
self.label_77.setObjectName(_fromUtf8("label_77"))
self.hLayoutAltitude.addWidget(self.label_77)
self.txtAltitudeM = QtGui.QLineEdit(self.frameAltitude)
self.txtAltitudeM.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtAltitudeM.setFont(font)
self.txtAltitudeM.setText(_fromUtf8(""))
self.txtAltitudeM.setObjectName(_fromUtf8("txtAltitudeM"))
self.hLayoutAltitude.addWidget(self.txtAltitudeM)
self.label = QtGui.QLabel(self.frameAltitude)
self.label.setObjectName(_fromUtf8("label"))
self.hLayoutAltitude.addWidget(self.label)
self.txtAltitude = QtGui.QLineEdit(self.frameAltitude)
self.txtAltitude.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtAltitude.setFont(font)
self.txtAltitude.setObjectName(_fromUtf8("txtAltitude"))
self.hLayoutAltitude.addWidget(self.txtAltitude)
self.label_2 = QtGui.QLabel(self.frameAltitude)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.hLayoutAltitude.addWidget(self.label_2)
self.vl_gbParameters.addWidget(self.frameAltitude)
self.frameMoc = QtGui.QFrame(self.gbParameters)
self.frameMoc.setFrameShape(QtGui.QFrame.NoFrame)
self.frameMoc.setFrameShadow(QtGui.QFrame.Raised)
self.frameMoc.setObjectName(_fromUtf8("frameMoc"))
self.hLayoutMoc = QtGui.QHBoxLayout(self.frameMoc)
self.hLayoutMoc.setSpacing(0)
self.hLayoutMoc.setMargin(0)
self.hLayoutMoc.setObjectName(_fromUtf8("hLayoutMoc"))
self.label_78 = QtGui.QLabel(self.frameMoc)
self.label_78.setMinimumSize(QtCore.QSize(180, 0))
self.label_78.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_78.setFont(font)
self.label_78.setObjectName(_fromUtf8("label_78"))
self.hLayoutMoc.addWidget(self.label_78)
self.txtIsa = QtGui.QLineEdit(self.frameMoc)
self.txtIsa.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtIsa.setFont(font)
self.txtIsa.setObjectName(_fromUtf8("txtIsa"))
self.hLayoutMoc.addWidget(self.txtIsa)
self.vl_gbParameters.addWidget(self.frameMoc)
self.frameMoc_2 = QtGui.QFrame(self.gbParameters)
self.frameMoc_2.setFrameShape(QtGui.QFrame.NoFrame)
self.frameMoc_2.setFrameShadow(QtGui.QFrame.Raised)
self.frameMoc_2.setObjectName(_fromUtf8("frameMoc_2"))
self.hLayoutMoc_2 = QtGui.QHBoxLayout(self.frameMoc_2)
self.hLayoutMoc_2.setSpacing(0)
self.hLayoutMoc_2.setMargin(0)
self.hLayoutMoc_2.setObjectName(_fromUtf8("hLayoutMoc_2"))
self.label_79 = QtGui.QLabel(self.frameMoc_2)
self.label_79.setMinimumSize(QtCore.QSize(180, 0))
self.label_79.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_79.setFont(font)
self.label_79.setObjectName(_fromUtf8("label_79"))
self.hLayoutMoc_2.addWidget(self.label_79)
self.txtBankAngle = QtGui.QLineEdit(self.frameMoc_2)
self.txtBankAngle.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtBankAngle.setFont(font)
self.txtBankAngle.setObjectName(_fromUtf8("txtBankAngle"))
self.hLayoutMoc_2.addWidget(self.txtBankAngle)
self.vl_gbParameters.addWidget(self.frameMoc_2)
self.frameMoc_3 = QtGui.QFrame(self.gbParameters)
self.frameMoc_3.setFrameShape(QtGui.QFrame.NoFrame)
self.frameMoc_3.setFrameShadow(QtGui.QFrame.Raised)
self.frameMoc_3.setObjectName(_fromUtf8("frameMoc_3"))
self.hLayoutMoc_3 = QtGui.QHBoxLayout(self.frameMoc_3)
self.hLayoutMoc_3.setSpacing(0)
self.hLayoutMoc_3.setMargin(0)
self.hLayoutMoc_3.setObjectName(_fromUtf8("hLayoutMoc_3"))
self.label_80 = QtGui.QLabel(self.frameMoc_3)
self.label_80.setMinimumSize(QtCore.QSize(180, 0))
self.label_80.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_80.setFont(font)
self.label_80.setObjectName(_fromUtf8("label_80"))
self.hLayoutMoc_3.addWidget(self.label_80)
self.txtBankEstTime = QtGui.QLineEdit(self.frameMoc_3)
self.txtBankEstTime.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtBankEstTime.setFont(font)
self.txtBankEstTime.setObjectName(_fromUtf8("txtBankEstTime"))
self.hLayoutMoc_3.addWidget(self.txtBankEstTime)
self.vl_gbParameters.addWidget(self.frameMoc_3)
self.frameMoc_4 = QtGui.QFrame(self.gbParameters)
self.frameMoc_4.setFrameShape(QtGui.QFrame.NoFrame)
self.frameMoc_4.setFrameShadow(QtGui.QFrame.Raised)
self.frameMoc_4.setObjectName(_fromUtf8("frameMoc_4"))
self.hLayoutMoc_4 = QtGui.QHBoxLayout(self.frameMoc_4)
self.hLayoutMoc_4.setSpacing(0)
self.hLayoutMoc_4.setMargin(0)
self.hLayoutMoc_4.setObjectName(_fromUtf8("hLayoutMoc_4"))
self.label_81 = QtGui.QLabel(self.frameMoc_4)
self.label_81.setMinimumSize(QtCore.QSize(180, 0))
self.label_81.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_81.setFont(font)
self.label_81.setObjectName(_fromUtf8("label_81"))
self.hLayoutMoc_4.addWidget(self.label_81)
self.txtPilotTime = QtGui.QLineEdit(self.frameMoc_4)
self.txtPilotTime.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtPilotTime.setFont(font)
self.txtPilotTime.setObjectName(_fromUtf8("txtPilotTime"))
self.hLayoutMoc_4.addWidget(self.txtPilotTime)
self.vl_gbParameters.addWidget(self.frameMoc_4)
self.framePrimaryMoc = QtGui.QFrame(self.gbParameters)
self.framePrimaryMoc.setFrameShape(QtGui.QFrame.NoFrame)
self.framePrimaryMoc.setFrameShadow(QtGui.QFrame.Raised)
self.framePrimaryMoc.setObjectName(_fromUtf8("framePrimaryMoc"))
self.hLayoutPrimaryMoc = QtGui.QHBoxLayout(self.framePrimaryMoc)
self.hLayoutPrimaryMoc.setSpacing(0)
self.hLayoutPrimaryMoc.setMargin(0)
self.hLayoutPrimaryMoc.setObjectName(_fromUtf8("hLayoutPrimaryMoc"))
self.label_68 = QtGui.QLabel(self.framePrimaryMoc)
self.label_68.setMinimumSize(QtCore.QSize(180, 0))
self.label_68.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_68.setFont(font)
self.label_68.setObjectName(_fromUtf8("label_68"))
self.hLayoutPrimaryMoc.addWidget(self.label_68)
self.txtPrimaryMoc = QtGui.QLineEdit(self.framePrimaryMoc)
self.txtPrimaryMoc.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtPrimaryMoc.setFont(font)
self.txtPrimaryMoc.setObjectName(_fromUtf8("txtPrimaryMoc"))
self.hLayoutPrimaryMoc.addWidget(self.txtPrimaryMoc)
self.label_6 = QtGui.QLabel(self.framePrimaryMoc)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.hLayoutPrimaryMoc.addWidget(self.label_6)
self.txtPrimaryMocFt = QtGui.QLineEdit(self.framePrimaryMoc)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.txtPrimaryMocFt.setFont(font)
self.txtPrimaryMocFt.setText(_fromUtf8(""))
self.txtPrimaryMocFt.setObjectName(_fromUtf8("txtPrimaryMocFt"))
self.hLayoutPrimaryMoc.addWidget(self.txtPrimaryMocFt)
self.labelMocFt = QtGui.QLabel(self.framePrimaryMoc)
self.labelMocFt.setObjectName(_fromUtf8("labelMocFt"))
self.hLayoutPrimaryMoc.addWidget(self.labelMocFt)
self.vl_gbParameters.addWidget(self.framePrimaryMoc)
self.frameConstruct = QtGui.QFrame(self.gbParameters)
self.frameConstruct.setFrameShape(QtGui.QFrame.NoFrame)
self.frameConstruct.setFrameShadow(QtGui.QFrame.Raised)
self.frameConstruct.setObjectName(_fromUtf8("frameConstruct"))
self.hLayoutConstruct = QtGui.QHBoxLayout(self.frameConstruct)
self.hLayoutConstruct.setSpacing(0)
self.hLayoutConstruct.setMargin(0)
self.hLayoutConstruct.setObjectName(_fromUtf8("hLayoutConstruct"))
self.label_83 = QtGui.QLabel(self.frameConstruct)
self.label_83.setMinimumSize(QtCore.QSize(180, 0))
self.label_83.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_83.setFont(font)
self.label_83.setObjectName(_fromUtf8("label_83"))
self.hLayoutConstruct.addWidget(self.label_83)
self.cmbConstructionType = QtGui.QComboBox(self.frameConstruct)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cmbConstructionType.sizePolicy().hasHeightForWidth())
self.cmbConstructionType.setSizePolicy(sizePolicy)
self.cmbConstructionType.setMinimumSize(QtCore.QSize(72, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.cmbConstructionType.setFont(font)
self.cmbConstructionType.setObjectName(_fromUtf8("cmbConstructionType"))
self.hLayoutConstruct.addWidget(self.cmbConstructionType)
self.vl_gbParameters.addWidget(self.frameConstruct)
self.frameMOCmultipiler = QtGui.QFrame(self.gbParameters)
self.frameMOCmultipiler.setFrameShape(QtGui.QFrame.NoFrame)
self.frameMOCmultipiler.setFrameShadow(QtGui.QFrame.Raised)
self.frameMOCmultipiler.setObjectName(_fromUtf8("frameMOCmultipiler"))
self.hLayoutMOCmultipiler = QtGui.QHBoxLayout(self.frameMOCmultipiler)
self.hLayoutMOCmultipiler.setSpacing(0)
self.hLayoutMOCmultipiler.setMargin(0)
self.hLayoutMOCmultipiler.setObjectName(_fromUtf8("hLayoutMOCmultipiler"))
self.label_84 = QtGui.QLabel(self.frameMOCmultipiler)
self.label_84.setMinimumSize(QtCore.QSize(180, 0))
self.label_84.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_84.setFont(font)
self.label_84.setObjectName(_fromUtf8("label_84"))
self.hLayoutMOCmultipiler.addWidget(self.label_84)
self.mocSpinBox = QtGui.QSpinBox(self.frameMOCmultipiler)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mocSpinBox.sizePolicy().hasHeightForWidth())
self.mocSpinBox.setSizePolicy(sizePolicy)
self.mocSpinBox.setMinimumSize(QtCore.QSize(72, 0))
self.mocSpinBox.setMinimum(1)
self.mocSpinBox.setObjectName(_fromUtf8("mocSpinBox"))
self.hLayoutMOCmultipiler.addWidget(self.mocSpinBox)
self.vl_gbParameters.addWidget(self.frameMOCmultipiler)
self.chbDrawTolerance = QtGui.QCheckBox(self.gbParameters)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.chbDrawTolerance.setFont(font)
self.chbDrawTolerance.setObjectName(_fromUtf8("chbDrawTolerance"))
self.vl_gbParameters.addWidget(self.chbDrawTolerance)
self.vl_gbGeneral.addWidget(self.gbParameters)
self.verticalLayout.addWidget(self.gbGeneral)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.gbGeneral.setTitle(_translate("Form", "General", None))
self.label_67.setText(_translate("Form", "RNAV Specification:", None))
self.label_69.setText(_translate("Form", "Phase Of Flight:", None))
self.gbWaypoint.setTitle(_translate("Form", "Waypoint", None))
self.label_70.setText(_translate("Form", "Type:", None))
self.chbCatH.setText(_translate("Form", "Cat. H", None))
        self.chbCircularArcs.setText(_translate("Form", "Use Circular Arcs Method for Turns <= 30°", None))
self.gbParameters.setTitle(_translate("Form", "Parameters", None))
self.label_66.setText(_translate("Form", "Selection Mode:", None))
        # self.label_75.setText(_translate("Form", "In-bound Track (°):", None))
        # self.txtInbound.setText(_translate("Form", "0", None))
        # self.label_76.setText(_translate("Form", "Out-bound Track(°):", None))
        # self.txtOutbound.setText(_translate("Form", "0", None))
self.label_71.setText(_translate("Form", "IAS(kts):", None))
self.txtIas.setText(_translate("Form", "315", None))
self.label_77.setText(_translate("Form", "Altitude:", None))
self.label.setText(_translate("Form", "m", None))
self.txtAltitude.setText(_translate("Form", "9000", None))
self.label_2.setText(_translate("Form", "ft", None))
        self.label_78.setText(_translate("Form", "ISA(°C):", None))
self.txtIsa.setText(_translate("Form", "15", None))
        self.label_79.setText(_translate("Form", "Bank Angle(°):", None))
self.txtBankAngle.setText(_translate("Form", "15", None))
        self.label_80.setText(_translate("Form", "Bank Establishment Time:", None))
self.txtBankEstTime.setText(_translate("Form", "1", None))
self.label_81.setText(_translate("Form", "Pilot Time:", None))
self.txtPilotTime.setText(_translate("Form", "1", None))
        self.label_68.setText(_translate("Form", "Primary MOC:", None))
self.txtPrimaryMoc.setText(_translate("Form", "300", None))
self.label_6.setText(_translate("Form", "m", None))
self.labelMocFt.setText(_translate("Form", "ft", None))
self.label_83.setText(_translate("Form", "Construction Type:", None))
self.label_84.setText(_translate("Form", "MOCmultiplier:", None))
self.chbDrawTolerance.setText(_translate("Form", "Draw Waypoint Tolerance", None))
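
# Standard usage of a pyuic4-generated class (illustrative, not part of the
# original file):
#
#   import sys
#   from PyQt4 import QtGui
#
#   app = QtGui.QApplication(sys.argv)
#   form = QtGui.QWidget()
#   ui = Ui_RnavTurningSegmentAnalyser()
#   ui.setupUi(form)
#   form.show()
#   sys.exit(app.exec_())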
|
[
"[email protected]"
] | |
2a4b56c826c10ff5f08b34c6eaad0370b5779382
|
ca2ebf664e1d4b7338d014ca92a95bebe31063ff
|
/greeter_client.py
|
3edd2bd9bf6ef469dc18f0f5cd08cb4c4f03ab5b
|
[] |
no_license
|
ttpro1995/playground-grpc
|
cfc5d9fbad085da39286cad25181884d3d052468
|
3b41ebbaadc27deb0c06b806a3dcc928f3eaaf34
|
refs/heads/master
| 2023-03-26T10:26:10.595902 | 2021-03-27T11:29:57 | 2021-03-27T11:29:57 | 352,055,991 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
import grpc
import hello_pb2_grpc
import hello_pb2
channel = grpc.insecure_channel('localhost:50051')
stub = hello_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(hello_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(hello_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(hello_pb2.HelloRequest(name='meow'))
print(response)
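
# For context (illustrative, not part of the original file): a minimal
# matching server sketch, assuming hello.proto defines a Greeter service
# whose SayHello/SayHelloAgain take a HelloRequest and return a HelloReply
# with a `message` field (wait_for_termination needs grpcio >= 1.24):
#
#   from concurrent import futures
#   import grpc, hello_pb2, hello_pb2_grpc
#
#   class Greeter(hello_pb2_grpc.GreeterServicer):
#       def SayHello(self, request, context):
#           return hello_pb2.HelloReply(message='Hello, %s!' % request.name)
#       def SayHelloAgain(self, request, context):
#           return hello_pb2.HelloReply(message='Hello again, %s!' % request.name)
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   hello_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#   server.wait_for_termination()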
|
[
"[email protected]"
] | |
69ae600a5a427c98f64cef92c66bec4af9383a99
|
cabe35a027a4c26a6360f60b00b176235d79c98b
|
/Exercises/ex38/3.py
|
b65224f1d414c734d587d30de897a978e15ad3e4
|
[] |
no_license
|
rishikant42/Python-TheHardWay
|
e3ac9c903be5065277095827a7e31662a1d56cbf
|
5c1c7ff6c376627bc6b1abf1fc7a8d7f3ef40176
|
refs/heads/master
| 2022-07-23T15:12:32.572778 | 2022-06-25T10:29:52 | 2022-06-25T10:29:52 | 70,502,885 | 0 | 1 | null | 2017-02-26T12:51:23 | 2016-10-10T15:42:42 |
Python
|
UTF-8
|
Python
| false | false | 119 |
py
|
#import random
num = 0
while num < 100:
    num = num + 1
    if num % 2 == 0:
        continue
    print num
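
# The loop above prints the odd numbers from 1 to 99; an equivalent
# one-liner (illustrative, Python 2) would be:
#   print '\n'.join(str(n) for n in range(1, 100) if n % 2)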
|
[
"[email protected]"
] | |
f4084b1a4d000098c78ec59feb3865cb7fad3d77
|
d23e26d37b42fbce4fe51add8f2d3b29bc38f865
|
/projecteuler/p035.py
|
fd3d2afae9dbc1a44e36e60c0025772d79f975b7
|
[
"MIT"
] |
permissive
|
rene-d/math
|
6728908a3c6c6c6dc5cf77c1c8a52412c90459b9
|
34d33bdfbf2756f442c0deb085b940262d8a1f44
|
refs/heads/master
| 2022-11-05T04:20:41.204352 | 2022-10-23T08:01:04 | 2022-10-25T06:06:19 | 117,944,288 | 4 | 0 | null | 2018-02-02T21:45:40 | 2018-01-18T06:50:42 |
Python
|
UTF-8
|
Python
| false | false | 814 |
py
|
"""
Circular primes
The number, 197, is called a circular prime because all rotations of the digits:
197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
https://projecteuler.net/problem=35
"""
from eulerlib import Crible
def rotation(n):
nombres = []
chiffres = 0
q = n
while q != 0:
q //= 10
chiffres += 1
decalage = 10 ** (chiffres - 1)
for _ in range(chiffres):
n, r = divmod(n, 10)
n += r * decalage
nombres.append(n)
return nombres
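
# Illustrative check (not part of the original file): rotation() returns all
# digit rotations of its argument, including the number itself, e.g.
#   rotation(197)  ->  [719, 971, 197]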
crible = Crible(1000000)
resultat = 0
for i in crible.liste():
    if all(crible.est_premier(j) for j in rotation(i)):
resultat += 1
print(resultat)
|
[
"[email protected]"
] | |
ed18e68ccd011c6613dbedac5a2f84f27a16f8ca
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/common/dossiers2/__init__.py
|
88ded47466e28f07d7c6af032904a1d6282b0a24
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 610 |
py
|
# 2017.08.29 21:52:49 Central Europe (daylight saving time)
# Embedded file name: scripts/common/dossiers2/__init__.py
from dossiers2.common.utils import getDossierVersion
from dossiers2.custom import updaters
from dossiers2.custom.builders import *
def init():
from dossiers2.custom import init as custom_init
custom_init()
from dossiers2.ui import init as ui_init
ui_init()
# okay decompiling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\dossiers2\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:49 Central Europe (daylight saving time)
|
[
"[email protected]"
] | |
7b0ab2d7afed8a101332881219940266b8fe20d0
|
20acd4e916ce4bccbfaba12158e348e49923c46b
|
/setup.py
|
ff24496d325c047d807ce593e0428cf9216e5367
|
[] |
no_license
|
noseapp/noseapp_alchemy
|
3df4999a9fcc476f42624609ed049a8528dbdfff
|
f6606990befd147852fd939c16a6f85de143d52f
|
refs/heads/master
| 2023-04-30T21:46:28.407386 | 2015-05-29T17:47:36 | 2015-05-29T17:47:36 | 30,965,543 | 1 | 1 | null | 2023-04-16T02:42:30 | 2015-02-18T12:56:29 |
Python
|
UTF-8
|
Python
| false | false | 655 |
py
|
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
__version__ = '1.0.0'
if __name__ == '__main__':
setup(
name='noseapp_alchemy',
url='https://github.com/trifonovmixail/noseapp_alchemy',
version=__version__,
packages=find_packages(),
author='Mikhail Trifonov',
author_email='[email protected]',
description='SqlAlchemy extension for noseapp lib',
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'noseapp>=1.0.9',
'sqlalchemy==0.9.8',
],
)
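
# Typical usage (illustrative, not part of the original file): from the
# repository root, install with `pip install .`, or build a source
# distribution with `python setup.py sdist`.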
|
[
"[email protected]"
] | |
17cc52ea7967e87801c45229c14d5036fa03301c
|
1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc
|
/venv/lib/python2.7/site-packages/ansible/modules/clustering/k8s/_kubernetes.py
|
34ceb6c51365f739c7b36aa0bebdebb43478f2d7
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/devopscourses_infra
|
1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c
|
e42e5deafce395af869084ede245fc6cff6d0b2c
|
refs/heads/master
| 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 |
MIT
| 2019-05-21T06:35:20 | 2019-03-15T08:35:54 |
HCL
|
UTF-8
|
Python
| false | false | 16,691 |
py
|
#!/usr/bin/python
# Copyright: (c) 2015, Google Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubernetes
version_added: "2.1"
deprecated:
removed_in: "2.9"
  why: This module used the oc command line tool, whereas M(k8s_raw) goes over the REST API.
alternative: Use M(k8s_raw) instead.
short_description: Manage Kubernetes resources
description:
- This module can manage Kubernetes resources on an existing cluster using
the Kubernetes server API. Users can specify in-line API data, or
specify an existing Kubernetes YAML file.
  - Currently, this module
    (1) only supports HTTP Basic Auth and
    (2) only supports 'strategic merge' for update (http://goo.gl/fCPYxT).
    SSL certs are not working; use C(validate_certs=off) to disable.
options:
api_endpoint:
description:
- The IPv4 API endpoint of the Kubernetes cluster.
required: true
aliases: [ endpoint ]
inline_data:
description:
- The Kubernetes YAML data to send to the API I(endpoint). This option is
mutually exclusive with C('file_reference').
required: true
file_reference:
description:
      - Specify full path to a Kubernetes YAML file to send to the API I(endpoint).
This option is mutually exclusive with C('inline_data').
patch_operation:
description:
- Specify patch operation for Kubernetes resource update.
- For details, see the description of PATCH operations at
U(https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/api-conventions.md#patch-operations).
default: Strategic Merge Patch
choices: [ JSON Patch, Merge Patch, Strategic Merge Patch ]
aliases: [ patch_strategy ]
version_added: 2.4
certificate_authority_data:
description:
- Certificate Authority data for Kubernetes server. Should be in either
standard PEM format or base64 encoded PEM data. Note that certificate
verification is broken until ansible supports a version of
'match_hostname' that can match the IP address against the CA data.
state:
description:
- The desired action to take on the Kubernetes data.
required: true
choices: [ absent, present, replace, update ]
default: present
url_password:
description:
- The HTTP Basic Auth password for the API I(endpoint). This should be set
unless using the C('insecure') option.
aliases: [ password ]
url_username:
description:
- The HTTP Basic Auth username for the API I(endpoint). This should be set
unless using the C('insecure') option.
default: admin
aliases: [ username ]
insecure:
description:
- Reverts the connection to using HTTP instead of HTTPS. This option should
        only be used when executing the M('kubernetes') module local to the Kubernetes
        cluster using the insecure local port (localhost:8080 by default).
validate_certs:
description:
- Enable/disable certificate validation. Note that this is set to
C(false) until Ansible can support IP address based certificate
hostname matching (exists in >= python3.5.0).
type: bool
default: 'no'
author:
- Eric Johnson (@erjohnso) <[email protected]>
'''
EXAMPLES = '''
# Create a new namespace with in-line YAML.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
inline_data:
kind: Namespace
apiVersion: v1
metadata:
name: ansible-test
labels:
label_env: production
label_ver: latest
annotations:
a1: value1
a2: value2
state: present
# Create a new namespace from a YAML file.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
file_reference: /path/to/create_namespace.yaml
state: present
# Do the same thing, but using the insecure localhost port
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
insecure: true
file_reference: /path/to/create_namespace.yaml
state: present
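
# Update an existing resource with the default Strategic Merge Patch
# (illustrative example, not part of the original module docs)
- name: Update a kubernetes namespace
  kubernetes:
    api_endpoint: 123.45.67.89
    url_username: admin
    url_password: redacted
    inline_data:
      kind: Namespace
      apiVersion: v1
      metadata:
        name: ansible-test
        labels:
          label_env: staging
    state: update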
'''
RETURN = '''
# Example response from creating a Kubernetes Namespace.
api_response:
description: Raw response from Kubernetes API, content varies with API.
returned: success
type: complex
contains:
apiVersion: "v1"
kind: "Namespace"
metadata:
creationTimestamp: "2016-01-04T21:16:32Z"
name: "test-namespace"
resourceVersion: "509635"
selfLink: "/api/v1/namespaces/test-namespace"
uid: "6dbd394e-b328-11e5-9a02-42010af0013a"
spec:
finalizers:
- kubernetes
status:
phase: "Active"
'''
import base64
import json
import traceback
YAML_IMP_ERR = None
try:
import yaml
HAS_LIB_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
HAS_LIB_YAML = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.urls import fetch_url
############################################################################
############################################################################
# For API coverage, this Ansible module provides the capability to operate on
# all Kubernetes objects that support a "create" call (except for 'Events').
# In order to obtain a valid list of Kubernetes objects, the v1 spec file
# was referenced and the below python script was used to parse the JSON
# spec file, extract only the objects with a description starting with
# 'create a'. The script then iterates over all of these base objects
# to get the endpoint URL and was used to generate the KIND_URL map.
#
# import json
# from urllib2 import urlopen
#
# r = urlopen("https://raw.githubusercontent.com/kubernetes"
# "/kubernetes/master/api/swagger-spec/v1.json")
# v1 = json.load(r)
#
# apis = {}
# for a in v1['apis']:
# p = a['path']
# for o in a['operations']:
# if o["summary"].startswith("create a") and o["type"] != "v1.Event":
# apis[o["type"]] = p
#
# def print_kind_url_map():
# results = []
# for a in apis.keys():
# results.append('"%s": "%s"' % (a[3:].lower(), apis[a]))
# results.sort()
# print("KIND_URL = {")
# print(",\n".join(results))
# print("}")
#
# if __name__ == '__main__':
# print_kind_url_map()
############################################################################
############################################################################
KIND_URL = {
"binding": "/api/v1/namespaces/{namespace}/bindings",
"configmap": "/api/v1/namespaces/{namespace}/configmaps",
"endpoints": "/api/v1/namespaces/{namespace}/endpoints",
"limitrange": "/api/v1/namespaces/{namespace}/limitranges",
"namespace": "/api/v1/namespaces",
"node": "/api/v1/nodes",
"persistentvolume": "/api/v1/persistentvolumes",
"persistentvolumeclaim": "/api/v1/namespaces/{namespace}/persistentvolumeclaims", # NOQA
"pod": "/api/v1/namespaces/{namespace}/pods",
"podtemplate": "/api/v1/namespaces/{namespace}/podtemplates",
"replicationcontroller": "/api/v1/namespaces/{namespace}/replicationcontrollers", # NOQA
"resourcequota": "/api/v1/namespaces/{namespace}/resourcequotas",
"secret": "/api/v1/namespaces/{namespace}/secrets",
"service": "/api/v1/namespaces/{namespace}/services",
"serviceaccount": "/api/v1/namespaces/{namespace}/serviceaccounts",
"daemonset": "/apis/extensions/v1beta1/namespaces/{namespace}/daemonsets",
"deployment": "/apis/extensions/v1beta1/namespaces/{namespace}/deployments",
"horizontalpodautoscaler": "/apis/extensions/v1beta1/namespaces/{namespace}/horizontalpodautoscalers", # NOQA
"ingress": "/apis/extensions/v1beta1/namespaces/{namespace}/ingresses",
"job": "/apis/extensions/v1beta1/namespaces/{namespace}/jobs",
}
USER_AGENT = "ansible-k8s-module/0.0.1"
# TODO(erjohnso): SSL Certificate validation is currently unsupported.
# It can be made to work when the following are true:
# - Ansible consistently uses a "match_hostname" that supports IP Address
# matching. This is now true in >= python3.5.0. Currently, this feature
# is not yet available in backports.ssl_match_hostname (still 3.4).
# - Ansible allows passing in the self-signed CA cert that is created with
# a kubernetes master. The lib/ansible/module_utils/urls.py method,
# SSLValidationHandler.get_ca_certs() needs a way for the Kubernetes
# CA cert to be passed in and included in the generated bundle file.
# When this is fixed, the following changes can be made to this module,
# - Remove the 'return' statement in line 254 below
# - Set 'required=true' for certificate_authority_data and ensure that
# ansible's SSLValidationHandler.get_ca_certs() can pick up this CA cert
# - Set 'required=true' for the validate_certs param.
def decode_cert_data(module):
return
# pylint: disable=unreachable
d = module.params.get("certificate_authority_data")
if d and not d.startswith("-----BEGIN"):
module.params["certificate_authority_data"] = base64.b64decode(d)
def api_request(module, url, method="GET", headers=None, data=None):
body = None
if data:
data = json.dumps(data)
response, info = fetch_url(module, url, method=method, headers=headers, data=data)
if int(info['status']) == -1:
module.fail_json(msg="Failed to execute the API request: %s" % info['msg'], url=url, method=method, headers=headers)
if response is not None:
body = json.loads(response.read())
return info, body
def k8s_create_resource(module, url, data):
info, body = api_request(module, url, method="POST", data=data, headers={"Content-Type": "application/json"})
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to create the resource: %s" % info['msg'], url=url)
return True, body
def k8s_delete_resource(module, url, data):
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to remove a resource")
url = url + '/' + name
info, body = api_request(module, url, method="DELETE")
if info['status'] == 404:
return False, "Resource name '%s' already absent" % name
elif info['status'] >= 400:
module.fail_json(msg="failed to delete the resource '%s': %s" % (name, info['msg']), url=url)
return True, "Successfully deleted resource name '%s'" % name
def k8s_replace_resource(module, url, data):
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to replace a resource")
headers = {"Content-Type": "application/json"}
url = url + '/' + name
info, body = api_request(module, url, method="PUT", data=data, headers=headers)
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to replace the resource '%s': %s" % (name, info['msg']), url=url)
return True, body
def k8s_update_resource(module, url, data, patch_operation):
# PATCH operations are explained in details at:
# https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/api-conventions.md#patch-operations
PATCH_OPERATIONS_MAP = {
'JSON Patch': 'application/json-patch+json',
'Merge Patch': 'application/merge-patch+json',
'Strategic Merge Patch': 'application/strategic-merge-patch+json',
}
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to update a resource")
headers = {"Content-Type": PATCH_OPERATIONS_MAP[patch_operation]}
url = url + '/' + name
info, body = api_request(module, url, method="PATCH", data=data, headers=headers)
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to update the resource '%s': %s" % (name, info['msg']), url=url)
return True, body
def main():
module = AnsibleModule(
argument_spec=dict(
http_agent=dict(type='str', default=USER_AGENT),
url_username=dict(type='str', default='admin', aliases=['username']),
url_password=dict(type='str', default='', no_log=True, aliases=['password']),
force_basic_auth=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=False),
certificate_authority_data=dict(type='str'),
insecure=dict(type='bool', default=False),
api_endpoint=dict(type='str', required=True),
patch_operation=dict(type='str', default='Strategic Merge Patch', aliases=['patch_strategy'],
choices=['JSON Patch', 'Merge Patch', 'Strategic Merge Patch']),
file_reference=dict(type='str'),
inline_data=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present', 'replace', 'update'])
),
mutually_exclusive=(('file_reference', 'inline_data'),
('url_username', 'insecure'),
('url_password', 'insecure')),
required_one_of=(('file_reference', 'inline_data'),),
)
if not HAS_LIB_YAML:
module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
decode_cert_data(module)
api_endpoint = module.params.get('api_endpoint')
state = module.params.get('state')
insecure = module.params.get('insecure')
inline_data = module.params.get('inline_data')
file_reference = module.params.get('file_reference')
patch_operation = module.params.get('patch_operation')
if inline_data:
if not isinstance(inline_data, dict) and not isinstance(inline_data, list):
data = yaml.safe_load(inline_data)
else:
data = inline_data
else:
try:
f = open(file_reference, "r")
data = [x for x in yaml.safe_load_all(f)]
f.close()
if not data:
module.fail_json(msg="No valid data could be found.")
except Exception:
module.fail_json(msg="The file '%s' was not found or contained invalid YAML/JSON data" % file_reference)
# set the transport type and build the target endpoint url
transport = 'https'
if insecure:
transport = 'http'
target_endpoint = "%s://%s" % (transport, api_endpoint)
body = []
changed = False
# make sure the data is a list
if not isinstance(data, list):
data = [data]
for item in data:
namespace = "default"
if item and 'metadata' in item:
namespace = item.get('metadata', {}).get('namespace', "default")
kind = item.get('kind', '').lower()
try:
url = target_endpoint + KIND_URL[kind]
except KeyError:
module.fail_json(msg="invalid resource kind specified in the data: '%s'" % kind)
url = url.replace("{namespace}", namespace)
else:
url = target_endpoint
if state == 'present':
item_changed, item_body = k8s_create_resource(module, url, item)
elif state == 'absent':
item_changed, item_body = k8s_delete_resource(module, url, item)
elif state == 'replace':
item_changed, item_body = k8s_replace_resource(module, url, item)
elif state == 'update':
item_changed, item_body = k8s_update_resource(module, url, item, patch_operation)
changed |= item_changed
body.append(item_body)
module.exit_json(changed=changed, api_response=body)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f65b0b1dc6d7043f86f1a147c66acad09a14d0b1
|
f7574ee7a679261e758ba461cb5a5a364fdb0ed1
|
/PopulatingNextRightPointersinEachNodeII.py
|
1ff5842f547af6b76b2f6a0ce91307d35175faa3
|
[] |
no_license
|
janewjy/Leetcode
|
807050548c0f45704f2f0f821a7fef40ffbda0ed
|
b4dccd3d1c59aa1e92f10ed5c4f7a3e1d08897d8
|
refs/heads/master
| 2021-01-10T19:20:22.858158 | 2016-02-26T16:03:19 | 2016-02-26T16:03:19 | 40,615,255 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,657 |
py
|
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
if not root:
return
queue = []
front = [root]
while front:
for i in xrange(len(front)):
if front[i].left:
queue.append(front[i].left)
if front[i].right:
queue.append(front[i].right)
if i < len(front) -1:
front[i].next = front[i+1]
front = queue
queue = []
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
if not root:
return
front = [root]
while front:
level = []
for i in xrange(len(front)):
if front[i].left:
level.append(front[i].left)
if front[i].right:
level.append(front[i].right)
if i != len(front)-1:
front[i].next = front[i+1]
front = level
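
# Illustrative usage of either variant above (not part of the original file):
#   root = TreeLinkNode(1)
#   root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
#   Solution().connect(root)
#   assert root.left.next is root.right
#   assert root.right.next is None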
|
[
"[email protected]"
] | |
3cbf941917133ce9558f26f7a1ef68338357df49
|
fa24173e152aa2132895fda77022d998a7d61f38
|
/wick/models/segmentation/testnets/mnas_linknets/linknet.py
|
1e2c7d81c88c20a48d832be93380b0c3b7d9369c
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
shijie2016/wick
|
1c791ec6b22016787c84583fe30823150b279888
|
a83d9ca753e8732efb780e84a54d667a8b5f3f37
|
refs/heads/master
| 2020-04-28T09:19:22.693956 | 2019-03-08T16:15:26 | 2019-03-08T16:15:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 43,544 |
py
|
# Source: https://github.com/snakers4/mnasnet-pytorch/blob/master/src/models/linknet.py
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from .resnext import resnext101_32x4d
from .inception_resnet import inceptionresnetv2
from .inception4 import inceptionv4
from .decoder import DecoderBlockLinkNetV2 as DecoderBlock
from .decoder import DecoderBlockLinkNetInceptionV2 as DecoderBlockInception
nonlinearity = nn.ReLU
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LinkNet18(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [64, 128, 256, 512]
resnet = models.resnet18(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkNet34(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkNet50(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 2048]
resnet = models.resnet50(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
# self.firstconv = resnet.conv1
        # assert num_channels == 3, "num_channels is not used now; to use it, change the first conv layer to support channel counts other than 3"
        # try to use e.g. an 8-channel tensor as the first input
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkNet101(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 2048]
resnet = models.resnet101(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
# self.firstconv = resnet.conv1
        # assert num_channels == 3, "num_channels is not used now; to use it, change the first conv layer to support channel counts other than 3"
        # try to use e.g. an 8-channel tensor as the first input
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkNeXt(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 2048]
        # always loads ImageNet weights here (this class's pretrained flag is not applied to the backbone)
resnet = resnext101_32x4d(num_classes=1000, pretrained='imagenet')
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
self.stem = resnet.stem
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.stem,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.stem(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
# d4 = e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
# return F.sigmoid(f5)
return f5
class LinkNet152(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=3,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 2048]
resnet = models.resnet152(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
# self.firstconv = resnet.conv1
        # assert num_channels == 3, "num_channels is not used now; to use it, change the first conv layer to support channel counts other than 3"
        # try to use e.g. an 8-channel tensor as the first input
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkCeption(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
self.mean = (0.5, 0.5, 0.5)
self.std = (0.5, 0.5, 0.5)
filters = [64, 384, 384, 1024, 1536]
        # always loads ImageNet weights here (this class's pretrained flag is not applied to the backbone)
inception = inceptionv4(num_classes=1000, pretrained='imagenet')
if num_channels == 3:
self.stem1 = nn.Sequential(
inception.features[0],
inception.features[1],
inception.features[2],
)
else:
self.stem1 = nn.Sequential(
BasicConv2d(num_channels, 32, kernel_size=3, stride=2),
inception.features[1],
inception.features[2],
)
self.stem2 = nn.Sequential(
inception.features[3],
inception.features[4],
inception.features[5],
)
self.block1 = nn.Sequential(
inception.features[6],
inception.features[7],
inception.features[8],
inception.features[9],
)
self.tr1 = inception.features[10]
self.block2 = nn.Sequential(
inception.features[11],
inception.features[12],
inception.features[13],
inception.features[14],
inception.features[15],
inception.features[16],
inception.features[17],
)
self.tr2 = inception.features[18]
self.block3 = nn.Sequential(
inception.features[19],
inception.features[20],
inception.features[21]
)
# Decoder
self.decoder4 = DecoderBlockInception(in_channels=filters[4],
out_channels=filters[3],
n_filters=filters[3],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlockInception(in_channels=filters[3],
out_channels=filters[2],
n_filters=filters[2],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlockInception(in_channels=filters[2],
out_channels=filters[1],
n_filters=filters[1],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlockInception(in_channels=filters[1],
out_channels=filters[0],
n_filters=filters[0],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 1, stride=2)
self.finalnorm1 = nn.BatchNorm2d(32)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalnorm2 = nn.BatchNorm2d(32)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=0)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.stem1,
self.stem2,
self.block1,
self.tr1,
self.block2,
self.tr2,
self.block3]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
final_shape = x.shape[2:]
# Encoder
x = self.stem1(x)
e1 = self.stem2(x)
e2 = self.block1(e1)
e3 = self.tr1(e2)
e3 = self.block2(e3)
e4 = self.tr2(e3)
e4 = self.block3(e4)
# Decoder with Skip Connections
d4 = self.decoder4(e4)[:, :, 0:e3.size(2), 0:e3.size(3)] + e3
d3 = self.decoder3(d4)[:, :, 0:e2.size(2), 0:e2.size(3)] + e2
        d2_skip = self.decoder2(e1)  # compute the skip projection once instead of three times
        d2 = self.decoder2(d3)[:, :, 0:d2_skip.size(2), 0:d2_skip.size(3)] + d2_skip
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f1 = self.finalnorm1(f1)
f2 = self.finalrelu1(f1)
f2 = self.finalnorm2(f2)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
out = F.interpolate(f5, size=final_shape, mode="bilinear")
return out
class LinkInceptionResNet(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=3,
pretrained=True
):
super().__init__()
self.mean = (0.5, 0.5, 0.5)
self.std = (0.5, 0.5, 0.5)
filters = [64, 192, 320, 1088, 2080]
        # always loads ImageNet weights here (this class's pretrained flag is not applied to the backbone)
ir = inceptionresnetv2(pretrained='imagenet', num_classes=1000)
if num_channels == 3:
self.stem1 = nn.Sequential(
ir.conv2d_1a,
ir.conv2d_2a,
ir.conv2d_2b,
)
else:
self.stem1 = nn.Sequential(
BasicConv2d(num_channels, 32, kernel_size=3, stride=2),
ir.conv2d_2a,
ir.conv2d_2b,
)
self.maxpool_3a = ir.maxpool_3a
self.stem2 = nn.Sequential(
ir.conv2d_3b,
ir.conv2d_4a,
)
self.maxpool_5a = ir.maxpool_5a
self.mixed_5b = ir.mixed_5b
self.mixed_6a = ir.mixed_6a
self.mixed_7a = ir.mixed_7a
self.skip1 = ir.repeat
self.skip2 = ir.repeat_1
self.skip3 = ir.repeat_2
# Decoder
self.decoder3 = DecoderBlockInception(in_channels=filters[4],
out_channels=filters[3],
n_filters=filters[3],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlockInception(in_channels=filters[3],
out_channels=filters[2],
n_filters=filters[2],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlockInception(in_channels=filters[2],
out_channels=filters[1],
n_filters=filters[1],
last_padding=0,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder0 = DecoderBlockInception(in_channels=filters[1],
out_channels=filters[0],
n_filters=filters[0],
last_padding=2,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalnorm1 = nn.BatchNorm2d(32)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalnorm2 = nn.BatchNorm2d(32)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.stem1,
self.stem2,
self.mixed_5b,
self.mixed_6a,
self.mixed_7a,
self.skip1,
self.skip2,
self.skip3]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.stem1(x)
x1 = self.maxpool_3a(x)
x1 = self.stem2(x1)
        x2 = self.maxpool_5a(x1)  # was maxpool_3a (an identical 3x3/stride-2 pool), but maxpool_5a is the layer defined for this stage
x2 = self.mixed_5b(x2)
e1 = self.skip1(x2)
e1_resume = self.mixed_6a(e1)
e2 = self.skip2(e1_resume)
e2_resume = self.mixed_7a(e2)
e3 = self.skip3(e2_resume)
# Decoder with Skip Connections
d3 = self.decoder3(e3)[:, :, 0:e2.size(2), 0:e2.size(3)] + e2
d2 = self.decoder2(d3)[:, :, 0:e1.size(2), 0:e1.size(3)] + e1
d1 = self.decoder1(d2)[:, :, 0:x1.size(2), 0:x1.size(3)] + x1
d0 = self.decoder0(d1)
# Final Classification
f1 = self.finaldeconv1(d0)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkDenseNet161(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [384, 768, 2112, 2208]
densenet = models.densenet161(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
if num_channels == 3:
self.firstconv = densenet.features.conv0
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.stem = nn.Sequential(
self.firstconv,
densenet.features.norm0,
densenet.features.relu0,
densenet.features.pool0,
)
self.encoder1 = nn.Sequential(densenet.features.denseblock1)
self.encoder2 = nn.Sequential(densenet.features.transition1,
densenet.features.denseblock2)
self.encoder3 = nn.Sequential(densenet.features.transition2,
densenet.features.denseblock3)
self.encoder4 = nn.Sequential(densenet.features.transition3,
densenet.features.denseblock4)
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def require_encoder_grad(self, requires_grad):
blocks = [self.stem,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.stem(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class LinkDenseNet121(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 1024]
densenet = models.densenet121(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
if num_channels == 3:
self.firstconv = densenet.features.conv0
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.stem = nn.Sequential(
self.firstconv,
densenet.features.norm0,
densenet.features.relu0,
densenet.features.pool0,
)
self.encoder1 = nn.Sequential(densenet.features.denseblock1)
self.encoder2 = nn.Sequential(densenet.features.transition1,
densenet.features.denseblock2)
self.encoder3 = nn.Sequential(densenet.features.transition2,
densenet.features.denseblock3)
self.encoder4 = nn.Sequential(densenet.features.transition3,
densenet.features.denseblock4)
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def require_encoder_grad(self, requires_grad):
blocks = [self.stem,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.stem(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
return f5
class CoarseLinkNet50(nn.Module):
def __init__(self,
num_classes,
num_channels=3,
is_deconv=False,
decoder_kernel_size=4,
pretrained=True
):
super().__init__()
filters = [256, 512, 1024, 2048]
resnet = models.resnet50(pretrained=pretrained)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
# self.firstconv = resnet.conv1
        # assert num_channels == 3, "num_channels is not used now; to use it, change the first conv layer to support channel counts other than 3"
        # try to use e.g. an 8-channel tensor as the first input
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlock(in_channels=filters[3],
n_filters=filters[2],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3 = DecoderBlock(in_channels=filters[2],
n_filters=filters[1],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2 = DecoderBlock(in_channels=filters[1],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1 = DecoderBlock(in_channels=filters[0],
n_filters=filters[0],
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
# Final Classifier
self.finalconv1 = nn.Conv2d(filters[0], 32, 2, padding=1)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, num_classes, 2, padding=1)
def freeze(self):
self.require_encoder_grad(False)
def unfreeze(self):
self.require_encoder_grad(True)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finalconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
return f3
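# --- Usage sketch (added; not part of the original file) ---
# Minimal example of driving one of the LinkNet variants above: build it,
# freeze the pretrained encoder so only the decoder trains at first, and
# run a forward pass. Assumes input H and W divisible by 32 so the decoder
# outputs align with the encoder skip connections.
if __name__ == "__main__":
    import torch

    net = LinkNet34(num_classes=2, num_channels=3, pretrained=False)
    net.freeze()  # decoder-only training phase
    x = torch.randn(1, 3, 256, 256)  # N x C x H x W
    with torch.no_grad():
        logits = net(x)
    # Expected torch.Size([1, 2, 256, 256]), assuming DecoderBlock upsamples 2x.
    print(logits.shape)
    net.unfreeze()  # later: fine-tune the whole network end to end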
|
[
"[email protected]"
] | |
793df36891361844131a4d33e050c547c0b01bfc
|
59d7db2d959e071991ece694728958b08a6f7c58
|
/envs/create_game/levels/create_game_marker.py
|
772a0d58b12cf5021463822a56ef3b8f0d5823b7
|
[] |
no_license
|
Sivinious/cse257
|
04cdd6f14a7ac0db66626e93305e4015256f1433
|
6b6f21c289094487da89b261af0dacba8135cd25
|
refs/heads/main
| 2023-05-15T08:12:54.908326 | 2021-05-29T10:12:39 | 2021-05-29T10:12:39 | 371,932,535 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,203 |
py
|
import numpy as np
from .create_level_file import CreateLevelFile
def ball_begin_handler(arbiter, space, data):
obj_1 = arbiter.shapes[1]
obj_2 = arbiter.shapes[0]
if hasattr(obj_1, 'is_marker') and hasattr(obj_2, 'is_target'):
obj_1.hit_target = True
if hasattr(obj_2, 'is_marker') and hasattr(obj_1, 'is_target'):
obj_2.hit_target = True
return True
class CreateGameMarker(CreateLevelFile):
"""
    Defines additional behavior in the logic game for when another ball
    must first collide with the target ball. We call this the "Marker"
    ball. Inherit from this class to provide additional marker-specific
    behavior.
"""
def __init__(self, available_tools=None, gravity=(0.0, -2.0),
tool_variety=True, tool_gen=None):
super().__init__()
self.hit_target_handler = None
self.marker_must_hit = False
self.target_reward = 0.0
def set_settings(self, settings):
super().set_settings(settings)
self.target_reward = settings.target_reward
def reset(self):
obs = super().reset()
self.marker_obj = self.env_tools[0]
self.marker_obj.shape.is_marker = True
self.target_obj.shape.is_target = True
if self.hit_target_handler is None:
self.hit_target_handler = self.space.add_collision_handler(self.marker_obj.shape.collision_type,
self.target_obj.shape.collision_type)
self.hit_target_handler.begin = ball_begin_handler
self.prev_dist = self.calc_distance(self.target_obj.body.position, self.marker_obj.body.position)
self.target_hit = 0.0
self.marker_collided = False
return obs
def step(self, action):
obs, reward, done, info = super().step(action)
general_reward = reward
# Dense reward based off of distance from target ball to the goal
cur_target_pos = self.target_obj.body.position
move_dist = self.calc_distance(self.target_obj_start_pos, cur_target_pos)
if self.target_hit == 0 and move_dist > self.settings.move_thresh and \
(not self.marker_must_hit or hasattr(self.marker_obj.shape, 'hit_target')):
if self.settings.marker_reward == 'reg':
self.target_hit += 1.
reward += self.target_reward
elif self.settings.marker_reward == 'dir':
goal_on_left = self.target_obj_start_pos[0] < self.goal_pos[0]
moved_target_left = self.target_obj_start_pos[0] < cur_target_pos[0]
if goal_on_left == moved_target_left:
self.target_hit += 1.0
reward += self.target_reward
else:
raise ValueError('Unknown marker reward type')
self.prev_dist = self.calc_distance(cur_target_pos, self.goal_pos)
else:
distance = self.calc_distance(cur_target_pos,
self.marker_obj.body.position)
reward += self.dense_reward_scale * (self.prev_dist - distance)
self.episode_dense_reward += self.dense_reward_scale * (self.prev_dist - distance)
self.prev_dist = distance
# Terminate if the marker ball is out of bounds AND target is not hit yet
if (not self.within_bounds(self.marker_obj.body.position)) and self.target_hit == 0:
done = True
reward += self.settings.marker_gone_reward
self.episode_reward += (reward - general_reward)
if done:
info['ep_len'] = self.episode_len
info['ep_target_hit'] = self.target_hit
info['ep_goal_hit'] = self.goal_hit
info['ep_reward'] = self.episode_reward
info['ep_subgoal_reward'] = self.total_subgoal_add_reward
info['ep_no_op'] = self.no_op_count
info['ep_invalid_action'] = self.invalid_action_count
info['ep_blocked_action'] = self.blocked_action_count
info['ep_overlap_action'] = self.overlap_action_count
info['ep_dense_reward'] = self.episode_dense_reward
info['ep_placed_tools'] = len(self.placed_tools)
return obs, reward, done, info
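# --- Illustration (added; not part of the original file) ---
# Self-contained sketch of the pymunk collision-handler pattern that
# ball_begin_handler implements above: tag shapes with custom attributes,
# register a begin callback for their collision types, then read the flag.
# Assumes pymunk 5/6; all bodies, sizes, and positions here are illustrative.
def _marker_collision_demo():
    import pymunk
    space = pymunk.Space()
    space.gravity = (0.0, -2.0)  # same default gravity as CreateGameMarker
    marker_body = pymunk.Body(1, pymunk.moment_for_circle(1, 0, 5))
    marker_body.position = (0, 20)
    marker = pymunk.Circle(marker_body, 5)
    marker.collision_type = 1
    marker.is_marker = True
    target_body = pymunk.Body(body_type=pymunk.Body.STATIC)
    target_body.position = (0, 0)
    target = pymunk.Circle(target_body, 5)
    target.collision_type = 2
    target.is_target = True
    space.add(marker_body, marker, target_body, target)
    handler = space.add_collision_handler(1, 2)
    handler.begin = ball_begin_handler
    for _ in range(600):  # let the marker fall onto the target
        space.step(1 / 60.0)
    return getattr(marker, 'hit_target', False)  # True once they touch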
|
[
"[email protected]"
] | |
6ce7c3b81e8347c816509e1759efdab460e04679
|
7733ae47afbf86989e1d3bfd06b9c4ca3edba0e1
|
/data_structures/stacks/stack_using_linked_list.py
|
0779cac32020c3ac7817c2ae4335cb52817268a7
|
[] |
no_license
|
EricMontague/Datastructures-and-Algorithms
|
853ac290557e9ecf60c187401a7d576a99529ba7
|
2ce6d8b893f0b8affc8c880165fb3f7ecfdeb19b
|
refs/heads/master
| 2021-07-25T14:59:59.059604 | 2021-01-26T18:24:44 | 2021-01-26T18:24:44 | 242,427,253 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,302 |
py
|
"""This module contains my implementation of a stack
based on a singly linked list.
"""
class StackItem:
"""Class to represent an item in a stack."""
def __init__(self, data):
self.data = data
self.next = None
class Stack:
"""Class to represent a stack based on a singly linked list."""
def __init__(self):
self._head = None
self._size = 0
def push(self, data):
"""Insert the given data on top of the stack."""
item = StackItem(data)
item.next = self._head
self._head = item
self._size += 1
def pop(self):
"""Remove and return the value of the item on top of the
stack.
"""
if self.is_empty():
raise IndexError("Stack is empty.")
item = self._head.data
self._head = self._head.next
self._size -= 1
return item
def is_empty(self):
"""Return True if the stack is empty, else return False."""
return self._size == 0
def peek(self):
"""Return the value of the item on the top of the stack."""
if self.is_empty():
return None
return self._head.data
@property
def size(self):
"""Return the number of items in the stack."""
return self._size
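if __name__ == "__main__":
    # Small usage demo (added; not part of the original module): verify
    # LIFO ordering and the size/emptiness bookkeeping.
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert stack.pop() == 2
    assert stack.size == 1
    assert not stack.is_empty()
    assert stack.pop() == 1
    assert stack.is_empty()
    print("all stack assertions passed")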
|
[
"[email protected]"
] | |
2770858fad66ba1d79ddd11c3f997d027173e1b0
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/ospf/actxpol.py
|
93ffd6b54cfa9d3a6d2a078c3d3b02116afbdc08
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,769 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ACtxPol(Mo):
meta = ClassMeta("cobra.model.ospf.ACtxPol")
meta.isAbstract = True
meta.moClassName = "ospfACtxPol"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstraction of OSPF Context Policy"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.fabric.L3CtxPol")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoDomPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fabric.L3DomPol")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDef")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDefAf")
meta.concreteSubClasses.add("cobra.model.ospf.CtxPol")
meta.rnPrefixes = [
]
prop = PropMeta("str", "bwRef", "bwRef", 1089, PropCategory.REGULAR)
prop.label = "Bandwidth Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4000000)]
prop.defaultValue = 40000
prop.defaultValueStr = "40000"
meta.props.add("bwRef", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "ctrl", "ctrl", 22755, PropCategory.REGULAR)
prop.label = "Control knobs"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop._addConstant("name-lookup", "enable-name-lookup-for-router-ids", 2)
prop._addConstant("pfx-suppress", "prefix-suppression", 1)
meta.props.add("ctrl", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dist", "dist", 1087, PropCategory.REGULAR)
prop.label = "Distance Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 255)]
prop.defaultValue = 110
prop.defaultValueStr = "110"
meta.props.add("dist", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "grCtrl", "grCtrl", 1098, PropCategory.REGULAR)
prop.label = "Graceful Restart Controls"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "helper"
prop._addConstant("helper", "graceful-restart-helper", 1)
meta.props.add("grCtrl", prop)
prop = PropMeta("str", "lsaArrivalIntvl", "lsaArrivalIntvl", 1094, PropCategory.REGULAR)
prop.label = "Min Arrival Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(10, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("lsaArrivalIntvl", prop)
prop = PropMeta("str", "lsaGpPacingIntvl", "lsaGpPacingIntvl", 1093, PropCategory.REGULAR)
prop.label = "Pacing Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1800)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("lsaGpPacingIntvl", prop)
prop = PropMeta("str", "lsaHoldIntvl", "lsaHoldIntvl", 1096, PropCategory.REGULAR)
prop.label = "Throttle Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaHoldIntvl", prop)
prop = PropMeta("str", "lsaMaxIntvl", "lsaMaxIntvl", 1097, PropCategory.REGULAR)
prop.label = "Throttle Max Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaMaxIntvl", prop)
prop = PropMeta("str", "lsaStartIntvl", "lsaStartIntvl", 1095, PropCategory.REGULAR)
prop.label = "Throttle Start Wait Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 5000)]
prop.defaultValue = 0
prop.defaultValueStr = "0"
meta.props.add("lsaStartIntvl", prop)
prop = PropMeta("str", "maxEcmp", "maxEcmp", 1088, PropCategory.REGULAR)
prop.label = "Max ECMP"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 64)]
prop.defaultValue = 8
prop.defaultValueStr = "8"
meta.props.add("maxEcmp", prop)
prop = PropMeta("str", "maxLsaAction", "maxLsaAction", 17808, PropCategory.REGULAR)
prop.label = "Action"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "reject"
prop._addConstant("log", "log", 2)
prop._addConstant("reject", "reject", 0)
prop._addConstant("restart", "restart", 1)
meta.props.add("maxLsaAction", prop)
prop = PropMeta("str", "maxLsaNum", "maxLsaNum", 17803, PropCategory.REGULAR)
prop.label = "Maximum # of non self-generated LSAs"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 20000
prop.defaultValueStr = "20000"
meta.props.add("maxLsaNum", prop)
prop = PropMeta("str", "maxLsaResetIntvl", "maxLsaResetIntvl", 17807, PropCategory.REGULAR)
prop.label = "Reset Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("maxLsaResetIntvl", prop)
prop = PropMeta("str", "maxLsaSleepCnt", "maxLsaSleepCnt", 17805, PropCategory.REGULAR)
prop.label = "Sleep Count"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepCnt", prop)
prop = PropMeta("str", "maxLsaSleepIntvl", "maxLsaSleepIntvl", 17806, PropCategory.REGULAR)
prop.label = "Sleep Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepIntvl", prop)
prop = PropMeta("str", "maxLsaThresh", "maxLsaThresh", 17804, PropCategory.REGULAR)
prop.label = "Threshold"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 100)]
prop.defaultValue = 75
prop.defaultValueStr = "75"
meta.props.add("maxLsaThresh", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "spfHoldIntvl", "spfHoldIntvl", 1091, PropCategory.REGULAR)
prop.label = "Max Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("spfHoldIntvl", prop)
prop = PropMeta("str", "spfInitIntvl", "spfInitIntvl", 1090, PropCategory.REGULAR)
prop.label = "Initial Delay Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 200
prop.defaultValueStr = "200"
meta.props.add("spfInitIntvl", prop)
prop = PropMeta("str", "spfMaxIntvl", "spfMaxIntvl", 1092, PropCategory.REGULAR)
prop.label = "Min Wait Time"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("spfMaxIntvl", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
9707670f3dec472dded3c7da0ce0d31e2033090f
|
d668209e9951d249020765c011a836f193004c01
|
/tools/pnnx/tests/test_F_unfold.py
|
51f19a4f48a4b788476ea755f31fd662ef8f4214
|
[
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] |
permissive
|
Tencent/ncnn
|
d8371746c00439304c279041647362a723330a79
|
14b000d2b739bd0f169a9ccfeb042da06fa0a84a
|
refs/heads/master
| 2023-08-31T14:04:36.635201 | 2023-08-31T04:19:23 | 2023-08-31T04:19:23 | 95,879,426 | 18,818 | 4,491 |
NOASSERTION
| 2023-09-14T15:44:56 | 2017-06-30T10:55:37 |
C++
|
UTF-8
|
Python
| false | false | 1,747 |
py
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
x0 = F.unfold(x, kernel_size=3)
x1 = F.unfold(x, kernel_size=(2,4), stride=(2,1), padding=2, dilation=1)
x2 = F.unfold(x, kernel_size=(1,3), stride=1, padding=(2,4), dilation=(1,2))
return x0, x1, x2
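# Worked shape example (added for clarity; not in the original test):
# F.unfold returns (N, C * prod(kernel_size), L), where per spatial dim
#   L_d = floor((size + 2*pad - dilation*(kernel-1) - 1) / stride) + 1.
# For the (1, 12, 64, 64) input used below:
#   x0: 3x3, stride 1, pad 0           -> (1, 12*9, 62*62) = (1, 108, 3844)
#   x1: 2x4, stride (2,1), pad 2       -> (1, 12*8, 34*65) = (1,  96, 2210)
#   x2: 1x3, stride 1, pad (2,4),
#       dilation (1,2)                 -> (1, 12*3, 68*68) = (1,  36, 4624)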
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 12, 64, 64)
a0, a1, a2 = net(x)
# export torchscript
mod = torch.jit.trace(net, x)
mod.save("test_F_unfold.pt")
# torchscript to pnnx
import os
os.system("../src/pnnx test_F_unfold.pt inputshape=[1,12,64,64]")
# pnnx inference
import test_F_unfold_pnnx
b0, b1, b2 = test_F_unfold_pnnx.test_inference()
return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
[
"[email protected]"
] | |
917f13939d079a68ba1420e76b3c3de9c30c2574
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/google/ads/google_ads/v1/services/feed_mapping_service_client.py
|
db167566a014a7873d12dec695d51b7aa20a087b
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 |
Apache-2.0
| 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null |
UTF-8
|
Python
| false | false | 12,864 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v1.services FeedMappingService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.ads.google_ads.v1.services import feed_mapping_service_client_config
from google.ads.google_ads.v1.services.transports import feed_mapping_service_grpc_transport
from google.ads.google_ads.v1.proto.services import feed_mapping_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads', ).version
class FeedMappingServiceClient(object):
"""Service to manage feed mappings."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v1.services.FeedMappingService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedMappingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def feed_mapping_path(cls, customer, feed_mapping):
"""Return a fully-qualified feed_mapping string."""
return google.api_core.path_template.expand(
'customers/{customer}/feedMappings/{feed_mapping}',
customer=customer,
feed_mapping=feed_mapping,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.FeedMappingServiceGrpcTransport,
Callable[[~.Credentials, type], ~.FeedMappingServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = feed_mapping_service_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=feed_mapping_service_grpc_transport.
FeedMappingServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = feed_mapping_service_grpc_transport.FeedMappingServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_feed_mapping(self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested feed mapping in full detail.
Args:
resource_name (str): The resource name of the feed mapping to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v1.types.FeedMapping` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_feed_mapping' not in self._inner_api_calls:
self._inner_api_calls[
'get_feed_mapping'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_feed_mapping,
default_retry=self._method_configs['GetFeedMapping'].retry,
default_timeout=self._method_configs['GetFeedMapping'].
timeout,
client_info=self._client_info,
)
request = feed_mapping_service_pb2.GetFeedMappingRequest(
resource_name=resource_name, )
return self._inner_api_calls['get_feed_mapping'](
request, retry=retry, timeout=timeout, metadata=metadata)
def mutate_feed_mappings(self,
customer_id,
operations,
partial_failure=None,
validate_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates or removes feed mappings. Operation statuses are
returned.
Args:
customer_id (str): The ID of the customer whose feed mappings are being modified.
operations (list[Union[dict, ~google.ads.googleads_v1.types.FeedMappingOperation]]): The list of operations to perform on individual feed mappings.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v1.types.FeedMappingOperation`
partial_failure (bool): If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will be carried
out in one transaction if and only if they are all valid.
Default is false.
validate_only (bool): If true, the request is validated but not executed. Only errors are
returned, not results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v1.types.MutateFeedMappingsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'mutate_feed_mappings' not in self._inner_api_calls:
self._inner_api_calls[
'mutate_feed_mappings'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_feed_mappings,
default_retry=self._method_configs['MutateFeedMappings'].
retry,
default_timeout=self._method_configs['MutateFeedMappings'].
timeout,
client_info=self._client_info,
)
request = feed_mapping_service_pb2.MutateFeedMappingsRequest(
customer_id=customer_id,
operations=operations,
partial_failure=partial_failure,
validate_only=validate_only,
)
return self._inner_api_calls['mutate_feed_mappings'](
request, retry=retry, timeout=timeout, metadata=metadata)
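# --- Usage sketch (added; not part of the generated file) ---
# Hedged example of driving this client with a service-account key. The
# key path, customer ID, and feed-mapping ID below are placeholders.
#
# client = FeedMappingServiceClient.from_service_account_file(
#     '/path/to/service_account_key.json')
# resource_name = client.feed_mapping_path('1234567890', '42')
# feed_mapping = client.get_feed_mapping(resource_name)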
|
[
"[email protected]"
] | |
a5513dfc2836838bb3c4606c9f502067189dd421
|
fd3da963aa5ad8ff0d6cf0cc4c9d9ff05f9135ca
|
/apps/goodss/urls.py
|
dd136dd536f0526fb077f7ce93ae0e9e66f4032a
|
[] |
no_license
|
huanshenyi/rental-system-backed
|
083dbfe18d28e7f0111282a93c84c415098d07f5
|
3c9487dcb9e650036a2a533a10a3c66f762b6fdb
|
refs/heads/master
| 2022-12-14T18:05:29.876973 | 2020-08-15T13:32:55 | 2020-08-15T13:32:55 | 281,384,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
__author__ = "ハリネズミ"
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter(trailing_slash=False)
router.register("goods", views.GoodsViewSet, basename="goods")
router.register("category", views.CategoryViewSet, basename="category")
router.register("tag", views.TagViewSet, basename="tag")
app_name = "goodss"
urlpatterns = [
] + router.urls
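# Usage note (added; not part of the original file): wire these routes into
# the project URLconf; the 'api/' prefix below is illustrative only.
#
# from django.urls import include, path
# urlpatterns = [path('api/', include('apps.goodss.urls', namespace='goodss'))]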
|
[
"[email protected]"
] | |
b28c117e4cb45e821c162342a952aaee187604aa
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_BoxCox/trend_ConstantTrend/cycle_0/ar_12/test_artificial_128_BoxCox_ConstantTrend_0_12_100.py
|
02cb32eb108b5b7dafaa5e8d7ad78612f5428a54
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 272 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 0, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"[email protected]"
] | |
1770713e3f35011aa2ee76ea0965250841cc4f2c
|
08a851f0d7218beb6c32b5438595c44bb2498af9
|
/library/migrations/0004_auto_20150908_1625.py
|
3ed5fa8c629bbc36ffed2de3dd854b3f049ef822
|
[] |
no_license
|
KobiBeef/base_src
|
47ff5a1ecbab0953f74b41533cafbd26eb428e16
|
975294df5edee8d1f441470a7e1cf8ce59778a0b
|
refs/heads/master
| 2020-06-05T08:30:21.358458 | 2015-12-08T08:09:06 | 2015-12-08T08:09:06 | 41,631,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 362 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0003_testcomment'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ['-pk']},
),
]
|
[
"[email protected]"
] | |
a91ef44aa843bd22308c6d92577a6f5676cd70fc
|
d7fb8743b6faa4d948b2b08ca0dbdd3b0f11379b
|
/测试代码/keras/已经/1keras_lstm1/stock_lstm4.py
|
9b08b23f74975fac2ad2d7555aae9d4185fb679b
|
[] |
no_license
|
bancheng/Stock-market
|
219e9882858e6d10edad1d13fba67dadbedc27ba
|
142ea0eaed0fdccd8e79a51c34d66d1be1c336ed
|
refs/heads/master
| 2021-01-20T15:13:14.667022 | 2017-09-10T06:31:10 | 2017-09-10T06:31:10 | 90,737,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,884 |
py
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import cPickle as pkl
from keras.preprocessing import sequence, text
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
# Load the pickled feature windows and labels.
with open("data.pkl", 'rb') as f:
    datax = pkl.load(f)
    datay = pkl.load(f)
# Shuffle the sample indices. A permutation (instead of the original
# np.random.randint, which samples with replacement) keeps the train and
# test sets disjoint.
_rand = np.random.permutation(len(datay))
# The first 2000 shuffled windows form the test set, the rest the training set.
X_test = datax[_rand[0]]
y_test = datay[_rand[0]]
X_train = datax[_rand[2000]]
y_train = datay[_rand[2000]]
i = 1
while i < 2000:
    X_test = np.vstack((X_test, datax[_rand[i]]))
    y_test = np.vstack((y_test, datay[_rand[i]]))
    i = i + 1
# Regroup the stacked rows into (samples, timesteps=50, features=6).
X_test = X_test.reshape(X_test.shape[0] // 50, 50, 6)
i = 2001
while i < len(datay):
    X_train = np.vstack((X_train, datax[_rand[i]]))
    y_train = np.vstack((y_train, datay[_rand[i]]))
    i = i + 1
X_train = X_train.reshape(X_train.shape[0] // 50, 50, 6)
# print('X_train shape:', X_train.shape)
# print('X_test shape:', X_test.shape)
model = Sequential()
model.add(LSTM(1, input_shape=(50, 6)))
# print('Build model...')
# model = Sequential()
# model.add(Embedding(max_features, 256))
# model.add(LSTM(256, 128)) # try using a GRU instead, for fun
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
#
# # try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam')
#
print("Train...")
model.fit(X_train, y_train, batch_size=1, nb_epoch=50, validation_split=0.2, show_accuracy=True)
score = model.evaluate(X_test, y_test, batch_size=1)
print('Test score:', score)
#
# classes = model.predict_classes(X_test, batch_size=batch_size)
# acc = np_utils.accuracy(classes, y_test)
#
# print('Test accuracy:', acc)
#
# store_weights = {}
# for layer in model.layers :
# store_weights[layer] = layer.get_weights()
#
# # create a new model of same structure minus last layers, to explore intermediate outputs
# print('Build truncated model')
# chopped_model = Sequential()
# chopped_model.add(Embedding(max_features, 256, weights=model.layers[0].get_weights()))
# chopped_model.add(LSTM(256, 128, weights=model.layers[1].get_weights()))
# chopped_model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
#
# # pickle intermediate outputs, model weights
# train_activations = chopped_model.predict(X_train, batch_size=batch_size)
# test_activations = chopped_model.predict(X_test, batch_size=batch_size)
# outputs = dict(final=classes, acc=acc, weights=store_weights, y_train=y_train, y_test=y_test,
# train_activations=train_activations, test_activations=test_activations)
#
# pkl.dump(outputs, open('results/predicted_activations.pkl', 'wb'),
# protocol=pkl.HIGHEST_PROTOCOL)
|
[
"[email protected]"
] | |
dcaa2b75aea54efe2bc037a0cb330dd43ea637a2
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/common/Lib/tokenize.py
|
dbb11be960e4c212f4ff08c3cf20cf5dae2722a9
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 16,767 |
py
|
# 2017.02.03 21:57:04 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/tokenize.py
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
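# Illustrative usage (assumed snippet; Python 2, like the rest of this module):
#
#     from StringIO import StringIO
#     for toktype, tok, start, end, ln in generate_tokens(StringIO('x = 1\n').readline):
#         print tok_name[toktype], repr(tok)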
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'
from itertools import chain
import string, re
from token import *
import token
__all__ = [ x for x in dir(token) if not x.startswith('_') ]
__all__ += ['COMMENT',
'tokenize',
'generate_tokens',
'NL',
'untokenize']
del x
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices):
return '(' + '|'.join(choices) + ')'
def any(*choices):
return group(*choices) + '*'
def maybe(*choices):
return group(*choices) + '?'
Whitespace = '[ \\f\\t]*'
Comment = '#[^\\r\\n]*'
Ignore = Whitespace + any('\\\\\\r?\\n' + Whitespace) + maybe(Comment)
Name = '[a-zA-Z_]\\w*'
Hexnumber = '0[xX][\\da-fA-F]+[lL]?'
Octnumber = '(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = '0[bB][01]+[lL]?'
Decnumber = '[1-9]\\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = '[eE][-+]?\\d+'
Pointfloat = group('\\d+\\.\\d*', '\\.\\d+') + maybe(Exponent)
Expfloat = '\\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group('\\d+[jJ]', Floatnumber + '[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
Single = "[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"
Double = '[^"\\\\]*(?:\\\\.[^"\\\\]*)*"'
Single3 = "[^'\\\\]*(?:(?:\\\\.|'(?!''))[^'\\\\]*)*'''"
Double3 = '[^"\\\\]*(?:(?:\\\\.|"(?!""))[^"\\\\]*)*"""'
Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
String = group("[uUbB]?[rR]?'[^\\n'\\\\]*(?:\\\\.[^\\n'\\\\]*)*'", '[uUbB]?[rR]?"[^\\n"\\\\]*(?:\\\\.[^\\n"\\\\]*)*"')
Operator = group('\\*\\*=?', '>>=?', '<<=?', '<>', '!=', '//=?', '[+\\-*/%&|^=<>]=?', '~')
Bracket = '[][(){}]'
Special = group('\\r?\\n', '[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
ContStr = group("[uUbB]?[rR]?'[^\\n'\\\\]*(?:\\\\.[^\\n'\\\\]*)*" + group("'", '\\\\\\r?\\n'), '[uUbB]?[rR]?"[^\\n"\\\\]*(?:\\\\.[^\\n"\\\\]*)*' + group('"', '\\\\\\r?\\n'))
PseudoExtras = group('\\\\\\r?\\n|\\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(re.compile, (Token,
PseudoToken,
Single3,
Double3))
endprogs = {"'": re.compile(Single),
'"': re.compile(Double),
"'''": single3prog,
'"""': double3prog,
"r'''": single3prog,
'r"""': double3prog,
"u'''": single3prog,
'u"""': double3prog,
"ur'''": single3prog,
'ur"""': double3prog,
"R'''": single3prog,
'R"""': double3prog,
"U'''": single3prog,
'U"""': double3prog,
"uR'''": single3prog,
'uR"""': double3prog,
"Ur'''": single3prog,
'Ur"""': double3prog,
"UR'''": single3prog,
'UR"""': double3prog,
"b'''": single3prog,
'b"""': double3prog,
"br'''": single3prog,
'br"""': double3prog,
"B'''": single3prog,
'B"""': double3prog,
"bR'''": single3prog,
'bR"""': double3prog,
"Br'''": single3prog,
'Br"""': double3prog,
"BR'''": single3prog,
'BR"""': double3prog,
'r': None,
'R': None,
'u': None,
'U': None,
'b': None,
'B': None}
triple_quoted = {}
for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"', "r'", 'r"', "R'", 'R"', "u'", 'u"', "U'", 'U"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"', "b'", 'b"', "B'", 'B"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"'):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception):
pass
class StopTokenizing(Exception):
pass
def printtoken(type, token, srow_scol, erow_ecol, line):
srow, scol = srow_scol
erow, ecol = erow_ecol
print '%d,%d-%d,%d:\t%s\t%s' % (srow,
scol,
erow,
ecol,
tok_name[type],
repr(token))
def tokenize(readline, tokeneater = printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
if row < self.prev_row or row == self.prev_row and col < self.prev_col:
raise ValueError('start ({},{}) precedes previous end ({},{})'.format(row, col, self.prev_row, self.prev_col))
row_offset = row - self.prev_row
if row_offset:
self.tokens.append('\\\n' * row_offset)
self.prev_col = 0
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(' ' * col_offset)
def untokenize(self, iterable):
it = iter(iterable)
for t in it:
if len(t) == 2:
self.compat(t, it)
break
tok_type, token, start, end, line = t
if tok_type == ENDMARKER:
break
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return ''.join(self.tokens)
def compat(self, token, iterable):
indents = []
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
for tok in chain([token], iterable):
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = ('', 0)
contline = None
indents = [0]
while 1:
try:
line = readline()
except StopIteration:
line = ''
lnum += 1
pos, max = 0, len(line)
if contstr:
if not line:
raise TokenError, ('EOF in multi-line string', strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING,
contstr + line[:end],
strstart,
(lnum, end),
contline + line)
contstr, needcont = ('', 0)
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN,
contstr + line,
strstart,
(lnum, len(line)),
contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued:
if not line:
break
column = 0
while pos < max:
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column // tabsize + 1) * tabsize
elif line[pos] == '\x0c':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n':
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT,
comment_token,
(lnum, pos),
(lnum, pos + len(comment_token)),
line)
yield (NL,
line[nl_pos:],
(lnum, nl_pos),
(lnum, len(line)),
line)
else:
yield ((NL, COMMENT)[line[pos] == '#'],
line[pos:],
(lnum, pos),
(lnum, len(line)),
line)
continue
if column > indents[-1]:
indents.append(column)
yield (INDENT,
line[:pos],
(lnum, 0),
(lnum, pos),
line)
while column < indents[-1]:
if column not in indents:
raise IndentationError('unindent does not match any outer indentation level', ('<tokenize>',
lnum,
pos,
line))
indents = indents[:-1]
yield (DEDENT,
'',
(lnum, pos),
(lnum, pos),
line)
else:
if not line:
raise TokenError, ('EOF in multi-line statement', (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch:
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
if start == end:
continue
token, initial = line[start:end], line[start]
if initial in numchars or initial == '.' and token != '.':
yield (NUMBER,
token,
spos,
epos,
line)
elif initial in '\r\n':
yield (NL if parenlev > 0 else NEWLINE,
token,
spos,
epos,
line)
                elif initial == '#':
                    assert not token.endswith('\n')
                    yield (COMMENT,
                     token,
                     spos,
                     epos,
                     line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch:
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING,
token,
spos,
(lnum, pos),
line)
else:
strstart = (lnum, start)
contstr = line[start:]
contline = line
break
elif initial in single_quoted or token[:2] in single_quoted or token[:3] in single_quoted:
if token[-1] == '\n':
strstart = (lnum, start)
endprog = endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]
contstr, needcont = line[start:], 1
contline = line
break
else:
yield (STRING,
token,
spos,
epos,
line)
elif initial in namechars:
yield (NAME,
token,
spos,
epos,
line)
elif initial == '\\':
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield (OP,
token,
spos,
epos,
line)
else:
yield (ERRORTOKEN,
line[pos],
(lnum, pos),
(lnum, pos + 1),
line)
pos += 1
for indent in indents[1:]:
yield (DEDENT,
'',
(lnum, 0),
(lnum, 0),
'')
yield (ENDMARKER,
'',
(lnum, 0),
(lnum, 0),
'')
return
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
tokenize(open(sys.argv[1]).readline)
else:
tokenize(sys.stdin.readline)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\Lib\tokenize.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:57:04 Central Europe (standard time)
|
[
"[email protected]"
] | |
e8b83c5dc9d4999541cb30f76d63cb23ff3fff7d
|
814992618962991b1b6dd6f1cdf2853687cbfcd0
|
/examples/demo_013_HEOM.py
|
8fee2c5b5ca6c9822e512aeed24a2b19cfcb9ad4
|
[
"MIT"
] |
permissive
|
MichalPt/quantarhei
|
a5db7916405236dc78778e4ef378141a19a28ff2
|
536d4f39bb7f7d6893664520351d93eac2bc90f1
|
refs/heads/master
| 2022-12-15T09:36:53.108896 | 2022-07-28T09:44:12 | 2022-07-28T09:44:12 | 226,359,238 | 1 | 0 |
MIT
| 2019-12-06T15:37:24 | 2019-12-06T15:37:23 | null |
UTF-8
|
Python
| false | false | 7,159 |
py
|
# -*- coding: utf-8 -*-
_show_plots_ = True
import time
import numpy
import quantarhei as qr
from quantarhei.qm.liouvillespace.integrodiff.integrodiff \
import IntegrodiffPropagator
print("")
print("***********************************************************")
print("* *")
print("* Quantarhei's HEOM implementation demo *")
print("* *")
print("***********************************************************")
###############################################################################
#
# Model system definition
#
###############################################################################
# Three molecules
with qr.energy_units("1/cm"):
m1 = qr.Molecule([0.0, 10100.0])
m2 = qr.Molecule([0.0, 10300.0])
m3 = qr.Molecule([0.0, 10000.0])
# Aggregate is built from the molecules
agg = qr.Aggregate([m1, m2, m3])
# Couplings between them are set
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1,80.0)
agg.set_resonance_coupling(0,2,100.0)
# Interaction with the bath is set through bath correlation functions
timea = qr.TimeAxis(0.0, 500, 1.0)
cpar1 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
cortime=50, T=300)
cpar2 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
cortime=50, T=300)
with qr.energy_units("1/cm"):
cfce1 = qr.CorrelationFunction(timea, cpar1)
cfce2 = qr.CorrelationFunction(timea, cpar2)
m1.set_transition_environment((0, 1), cfce1)
m2.set_transition_environment((0, 1), cfce1)
m3.set_transition_environment((0, 1), cfce2)
# Aggregate is built
agg.build()
###############################################################################
#
# Definition of the hierarchy
#
###############################################################################
# Hamiltonian and the system-bath interaction operator is needed to
# define the Kubo-Tanimura hierarchy
ham = agg.get_Hamiltonian()
sbi = agg.get_SystemBathInteraction()
# We define the hierarchy
#Hy3 = qr.KTHierarchy(ham, sbi, 3)
#Hy4 = qr.KTHierarchy(ham, sbi, 4)
#Hy5 = qr.KTHierarchy(ham, sbi, 5)
Hy6 = qr.KTHierarchy(ham, sbi, 3)
print("Size of hierarchy of depth",Hy6.depth,"is",Hy6.hsize)
Hy7 = qr.KTHierarchy(ham, sbi, 4)
print("Size of hierarchy of depth",Hy7.depth,"is",Hy7.hsize)
# testing generation of hierarchy indices
#print(Hy.generate_indices(4, level=4))
#
#raise Exception()
###############################################################################
#
# Propagation of the HEOM
#
###############################################################################
# Initial density matrix
rhoi = qr.ReducedDensityMatrix(dim=ham.dim)
with qr.eigenbasis_of(ham):
rhoi.data[2,2] = 0.8
rhoi.data[1,1] = 0.1
rhoi.data[3,3] = 0.1
#print(rhoi)
# Definition of the HEOM propagator
#kprop3 = qr.KTHierarchyPropagator(timea, Hy3)
#kprop4 = qr.KTHierarchyPropagator(timea, Hy4)
#kprop5 = qr.KTHierarchyPropagator(timea, Hy5)
kprop6 = qr.KTHierarchyPropagator(timea, Hy6)
kprop7 = qr.KTHierarchyPropagator(timea, Hy7)
# Propagation of the hierarchy and saving the density operator
t1 = time.time()
#rhot3 = kprop3.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot4 = kprop4.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot5 = kprop5.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
rhot6 = kprop6.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
t1 = time.time()
rhot7 = kprop7.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
###############################################################################
#
# Graphical output of the results
#
###############################################################################
if _show_plots_:
import matplotlib.pyplot as plt
N = timea.length
with qr.eigenbasis_of(ham):
# plt.plot(timea.data[0:N], rhot3.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot3.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot3.data[0:N,3,3],"-k")
# plt.plot(timea.data[0:N], rhot4.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot4.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot4.data[0:N,3,3],"-k")
# plt.plot(timea.data[0:N], rhot5.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot5.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot5.data[0:N,3,3],"-k")
plt.plot(timea.data[0:N], rhot6.data[0:N,0,0])
plt.plot(timea.data[0:N], rhot6.data[0:N,1,3],"-b")
plt.plot(timea.data[0:N], rhot6.data[0:N,2,3],"-r")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"-k")
plt.plot(timea.data[0:N], rhot7.data[0:N,1,3],"--b")
plt.plot(timea.data[0:N], rhot7.data[0:N,2,3],"--r")
plt.plot(timea.data[0:N], rhot7.data[0:N,1,2],"--k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,1], "-k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,2], "-k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,3], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,4], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,5], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,6], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,7], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,8], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,9], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,10], "-g")
plt.show()
print("Kernel generation")
ker = Hy6.get_kernel(timea)
ip8 = IntegrodiffPropagator(timea, ham, kernel=ker,
fft=True, timefac=3, decay_fraction=2.0)
#fft=False) #, cutoff_time=100)
rhot8 = ip8.propagate(rhoi)
trc = numpy.zeros(timea.length, dtype=qr.REAL)
for ti in range(timea.length):
trc[ti] = numpy.real(numpy.trace(rhot8.data[ti,:,:]))
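# Sanity check: the propagation should preserve the trace of the density
# matrix, so trc is expected to stay numerically close to 1 at all times.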
if _show_plots_:
N = timea.length
with qr.eigenbasis_of(ham):
#plt.plot(timea.data[0:N], rhot8.data[0:N,0,0])
#plt.plot(timea.data[0:N], trc[0:N],"-m")
plt.plot(timea.data[0:N], ker[0:N,1,1,1,1],"-m")
plt.plot(timea.data[0:N], ker[0:N,1,2,1,2],"-m")
plt.plot(timea.data[0:N], ker[0:N,2,2,2,2],"-m")
plt.show()
plt.plot(timea.data[0:N], rhot8.data[0:N,1,1],"-b")
plt.plot(timea.data[0:N], rhot8.data[0:N,2,2],"-r")
plt.plot(timea.data[0:N], rhot8.data[0:N,1,2],"-k")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,1],"--b")
plt.plot(timea.data[0:N], rhot6.data[0:N,2,2],"--r")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"--k")
plt.show()
print("")
print("***********************************************************")
print("* *")
print("* Demo finished successfully *")
print("* *")
print("***********************************************************")
|
[
"[email protected]"
] | |
93b9fc099bbdf4f52185cf649eff703a84c41fea
|
8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6
|
/WProf/build/third_party/twisted_8_1/twisted/flow/.svn/text-base/pipe.py.svn-base
|
8b38e07fb29e21544889a65343483bfd058430b2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kusoof/wprof
|
ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e
|
8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8
|
refs/heads/master
| 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,446 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Author: Clark Evans ([email protected])
"""
flow.pipe
This contains various filter stages which have exactly one input stage. These
stages take a single input and modify its results, i.e. rewrite stages.
"""
from base import *
from wrap import wrap
from twisted.python.failure import Failure
class Pipe(Stage):
""" abstract stage which takes a single input stage """
def __init__(self, source, *trap):
Stage.__init__(self, *trap)
self._source = wrap(source)
def _yield(self):
while not self.results \
and not self.stop \
and not self.failure:
source = self._source
instruction = source._yield()
if instruction:
return instruction
if source.failure:
self.failure = source.failure
return
results = source.results
stop = source.stop
if stop:
self.stop = True
source.results = []
self.process(results, stop)
    def process(self, results, stop):
        """ process implemented by the pipe
        Takes a set of possibly empty results and sets the member
        variables results, stop, or failure appropriately
"""
raise NotImplementedError
class Filter(Pipe):
"""
flow equivalent to filter: Filter(function, source, ... )
Yield those elements from a source stage for which a function returns true.
If the function is None, the identity function is assumed, that is, all
items yielded that are false (zero or empty) are discarded.
For example::
def odd(val):
if val % 2:
return True
def range():
yield 1
yield 2
yield 3
yield 4
source = flow.Filter(odd,range)
printFlow(source)
"""
def __init__(self, func, source, *trap):
Pipe.__init__(self, source, *trap)
self._func = func
def process(self, results, stop):
self.results.extend(filter(self._func,results))
class LineBreak(Pipe):
""" pipe stage which breaks its input into lines """
def __init__(self, source, *trap, **kwargs):
Pipe.__init__(self, source, *trap)
self._delimiter = kwargs.get('delimiter','\r\n')
self._maxlen = int(kwargs.get('maxlength', 16384))+1
self._trailer = int(kwargs.get('trailer',False))
self._buffer = []
self._currlen = 0
def process(self, results, stop):
for block in results:
lines = str(block).split(self._delimiter)
if len(lines) < 2:
tail = lines[0]
else:
tail = lines.pop()
if self._buffer:
self._buffer.append(lines.pop(0))
self.results.append("".join(self._buffer))
self._buffer = []
self.results.extend(lines)
self._currlen = 0
if tail:
self._currlen += len(tail)
self._buffer.append(tail)
if stop and self._buffer:
tail = "".join(self._buffer)
if self._trailer:
self.results.append(tail)
else:
raise RuntimeError, "trailing data remains: '%s'" % tail[:10]
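# Illustrative usage (assumed upstream stage yielding raw text blocks):
#     lines = LineBreak(source, delimiter='\r\n', maxlength=4096, trailer=True)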
|
[
"kusoof@kookaburra.(none)"
] |
kusoof@kookaburra.(none)
|
|
07b9fc3d3d2f5b66826d0b99c52e23bcaeee837f
|
b3bf0dfda920950cbc4215a2f591606473398706
|
/contact_manager/users/apps.py
|
b9b9054731b1bdc5afc95af824ba0d884b4ac2f8
|
[] |
no_license
|
adeelehsan/contact_manager
|
34fa9d4fc9a6e03651b7e81cd144d9380629be33
|
d5455a524726ca8577a628d6b2abb6885291e600
|
refs/heads/master
| 2020-03-25T18:07:28.213197 | 2018-08-08T13:08:34 | 2018-08-08T13:08:34 | 144,013,755 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
name = "contact_manager.users"
verbose_name = "Users"
def ready(self):
try:
            import users.signals  # noqa: F401
except ImportError:
pass
|
[
"[email protected]"
] | |
0702dfcb63672e54fa4461c0fef1e5dec473a471
|
d4442db5a7ab9db2b04fef640a9864f3fba54758
|
/src/python/WMCore/RequestManager/RequestMaker/Processing/StoreResultsRequest.py
|
6262e7d3ca23c220082d5f4d37caec6a43e306c8
|
[] |
no_license
|
stuartw/WMCore
|
fa25ff19ab5058a635d35d3c58a0ac56a3e079a1
|
38c39c43f7237fd316930839674ac9be3c0ee8cc
|
refs/heads/master
| 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,051 |
py
|
#!/usr/bin/env python
"""
_StoreResultsRequest_
"""
from WMCore.RequestManager.RequestMaker.RequestMakerInterface import RequestMakerInterface
from WMCore.RequestManager.DataStructs.RequestSchema import RequestSchema
from WMCore.RequestManager.RequestMaker.Registry import registerRequestType, retrieveRequestMaker
class StoreResultsRequest(RequestMakerInterface):
"""
_StoreResultsRequest_
    RequestMaker for two-file-input data processing requests and workflows
"""
def __init__(self):
RequestMakerInterface.__init__(self)
class StoreResultsSchema(RequestSchema):
"""
_StoreResults_
    Data required for a standard cmsRun two-file-read processing request.
"""
def __init__(self):
RequestSchema.__init__(self)
# not used yet
self.validateFields = [
'InputDatasets',
'CMSSWVersion',
'ScramArch',
'Group',
'DbsUrl'
]
registerRequestType("StoreResults", StoreResultsRequest, StoreResultsSchema)
|
[
"metson@4525493e-7705-40b1-a816-d608a930855b"
] |
metson@4525493e-7705-40b1-a816-d608a930855b
|
49914a6ca92efeecfc33636379136600b1830cee
|
cb30d1a3a4fa6c8f7a6f89a671fbdb4a808e19e3
|
/c6/prime-iter.py
|
b70bd78b2b00c2e5f38c042c7396b319d0036b5b
|
[] |
no_license
|
systemchip/python-for-everyone
|
0b45172ca5b41c3b5fc1a835fbccf4a479c282ea
|
9fb7f751a97fb6a110079e1e3e1dd9601fb24374
|
refs/heads/master
| 2021-09-02T09:18:22.013704 | 2017-07-17T07:46:19 | 2017-07-17T07:46:19 | 115,913,547 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 924 |
py
|
# Iterator class that enumerates prime numbers
class PrimeIter:
def __init__(self, max):
""" 최대값을 지정한다 """
self.max = max
def __iter__(self):
""" 값을 초기화한다 """
self.n = 1
return self
def __next__(self):
""" 다음 소수를 찾아서 반환한다 """
is_prime = False
self.n += 1
        # Search for the next prime
while not is_prime:
is_prime = True
for i in range(2, self.n):
if self.n % i == 0:
is_prime = False
break
if is_prime: break
self.n += 1
        # Raise an exception once the maximum value is reached
if self.n >= self.max:
raise StopIteration
return self.n
# Enumerate the primes up to 100
it = PrimeIter(100)
for no in it:
print(no, end=",")
|
[
"[email protected]"
] | |
95bdb414ed8a2d51ed63cf8f0efbbc304e79375f
|
6aff904a5c464c3a7437a1136420e4aa10488456
|
/chainer_/chainercv2/models/common.py
|
7327ca12fb9c606a89b3b28f528138b243fcf0b5
|
[
"MIT"
] |
permissive
|
chenjun2hao/imgclsmob
|
8c845ce108f5c58c2be708012e6d0f5d0b0c7548
|
e6dcf77bf9b5505946befec18f5c54c244f70304
|
refs/heads/master
| 2020-06-16T05:40:10.847813 | 2019-07-05T21:05:36 | 2019-07-05T21:05:36 | 195,492,889 | 1 | 0 |
MIT
| 2019-07-06T03:26:20 | 2019-07-06T03:26:20 | null |
UTF-8
|
Python
| false | false | 32,448 |
py
|
"""
Common routines for models in Chainer.
"""
__all__ = ['ReLU6', 'HSwish', 'GlobalAvgPool2D', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock',
'conv1x1_block', 'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'PreConvBlock',
'pre_conv1x1_block', 'pre_conv3x3_block', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SimpleSequential',
'DualPathSequential', 'Concurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass',
'SesquialteralHourglass', 'MultiOutputSequential', 'Flatten', 'AdaptiveAvgPool2D']
from inspect import isfunction
from chainer import Chain
import chainer.functions as F
import chainer.links as L
class ReLU6(Chain):
"""
ReLU6 activation layer.
"""
def __call__(self, x):
return F.clip(x, 0.0, 6.0)
class Swish(Chain):
"""
Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
"""
def __call__(self, x):
return x * F.sigmoid(x)
class HSigmoid(Chain):
"""
Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
https://arxiv.org/abs/1905.02244.
"""
def __call__(self, x):
return F.clip(x + 3.0, 0.0, 6.0) / 6.0
class HSwish(Chain):
"""
H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
def __call__(self, x):
return x * F.clip(x + 3.0, 0.0, 6.0) / 6.0
def get_activation_layer(activation):
"""
Create activation layer from string/function.
Parameters:
----------
activation : function or str
Activation function or name of activation function.
Returns
-------
function
Activation layer.
"""
assert (activation is not None)
if isfunction(activation):
return activation()
elif isinstance(activation, str):
if activation == "relu":
return F.relu
elif activation == "relu6":
return ReLU6()
elif activation == "swish":
return Swish()
# return partial(
# F.swish,
# beta=[1.0])
elif activation == "hswish":
return HSwish()
else:
raise NotImplementedError()
else:
return activation
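# Illustrative calls (assumed): each returns a callable activation.
#     get_activation_layer("relu")          # -> F.relu
#     get_activation_layer("hswish")        # -> HSwish()
#     get_activation_layer(lambda: F.relu)  # factory is invoked -> F.relu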
class GlobalAvgPool2D(Chain):
"""
Global average pooling operation for spatial data.
"""
def __call__(self, x):
return F.average_pooling_2d(x, ksize=x.shape[2:])
def conv1x1(in_channels,
out_channels,
stride=1,
groups=1,
use_bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
"""
return L.Convolution2D(
in_channels=in_channels,
out_channels=out_channels,
ksize=1,
stride=stride,
nobias=(not use_bias),
groups=groups)
def conv3x3(in_channels,
out_channels,
stride=1,
pad=1,
dilate=1,
groups=1,
use_bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
"""
return L.Convolution2D(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=stride,
pad=pad,
nobias=(not use_bias),
dilate=dilate,
groups=groups)
def depthwise_conv3x3(channels,
stride):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
channels : int
Number of input/output channels.
stride : int or tuple/list of 2 int
Stride of the convolution.
"""
return L.Convolution2D(
in_channels=channels,
out_channels=channels,
ksize=3,
stride=stride,
pad=1,
nobias=True,
groups=channels)
class ConvBlock(Chain):
"""
Standard convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
ksize : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Stride of the convolution.
pad : int or tuple/list of 2 int
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
ksize,
stride,
pad,
dilate=1,
groups=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
super(ConvBlock, self).__init__()
self.activate = (activation is not None)
with self.init_scope():
self.conv = L.Convolution2D(
in_channels=in_channels,
out_channels=out_channels,
ksize=ksize,
stride=stride,
pad=pad,
nobias=(not use_bias),
dilate=dilate,
groups=groups)
self.bn = L.BatchNormalization(
size=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def __call__(self, x):
x = self.conv(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
groups=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=1,
stride=stride,
pad=0,
groups=groups,
use_bias=use_bias,
bn_eps=bn_eps,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
pad=1,
dilate=1,
groups=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=stride,
pad=pad,
dilate=dilate,
groups=groups,
use_bias=use_bias,
bn_eps=bn_eps,
activation=activation)
def conv5x5_block(in_channels,
out_channels,
stride=1,
pad=2,
dilate=1,
groups=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
"""
5x5 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 2
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=5,
stride=stride,
pad=pad,
dilate=dilate,
groups=groups,
use_bias=use_bias,
bn_eps=bn_eps,
activation=activation)
def conv7x7_block(in_channels,
out_channels,
stride=1,
pad=3,
use_bias=False,
activation=(lambda: F.relu)):
"""
7x7 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=7,
stride=stride,
pad=pad,
use_bias=use_bias,
activation=activation)
def dwconv3x3_block(in_channels,
out_channels,
stride=1,
pad=1,
dilate=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
"""
3x3 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
pad=pad,
dilate=dilate,
groups=out_channels,
use_bias=use_bias,
bn_eps=bn_eps,
activation=activation)
def dwconv5x5_block(in_channels,
out_channels,
stride=1,
pad=2,
dilate=1,
use_bias=False,
bn_eps=1e-5,
activation=(lambda: F.relu)):
"""
5x5 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 2
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default F.relu
Activation function or name of activation function.
"""
return conv5x5_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
pad=pad,
dilate=dilate,
groups=out_channels,
use_bias=use_bias,
bn_eps=bn_eps,
activation=activation)
class PreConvBlock(Chain):
"""
Convolution block with Batch normalization and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
ksize : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Stride of the convolution.
pad : int or tuple/list of 2 int
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
return_preact : bool, default False
        Whether to return the pre-activation. Used by PreResNet.
    activate : bool, default True
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
ksize,
stride,
pad,
dilate=1,
use_bias=False,
return_preact=False,
activate=True):
super(PreConvBlock, self).__init__()
self.return_preact = return_preact
self.activate = activate
with self.init_scope():
self.bn = L.BatchNormalization(
size=in_channels,
eps=1e-5)
if self.activate:
self.activ = F.relu
self.conv = L.Convolution2D(
in_channels=in_channels,
out_channels=out_channels,
ksize=ksize,
stride=stride,
pad=pad,
nobias=(not use_bias),
dilate=dilate)
def __call__(self, x):
x = self.bn(x)
if self.activate:
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def pre_conv1x1_block(in_channels,
out_channels,
stride=1,
use_bias=False,
return_preact=False,
activate=True):
"""
1x1 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
return_preact : bool, default False
        Whether to return the pre-activation.
    activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=1,
stride=stride,
pad=0,
use_bias=use_bias,
return_preact=return_preact,
activate=activate)
def pre_conv3x3_block(in_channels,
out_channels,
stride=1,
pad=1,
dilate=1,
return_preact=False,
activate=True):
"""
3x3 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilate : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
return_preact : bool, default False
        Whether to return the pre-activation.
    activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=stride,
pad=pad,
dilate=dilate,
return_preact=return_preact,
activate=activate)
def channel_shuffle(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
x : chainer.Variable or numpy.ndarray or cupy.ndarray
Input variable.
groups : int
Number of groups.
Returns
-------
chainer.Variable or numpy.ndarray or cupy.ndarray
Resulted variable.
"""
batch, channels, height, width = x.shape
channels_per_group = channels // groups
x = F.reshape(x, shape=(batch, groups, channels_per_group, height, width))
x = F.swapaxes(x, axis1=1, axis2=2)
x = F.reshape(x, shape=(batch, channels, height, width))
return x
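# Example (illustrative): for channels=6 and groups=2, the channel order
# [0 1 2 3 4 5] becomes [0 3 1 4 2 5], interleaving channels across groups.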
class ChannelShuffle(Chain):
"""
    Channel shuffle layer. A thin wrapper around the channel_shuffle operation that stores the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle, self).__init__()
assert (channels % groups == 0)
self.groups = groups
def __call__(self, x):
return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083. The alternative version.
Parameters:
----------
x : chainer.Variable or numpy.ndarray or cupy.ndarray
Input variable.
groups : int
Number of groups.
Returns
-------
chainer.Variable or numpy.ndarray or cupy.ndarray
Resulted variable.
"""
batch, channels, height, width = x.shape
channels_per_group = channels // groups
x = F.reshape(x, shape=(batch, channels_per_group, groups, height, width))
x = F.swapaxes(x, axis1=1, axis2=2)
x = F.reshape(x, shape=(batch, channels, height, width))
return x
class ChannelShuffle2(Chain):
"""
    Channel shuffle layer. A thin wrapper around the channel_shuffle2 operation that stores the number of groups.
The alternative version.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle2, self).__init__()
assert (channels % groups == 0)
self.groups = groups
def __call__(self, x):
return channel_shuffle2(x, self.groups)
class SEBlock(Chain):
"""
Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 16
Squeeze reduction value.
approx_sigmoid : bool, default False
Whether to use approximated sigmoid function.
activation : function or str, default F.relu
Activation function or name of activation function.
"""
def __init__(self,
channels,
reduction=16,
approx_sigmoid=False,
activation=(lambda: F.relu)):
super(SEBlock, self).__init__()
        mid_channels = channels // reduction
with self.init_scope():
self.conv1 = conv1x1(
in_channels=channels,
                out_channels=mid_channels,
use_bias=True)
self.activ = get_activation_layer(activation)
self.conv2 = conv1x1(
                in_channels=mid_channels,
out_channels=channels,
use_bias=True)
self.sigmoid = HSigmoid() if approx_sigmoid else F.sigmoid
def __call__(self, x):
w = F.average_pooling_2d(x, ksize=x.shape[2:])
w = self.conv1(w)
w = self.activ(w)
w = self.conv2(w)
w = self.sigmoid(w)
x = x * w
return x
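# Minimal usage sketch (shapes assumed): channel-wise recalibration of a map.
#     se = SEBlock(channels=64)
#     y = se(x)  # x: (N, 64, H, W) -> y: same shape, rescaled per channel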
class SimpleSequential(Chain):
"""
A sequential chain that can be used instead of Sequential.
"""
def __init__(self):
super(SimpleSequential, self).__init__()
self.layer_names = []
def __setattr__(self, name, value):
super(SimpleSequential, self).__setattr__(name, value)
if self.within_init_scope and callable(value):
self.layer_names.append(name)
def __delattr__(self, name):
super(SimpleSequential, self).__delattr__(name)
try:
self.layer_names.remove(name)
except ValueError:
pass
def __len__(self):
return len(self.layer_names)
def __call__(self, x):
for name in self.layer_names:
x = self[name](x)
return x
class DualPathSequential(SimpleSequential):
"""
A sequential container for blocks with dual inputs/outputs.
Blocks will be executed in the order they are added.
Parameters:
----------
return_two : bool, default True
        Whether to return two outputs after execution.
first_ordinals : int, default 0
Number of the first blocks with single input/output.
last_ordinals : int, default 0
Number of the final blocks with single input/output.
dual_path_scheme : function
Scheme of dual path response for a block.
dual_path_scheme_ordinal : function
Scheme of dual path response for an ordinal block.
"""
def __init__(self,
return_two=True,
first_ordinals=0,
last_ordinals=0,
dual_path_scheme=(lambda block, x1, x2: block(x1, x2)),
dual_path_scheme_ordinal=(lambda block, x1, x2: (block(x1), x2))):
super(DualPathSequential, self).__init__()
self.return_two = return_two
self.first_ordinals = first_ordinals
self.last_ordinals = last_ordinals
self.dual_path_scheme = dual_path_scheme
self.dual_path_scheme_ordinal = dual_path_scheme_ordinal
def __call__(self, x1, x2=None):
length = len(self.layer_names)
for i, block_name in enumerate(self.layer_names):
block = self[block_name]
if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
x1, x2 = self.dual_path_scheme_ordinal(block, x1, x2)
else:
x1, x2 = self.dual_path_scheme(block, x1, x2)
if self.return_two:
return x1, x2
else:
return x1
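# Usage sketch (assumed): middle blocks consume and return both tensors via
# dual_path_scheme, while the first/last ordinal blocks transform x1 only.
#     seq = DualPathSequential(first_ordinals=1, last_ordinals=1)
#     x1, x2 = seq(x1, x2)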
class Concurrent(SimpleSequential):
"""
    A container for concatenating the outputs of its modules, built on the sequential container.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
stack : bool, default False
Whether to concatenate tensors along a new dimension.
"""
def __init__(self,
axis=1,
stack=False):
super(Concurrent, self).__init__()
self.axis = axis
self.stack = stack
def __call__(self, x):
out = []
for name in self.layer_names:
out.append(self[name](x))
if self.stack:
out = F.stack(tuple(out), axis=self.axis)
else:
out = F.concat(tuple(out), axis=self.axis)
return out
class ParametricSequential(SimpleSequential):
"""
A sequential container for modules with parameters.
Blocks will be executed in the order they are added.
"""
def __init__(self):
super(ParametricSequential, self).__init__()
def __call__(self, x, **kwargs):
for name in self.layer_names:
x = self[name](x, **kwargs)
return x
class ParametricConcurrent(SimpleSequential):
"""
    A container for concatenating the outputs of its modules, built on the sequential container.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=1):
super(ParametricConcurrent, self).__init__()
self.axis = axis
def __call__(self, x, **kwargs):
out = []
for name in self.layer_names:
out.append(self[name](x, **kwargs))
out = F.concat(tuple(out), axis=self.axis)
return out
class Hourglass(Chain):
"""
    An hourglass block.
Parameters:
----------
down_seq : SimpleSequential
Down modules as sequential.
up_seq : SimpleSequential
Up modules as sequential.
skip_seq : SimpleSequential
Skip connection modules as sequential.
merge_type : str, default 'add'
Type of concatenation of up and skip outputs.
return_first_skip : bool, default False
        Whether to return the first skip connection output. Used in ResAttNet.
"""
def __init__(self,
down_seq,
up_seq,
skip_seq,
merge_type="add",
return_first_skip=False):
super(Hourglass, self).__init__()
assert (len(up_seq) == len(down_seq))
assert (len(skip_seq) == len(down_seq))
assert (merge_type in ["add"])
self.merge_type = merge_type
self.return_first_skip = return_first_skip
self.depth = len(down_seq)
with self.init_scope():
self.down_seq = down_seq
self.up_seq = up_seq
self.skip_seq = skip_seq
def __call__(self, x):
y = None
down_outs = [x]
for down_module_name in self.down_seq.layer_names:
down_module = self.down_seq[down_module_name]
x = down_module(x)
down_outs.append(x)
for i in range(len(down_outs)):
if i != 0:
y = down_outs[self.depth - i]
skip_module_name = self.skip_seq.layer_names[self.depth - i]
skip_module = self.skip_seq[skip_module_name]
y = skip_module(y)
if (y is not None) and (self.merge_type == "add"):
x = x + y
if i != len(down_outs) - 1:
up_module_name = self.up_seq.layer_names[self.depth - 1 - i]
up_module = self.up_seq[up_module_name]
x = up_module(x)
if self.return_first_skip:
return x, y
else:
return x
class SesquialteralHourglass(Chain):
"""
A sesquialteral hourglass block.
Parameters:
----------
down1_seq : SimpleSequential
The first down modules as sequential.
skip1_seq : SimpleSequential
The first skip connection modules as sequential.
up_seq : SimpleSequential
Up modules as sequential.
skip2_seq : SimpleSequential
The second skip connection modules as sequential.
down2_seq : SimpleSequential
The second down modules as sequential.
    merge_type : str, default 'cat'
Type of concatenation of up and skip outputs.
"""
def __init__(self,
down1_seq,
skip1_seq,
up_seq,
skip2_seq,
down2_seq,
merge_type="cat"):
super(SesquialteralHourglass, self).__init__()
assert (len(down1_seq) == len(up_seq))
assert (len(down1_seq) == len(down2_seq))
assert (len(skip1_seq) == len(skip2_seq))
assert (len(down1_seq) == len(skip1_seq) - 1)
assert (merge_type in ["cat", "add"])
self.merge_type = merge_type
self.depth = len(down1_seq)
with self.init_scope():
self.down1_seq = down1_seq
self.skip1_seq = skip1_seq
self.up_seq = up_seq
self.skip2_seq = skip2_seq
self.down2_seq = down2_seq
def _merge(self, x, y):
if y is not None:
if self.merge_type == "cat":
x = F.concat((x, y), axis=1)
elif self.merge_type == "add":
x = x + y
return x
def __call__(self, x):
y = self.skip1_seq[self.skip1_seq.layer_names[0]](x)
skip1_outs = [y]
for i in range(self.depth):
x = self.down1_seq[self.down1_seq.layer_names[i]](x)
y = self.skip1_seq[self.skip1_seq.layer_names[i + 1]](x)
skip1_outs.append(y)
x = skip1_outs[self.depth]
y = self.skip2_seq[self.skip2_seq.layer_names[0]](x)
skip2_outs = [y]
for i in range(self.depth):
x = self.up_seq[self.up_seq.layer_names[i]](x)
y = skip1_outs[self.depth - 1 - i]
x = self._merge(x, y)
y = self.skip2_seq[self.skip2_seq.layer_names[i + 1]](x)
skip2_outs.append(y)
x = self.skip2_seq[self.skip2_seq.layer_names[self.depth]](x)
for i in range(self.depth):
x = self.down2_seq[self.down2_seq.layer_names[i]](x)
y = skip2_outs[self.depth - 1 - i]
x = self._merge(x, y)
return x
class MultiOutputSequential(SimpleSequential):
"""
A sequential container with multiple outputs.
Blocks will be executed in the order they are added.
"""
def __init__(self):
super(MultiOutputSequential, self).__init__()
def __call__(self, x):
outs = []
for name in self.layer_names:
block = self[name]
x = block(x)
if hasattr(block, "do_output") and block.do_output:
outs.append(x)
return [x] + outs
class Flatten(Chain):
"""
Simple flatten block.
"""
def __call__(self, x):
return x.reshape(x.shape[0], -1)
class AdaptiveAvgPool2D(Chain):
"""
Simple adaptive average pooling block.
"""
def __call__(self, x):
return F.average_pooling_2d(x, ksize=x.shape[2:])
|
[
"[email protected]"
] | |
30f827f6ddccc3e1c29f529fc940673d80998089
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Lazymux/sqlmap/lib/core/optiondict.py
|
28b61b85b21e4eae996bc3bcfdf792f120d171bf
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d81f4b1eec17cebb5bc92268f0648acad5c4739e63d3fe400f570e68878dc011
size 7119
|
[
"[email protected]"
] | |
229f24e48680f186185ca451cd5de90fd1dd6eda
|
0df73a877fd521b5e0ab95bb261751c87f1f4b39
|
/Scripts/getBranchGroupFnPlanning.py
|
39cf13426b15ffa606a9f6bd92b2b1dea52ecc56
|
[] |
no_license
|
bikiranguha/Bus-Map
|
6bcb907c257e2dc4fcc47dd27772159b51fa2b08
|
8ef96e9027e3abb953834bd76981bcc689ef5250
|
refs/heads/master
| 2020-03-08T06:56:55.703128 | 2018-07-15T17:51:12 | 2018-07-15T17:51:12 | 127,983,097 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,868 |
py
|
"""
Function to generate a dictionary mapping each bus to the set of ties connected to it.
A bus is absent from the keys if it has no ties.
"""
import math
from Queue import Queue
#import sys
#sys.path.insert(0,'C:/Users/Bikiran/Google Drive/Bus Mapping Project Original/Donut Hole Approach/Donut Hole v2')
#from getBusDataFn import getBusData
highImpedanceTieList = []
def makeBranchGroups(planningRaw):
BranchGroupDict = {}
#BranchGroupList = []
with open(planningRaw,'r') as f:
filecontent = f.read()
fileLines = filecontent.split('\n')
branchStartIndex = fileLines.index('0 / END OF GENERATOR DATA, BEGIN BRANCH DATA')+1
branchEndIndex = fileLines.index('0 / END OF BRANCH DATA, BEGIN TRANSFORMER DATA')
#BusDataDict = getBusData(Raw)
for i in range(branchStartIndex,branchEndIndex): # search through branch data
words = fileLines[i].split(',')
#BranchCode = words[2].strip()
R = float(words[3].strip())
X = float(words[4].strip())
Z = math.sqrt(R**2 + X**2)
status = words[-5].strip()
if Z <= 2e-4 and status == '1':
Bus1 = words[0].strip()
Bus2 = words[1].strip()
"""
#check whether all lines with ckt id == '99' are just ties
Bus1Area = BusDataDict[Bus1].area
Bus2Area = BusDataDict[Bus2].area
if Z > 4e-6 and Bus1Area == '222' and Bus2Area == '222':
highImpedanceTieList.append(fileLines[i])
"""
            if Bus1 not in BranchGroupDict:
BranchGroupDict[Bus1] = set()
BranchGroupDict[Bus1].add(Bus2)
            if Bus2 not in BranchGroupDict:
BranchGroupDict[Bus2] = set()
BranchGroupDict[Bus2].add(Bus1)
# get complete bus groups
CompleteBranchGroupDict = {} # each bus has the full bus group as a set
    for Bus in BranchGroupDict: # scans each key and generates the full bus group set
        if Bus in CompleteBranchGroupDict: # Bus already has its group, so skip
continue
frontier = Queue(maxsize=0)
frontier.put(Bus)
BusGroup = set()
# do something similar to BFS
while not frontier.empty():
currentBus = frontier.get()
frontier.task_done()
BusGroup.add(currentBus)
ties = BranchGroupDict[currentBus]
for tie in ties:
if tie not in BusGroup:
frontier.put(tie)
BusGroup.add(tie)
####
for t in list(BusGroup):
CompleteBranchGroupDict[t] = BusGroup
return CompleteBranchGroupDict
if __name__ == "__main__":
planningRaw = 'hls18v1dyn_1219.raw'
BranchGroupDict = makeBranchGroups(planningRaw)
"""
while True:
searchTerm = raw_input('Enter bus number whose list of ties you are looking for: ')
if searchTerm in BranchGroupDict.keys():
for Bus in list(BranchGroupDict[searchTerm.strip()]):
print Bus
else:
print 'Bus has no ties'
"""
"""
with open('tmp.txt','w') as f:
for line in highImpedanceTieList:
f.write(line)
f.write('\n')
"""
|
[
"Bikiran Guha"
] |
Bikiran Guha
|
cb696f10d6758f10de4b5722c710e854e06b2176
|
6ef3fc3ffa5f33e6403cb7cb0c30a35623a52d0d
|
/samples/generated_samples/vision_v1p3beta1_generated_product_search_delete_product_set_sync.py
|
5fdcd34e44b44a9d64020ff62b4d50001492f599
|
[
"Apache-2.0"
] |
permissive
|
vam-google/python-vision
|
61405506e3992ab89e6a454e4dda9b05fe2571f2
|
09e969fa30514d8a6bb95b576c1a2ae2c1e11d54
|
refs/heads/master
| 2022-08-15T08:40:35.999002 | 2022-07-18T16:04:35 | 2022-07-18T16:04:35 | 254,789,106 | 0 | 0 |
Apache-2.0
| 2020-04-11T03:59:02 | 2020-04-11T03:59:01 | null |
UTF-8
|
Python
| false | false | 1,432 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteProductSet
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vision
# [START vision_v1p3beta1_generated_ProductSearch_DeleteProductSet_sync]
from google.cloud import vision_v1p3beta1
def sample_delete_product_set():
# Create a client
client = vision_v1p3beta1.ProductSearchClient()
# Initialize request argument(s)
request = vision_v1p3beta1.DeleteProductSetRequest(
name="name_value",
)
# Make the request
client.delete_product_set(request=request)
# [END vision_v1p3beta1_generated_ProductSearch_DeleteProductSet_sync]
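# Hypothetical invocation (not part of the generated snippet): requires
# Application Default Credentials, and a real resource name in place of
# "name_value", e.g. "projects/PROJECT_ID/locations/LOC/productSets/SET_ID".
#
#     if __name__ == "__main__":
#         sample_delete_product_set()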
|
[
"[email protected]"
] | |
86896efeee052d4f2180465e41b53bf987d9a329
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4137/codes/1575_1333.py
|
2ce3117fcd60bfc1504426b60b1700c097748847
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 93 |
py
|
expressao = (30-(3 ** 2))+(8//3 ** 2) * 10
print((30 - ((3) ** 2)) + (8 // (3) ** 2)* 10 )
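# Step-by-step evaluation (** binds tightest, then // and *, then + and -):
#   3 ** 2 = 9;  30 - 9 = 21;  8 // 9 = 0;  0 * 10 = 0;  21 + 0 = 21
# so both the expression above and the printed value come out to 21.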
|
[
"[email protected]"
] | |
9bcfd3012eec0d28ee28b8ed6b7c79101f1c6b83
|
14a6cacd31c10bb582f8475388f8be1477bea4bc
|
/venv/bin/pip
|
eb26c24b06cec8348bef98595c55150991cad508
|
[] |
no_license
|
YixingLuo/deeplearning
|
2760907039d6cf29c4609c2a82f79cd1b8c4a5ce
|
6c2f712faac345547e8e50560640f61ac9423fd6
|
refs/heads/master
| 2020-04-30T14:29:34.784349 | 2019-03-22T00:48:02 | 2019-03-22T00:48:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 263 |
#!/Users/luoyixing/PycharmProjects/Deeplearning-2/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
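# Note (added comment): this is the standard console-script shim that pip
# generates; the regex strips a trailing "-script.py", "-script.pyw" or
# ".exe" suffix from argv[0] before delegating to pip's internal main().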
|
[
"[email protected]"
] | ||
1c88935573d4ec33c04ab459f2367009017d9a8e
|
872cd13f25621825db0c598268ecd21b49cc2c79
|
/Lesson_15/client/jim/constants.py
|
9177b988ee7dc22ebac20bffc54f392cdef79639
|
[] |
no_license
|
ss2576/client_server_applications_Python
|
c4e9ebe195d23c8ca73211894aa50a74014013d5
|
9b599e37e5dae5af3dca06e197916944f12129d5
|
refs/heads/master
| 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 |
Python
|
UTF-8
|
Python
| false | false | 513 |
py
|
""" Module of constants used in jim protocol """
TYPE = 'type'
REQUEST = 'request'
RESPONSE = 'response'
ACTION = 'action'
TIME = 'time'
BODY = 'body'
CODE = 'code'
MESSAGE = 'message'
USERNAME = 'username'
PASSWORD = 'password'
SENDER = 'sender'
TO = 'to'
TEXT = 'text'
class RequestAction:
""" Class the storage of request actions """
PRESENCE = 'presence'
AUTH = 'auth'
MESSAGE = 'msg'
QUIT = 'quit'
COMMAND = 'command'
START_CHAT = 'start_chat'
ACCEPT_CHAT = 'accept_chat'
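# Illustrative usage sketch (not part of the original module; the exact
# field layout of a jim request is an assumption here):
#
#     import time
#     presence = {
#         TYPE: REQUEST,
#         ACTION: RequestAction.PRESENCE,
#         TIME: time.time(),
#         BODY: {USERNAME: 'guest'},
#     }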
|
[
"[email protected]"
] | |
3ea54c81060a1f933135e5577dde53919207f182
|
286b6dc56323f982092ffafbfac8a32dbbaeb7ef
|
/training_assignments/SandipBarde/SandipBarde_day_5_assignment/exception_02.py
|
99842d6af8c79e12aa1ef5f2077b2c9dc80d9a8e
|
[] |
no_license
|
learndevops19/pythonTraining-CalsoftInc
|
ccee0d90aadc00bfdb17f9578620f6bf92f80a4c
|
c5f61516b835339b394876edd1c6f62e7cc6f0c3
|
refs/heads/master
| 2021-02-05T04:27:17.590913 | 2019-11-20T17:27:06 | 2019-11-20T17:27:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 430 |
py
|
class NegativePriceException(Exception):
    def __init__(self, msg):
        super().__init__(msg)  # keep standard Exception behavior (str(), args)
        self.message = msg
if __name__ == "__main__":
price = float(input("Enter the price\n"))
try:
if(price < 0):
raise NegativePriceException("Inside the Exception:- Price is less than zero.")
else:
print("Execution completed successfully.")
except NegativePriceException as e:
print(e.message)
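# Example run (illustrative): entering -5 raises NegativePriceException and
# prints "Inside the Exception:- Price is less than zero."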
|
[
"[email protected]"
] | |
2844c12c08b01fdbe4ddf201e376874c8b13a2d0
|
01f2986123256d03d731303daa68b742ea4fe23d
|
/Второй максимум.py
|
bbc8490544693dbda3ae5d4a80a48a3842bd4ba5
|
[] |
no_license
|
mayhem215/Python
|
65e05176de50b1f589ca991ac5d9f03b4ca00fa2
|
f974d89d52a5aa8553151ea15a8b62e7c7c07cf5
|
refs/heads/master
| 2020-04-08T11:03:20.372914 | 2018-11-27T07:07:09 | 2018-11-27T07:07:09 | 159,291,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
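# Reads integers until a 0 sentinel and prints the second-largest value seen
# ("Второй максимум" is Russian for "second maximum").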
m1 = int(input())
m2 = int(input())
if m1 < m2:
m1, m2 = m2, m1
n = int(input())
while n != 0:
if n > m1:
        m2, m1 = m1, n  # new overall maximum: the old maximum becomes the runner-up
elif n > m2:
m2 = n
n = int(input())
print(m2)
|
[
"[email protected]"
] | |
7234eeb0580a75054d7eeaf9b5af157f790f9236
|
0567517ff7c0366b58e52d7fa96b651e97af5d82
|
/apps/cmdb/views_old.py
|
704a236968e584c5c511aea688b5b2ee231ad001
|
[] |
no_license
|
m6ttl/smartpipes
|
fdb9976b11d6c520953c240872d2574b1a69ec55
|
2d5846143dbf7b44c36491dd1787c36ebbe4fe0d
|
refs/heads/master
| 2022-12-09T10:46:38.594820 | 2020-03-09T13:01:07 | 2020-03-09T13:01:07 | 246,028,233 | 0 | 0 | null | 2022-12-08T03:46:02 | 2020-03-09T12:20:32 |
HTML
|
UTF-8
|
Python
| false | false | 12,215 |
py
|
# encoding: utf-8
from __future__ import unicode_literals
import math
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from pyecharts import Line3D
from pyecharts import Geo
from pyecharts import GeoLines, Style
from pyecharts import Map
from pyecharts import Gauge
from pyecharts import Bar, Line, Scatter, EffectScatter, Grid, Kline
# Create your views here.
#
# def index(request):
# # request.POST
# # requst.GET
#
# return HttpResponse("My test -- steve_wei !")
REMOTE_HOST = "https://pyecharts.github.io/assets/js"
def echart1(request):
template = loader.get_template('myfirstvis/pyecharts.html')
#
# # value = [20, 190, 253, 77, 65]
# # attr = ['汕头市', '汕尾市', '揭阳市', '阳江市', '肇庆市']
# # map = Map("广东地图示例", width=1200, height=600)
# # map.add("", attr, value, maptype='广东', is_visualmap=True,
# # visual_text_color='#000')
# #map.render()
# # context = dict(
# # myechart=map.render_embed(),
# # host=REMOTE_HOST,
# # script_list=map.get_js_dependencies()
# # )
#
# # gauge = Gauge("仪表盘示例")
# # gauge.add("业务指标", "完成率", 66.66)
# # gauge.show_config()
#
line = Line("折线图示例", width=1200, height=700)
attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
mark_point=["max", "min"], mark_line=["average"])
line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
mark_point=["max", "min"], legend_top="50%", mark_line=["average"],
             # configure dataZoom to control the x-axes at indices 0 and 1, i.e. the first and the second
is_datazoom_show=True, datazoom_xaxis_index=[0, 1])
v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
[2300, 2291.3, 2288.26, 2308.38],
[2295.35, 2346.5, 2295.35, 2345.92],
[2347.22, 2358.98, 2337.35, 2363.8],
[2360.75, 2382.48, 2347.89, 2383.76],
[2383.43, 2385.42, 2371.23, 2391.82],
[2377.41, 2419.02, 2369.57, 2421.15],
[2425.92, 2428.15, 2417.58, 2440.38],
[2411, 2433.13, 2403.3, 2437.42],
[2432.68, 2334.48, 2427.7, 2441.73],
[2430.69, 2418.53, 2394.22, 2433.89],
[2416.62, 2432.4, 2414.4, 2443.03],
[2441.91, 2421.56, 2418.43, 2444.8],
[2420.26, 2382.91, 2373.53, 2427.07],
[2383.49, 2397.18, 2370.61, 2397.94],
[2378.82, 2325.95, 2309.17, 2378.82],
[2322.94, 2314.16, 2308.76, 2330.88],
[2320.62, 2325.82, 2315.01, 2338.78],
[2313.74, 2293.34, 2289.89, 2340.71],
[2297.77, 2313.22, 2292.03, 2324.63],
[2322.32, 2365.59, 2308.92, 2366.16],
[2364.54, 2359.51, 2330.86, 2369.65],
[2332.08, 2273.4, 2259.25, 2333.54],
[2274.81, 2326.31, 2270.1, 2328.14],
[2333.61, 2347.18, 2321.6, 2351.44],
[2340.44, 2324.29, 2304.27, 2352.02],
[2326.42, 2318.61, 2314.59, 2333.67],
[2314.68, 2310.59, 2296.58, 2320.96],
[2309.16, 2286.6, 2264.83, 2333.29],
[2282.17, 2263.97, 2253.25, 2286.33],
[2255.77, 2270.28, 2253.31, 2276.22]]
kline = Kline("K 线图示例", title_top="50%")
kline.add("日K", ["2017/7/{}".format(i + 1) for i in range(31)],
v1, is_datazoom_show=True)
grid = Grid()
grid.add(line, grid_top="60%")
grid.add(kline, grid_bottom="60%")
# grid.render()
context = dict(
myechart=grid.render_embed(),
host=REMOTE_HOST,
script_list=grid.get_js_dependencies()
)
# # value = [155, 10, 66, 78]
# # attr = ["福建", "山东", "北京", "上海"]
# # map = Map("全国地图示例", width=1200, height=600)
# # map.add("", attr, value, maptype='china',
# # is_visualmap=True, is_piecewise=True,
# # visual_text_color="#000",
# # visual_range_text=["", ""],
# # pieces=[
# # {"max": 160, "min": 70, "label": "高数值"},
# # {"max": 69, "min": 0, "label": "低数值"},
# # ])
# #
# # context = dict(
# # myechart=map.render_embed(),
# # host=REMOTE_HOST,
# # script_list=map.get_js_dependencies()
#
# style = Style(
# title_top="#fff",
# title_pos="center",
# width=1200,
# height=600,
# background_color="#404a59"
# )
#
# data_guangzhou = [
# ["广州", "上海"],
# ["广州", "北京"],
# ["广州", "南京"],
# ["广州", "重庆"],
# ["广州", "兰州"],
# ["广州", "杭州"]
# ]
# geolines = GeoLines("GeoLines 示例", **style.init_style)
# geolines.add("从广州出发", data_guangzhou, is_legend_show=False)
#
# context = dict(
# myechart=geolines.render_embed(),
# host=REMOTE_HOST,
# script_list=geolines.get_js_dependencies()
# )
return HttpResponse(template.render(context, request))
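# The 'myfirstvis/pyecharts.html' template has to load the chart's JS
# dependencies and render the embedded markup; a minimal hypothetical
# template body (an assumption, not shipped with this file) would be:
#
#     {% for jsfile_name in script_list %}
#         <script src="{{ host }}/{{ jsfile_name }}.js"></script>
#     {% endfor %}
#     {{ myechart|safe }}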
# def echart1(request):
# # gauge = Gauge("仪表盘示例")
# # gauge.add("业务指标", "完成率", 66.66)
# # gauge.show_config()
# template = loader.get_template('myfirstvis/pyecharts.html')
# data = [
# ('北京', 28), ('天津', 29), ('石家庄', 29), ('太原', 34), ('呼和浩特', 27), ('哈尔滨', 31), ('长春', 29), ('沈阳', 30), ('上海', 40),
# ('合肥', 40), ('南京', 40), ('济南', 35), ('青岛', 33), ('杭州', 40), ('福州', 37), ('厦门', 34), ('南昌', 39), ('武汉', 38),
# ('长沙', 39),
# ('郑州', 37), ('南宁', 36), ('广州', 33), ('深圳', 28), ('珠海', 30), ('海口', 30), ('三亚', 31), ('西安', 39), ('兰州', 32),
# ('乌鲁木齐', 31),
# ('西宁', 27), ('银川', 30), ('成都', 32), ('重庆', 38), ('贵阳', 29), ('昆明', 22), ('拉萨', 23), ('香港', 30), ('澳门', 30), ]
# #
# # geo = Geo("7月23日全国主要城市最高气温", "数据源自中国天气网", title_color="#000000", title_pos="center",
# # width=1200, height=600, background_color='#FFFFFF')
# # attr, value = geo.cast(data)
# # geo.add("", attr, value, visual_range=[20, 40], visual_text_color="#000000", symbol_size=15, is_visualmap=True)
# # geo.show_config()
#
# geo = Geo("全国主要城市空气质量", "data from pm2.5", title_color="#fff",
# title_pos="center", width=1200,
# height=600, background_color='#404a59')
# attr, value = geo.cast(data)
# geo.add("", attr, value, type="heatmap", is_visualmap=True, visual_range=[0, 300],
# visual_text_color='#fff')
#
# # data = [
# # ('汕头市', 50), ('汕尾市', 60), ('揭阳市', 35),
# # ('阳江市', 44), ('肇庆市', 72)
# # ]
# # geo = Geo("广东城市空气质量", "data from pm2.5", title_color="#fff",
# # title_pos="center", width=1200,
# # height=600, background_color='#404a59')
# # attr, value = geo.cast(data)
# # geo.add("", attr, value, maptype='广东', type="effectScatter",
# # is_random=True, effect_scale=5, is_legend_show=False)
# # # geo.show_config()
# context = dict(
# myechart=geo.render_embed(),
# host=REMOTE_HOST,
# script_list=geo.get_js_dependencies()
# )
# return HttpResponse(template.render(context, request))
# def echart1(request):
#
# template = loader.get_template('myfirstvis/pyecharts.html')
# style = Style(
# title_top="#fff",
# title_pos="center",
# width=1200,
# height=600,
# background_color="#404a59"
# )
# style_geo = style.add(
# is_label_show=True,
# line_curve=0.2,
# line_opacity=0.6,
# legend_text_color="#eee",
# legend_pos="right",
# geo_effect_symbol="plane",
# geo_effect_symbolsize=15,
# label_color=['#a6c84c', '#ffa022', '#46bee9'],
# label_pos="right",
# label_formatter="{b}",
# label_text_color="#eee",
# )
# data_guangzhou = [
# ["广州", "上海"],
# ["广州", "北京"],
# ["广州", "南京"],
# ["广州", "重庆"],
# ["广州", "兰州"],
# ["广州", "杭州"]
# ]
# data_beijing = [
# ["北京", "上海"],
# ["北京", "广州"],
# ["北京", "南京"],
# ["北京", "重庆"],
# ["北京", "兰州"],
# ["北京", "杭州"]
# ]
# geolines1 = GeoLines("GeoLines 示例", **style.init_style)
# geolines1.add("从广州出发", data_guangzhou, **style_geo)
# geolines1.add("从北京出发", data_beijing, **style_geo)
#
# # geo.show_config()
# context = dict(
# myechart=geolines1.render_embed(),
# host=REMOTE_HOST,
# script_list=geolines1.get_js_dependencies()
# )
# return HttpResponse(template.render(context, request))
def index(request):
template = loader.get_template('myfirstvis/pyecharts.html')
l3d = line3d()
context = dict(
myechart=l3d.render_embed(),
host=REMOTE_HOST,
script_list=l3d.get_js_dependencies()
)
return HttpResponse(template.render(context, request))
# def map1():
# value = [20, 190, 253, 77, 65]
# attr = ['汕头市', '汕尾市', '揭阳市', '阳江市', '肇庆市']
# map = Map("广东地图示例", width=1200, height=600)
# map.add("", attr, value, maptype='广东', is_visualmap=True,
# visual_text_color='#000')
# #map.render()
# return map
def line3d():
_data = []
for t in range(0, 25000):
_t = t / 1000
x = (1 + 0.25 * math.cos(75 * _t)) * math.cos(_t)
y = (1 + 0.25 * math.cos(75 * _t)) * math.sin(_t)
z = _t + 2.0 * math.sin(75 * _t)
_data.append([x, y, z])
range_color = [
'#313695', '#4575b4', '#74add1', '#abd9e9', '#e0f3f8', '#ffffbf',
'#fee090', '#fdae61', '#f46d43', '#d73027', '#a50026']
line3d = Line3D("3D line plot demo", width=1200, height=600)
line3d.add("", _data, is_visualmap=True,
visual_range_color=range_color, visual_range=[0, 30],
is_grid3D_rotate=True, grid3D_rotate_speed=180)
return line3d
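# The parametric loop above traces a coil: a unit circle whose radius is
# modulated by 0.25*cos(75t), with z = t + 2*sin(75t), for t in [0, 25).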
# def index(request):
# # request.POST
# # requst.GETtest
#
# return HttpResponse("My test test -- steve_wei !" )
# def test(request):
# # request.POST
# # requst.GETtest
#
# return HttpResponse("just test -- steve_wei !" )
def test(request):
# request.POST
# requst.GETtest
return HttpResponse("just test -- steve_wei !" )
#
# data = [
# ('北京',28),('天津',29),('石家庄',29),('太原',34),('呼和浩特',27),('哈尔滨',31),('长春',29),('沈阳',30),('上海',40),
# ('合肥',40),('南京',40),('济南',35),('青岛',33),('杭州',40),('福州',37),('厦门',34),('南昌',39),('武汉',38),('长沙',39),
# ('郑州',37),('南宁',36),('广州',33),('深圳',28),('珠海',30),('海口',30),('三亚',31),('西安',39),('兰州',32),('乌鲁木齐',31),
# ('西宁',27),('银川',30),('成都',32),('重庆',38),('贵阳',29),('昆明',22),('拉萨',23),('香港',30),('澳门',30),]
#
# geo = Geo("7月23日全国主要城市最高气温", "数据源自中国天气网", title_color="#000000", title_pos="center",
# width=1200, height=600, background_color='#FFFFFF')
# attr, value = geo.cast(data)
# geo.add("", attr, value, visual_range=[20, 40], visual_text_color="#000000", symbol_size=15,is_visualmap=True)
# geo.show_config()
# geo.render()
# return HttpResponse("just test -- steve_wei !" )
# return geo
|
[
"[email protected]"
] | |
4a71558e39cfe45057c00d2d00e55cb99ba434b8
|
c3a3ae45f6fb22bdb3c622498c7ff1c2c2732f6a
|
/day20/homework/s12bbs/bbs/views.py
|
7c19039a94703905939c5b42343c2ea255fe2444
|
[] |
no_license
|
huyuedong/S12
|
df6b56cf05bb9f9c4a6e54b6a5228f1715e20245
|
61aa6d91f4e70f87c9b4c4b1e2042d5eeb2e2c3d
|
refs/heads/master
| 2020-12-14T08:50:57.514965 | 2016-07-30T01:45:03 | 2016-07-30T01:45:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,285 |
py
|
from django.shortcuts import render, redirect, HttpResponse
from bbs import models
from django.db.models import F, Q, Count, Sum, aggregates
from bbs import forms
from bbs.bll import uploadfile_handler, comments_handler
from datetime import datetime, timezone
# Create your views here.
# Fetch all boards with set_as_top_menu=True from the database, ordered by position_index
category_list = models.Category.objects.filter(set_as_top_menu=True).order_by("position_index")
# Home page
def index(request):
    category_obj = models.Category.objects.get(position_index=1)  # find the first board
    article_list = models.Article.objects.filter(status='published')  # find all published articles
return render(request, 'bbs/index.html', {
'category_list': category_list,
'article_list': article_list,
'category_obj': category_obj,
})
# Board page
def category(request, category_id):
    category_obj = models.Category.objects.get(id=category_id)
    if category_obj.position_index == 1:  # home page
        article_list = models.Article.objects.filter(status='published')
    else:
        article_list = models.Article.objects.filter(category_id=category_obj.id, status='published')
    return render(request, 'bbs/index.html', {
        'category_list': category_list,  # top menu
        'category_obj': category_obj,  # board object
        'article_list': article_list,  # article list
})
# Article detail page
def article_detail(request, article_id):
article_obj = models.Article.objects.get(id=article_id)
return render(request, "bbs/article_detail.html", {
"category_list": category_list,
"article_obj": article_obj,
})
# Comment submission
def post_comment(request):
if request.method == "POST":
new_comment_obj = models.Comment(
comment_type=request.POST.get("comment_type"),
parent_comment_id=request.POST.get("parent_comment_id", None),
article_id=request.POST.get("article_id"),
user_id=request.user.userprofile.id,
comment=request.POST.get("comment"),
)
new_comment_obj.save()
return HttpResponse("OK")
# Fetch comments
def get_comments(request, article_id):
    article_obj = models.Article.objects.get(id=article_id)
    # comment_set = article_obj.comment_set.select_related().filter(comment_type=1)  # take comments only
comment_set = article_obj.comment_set.select_related()
comment_tree = comments_handler.build_comment_tree(comment_set)
html_str = comments_handler.render_comment_tree(comment_tree)
return HttpResponse(html_str)
def new_article(request):
if request.method == "POST":
        article_form = forms.ArticleForm(request.POST, request.FILES)  # validate data and files
        if article_form.is_valid():  # validate with the form
            form_data = article_form.cleaned_data
            form_data["author_id"] = request.user.userprofile.id  # article author
            form_data["pub_date"] = datetime.now(timezone.utc)
            new_article_img_path = uploadfile_handler.uploadfile_handle(request)
            form_data["head_img"] = new_article_img_path
            new_article_obj = models.Article(**form_data)  # the article id becomes available after save()
new_article_obj.save()
return render(request, "bbs/new_article.html", {"new_article_obj": new_article_obj})
else:
print(article_form.errors)
all_category_list = models.Category.objects.all()
return render(request, "bbs/new_article.html", {"category_list": all_category_list})
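# Illustrative URL wiring for the views above (hypothetical urls.py, not part
# of this file; route patterns are assumptions based on the view signatures):
#
#     from django.conf.urls import url
#     from bbs import views
#
#     urlpatterns = [
#         url(r'^$', views.index),
#         url(r'^category-(\d+)/$', views.category),
#         url(r'^article/(\d+)/$', views.article_detail),
#         url(r'^post_comment/$', views.post_comment),
#         url(r'^get_comments/(\d+)/$', views.get_comments),
#         url(r'^new_article/$', views.new_article),
#     ]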
|
[
"[email protected]"
] |