| Column | Type | Observed range / values |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5-283 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0-41 |
| license_type | string | 2 classes |
| repo_name | string | length 7-96 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k-662M, nullable |
| star_events_count | int64 | 0-35.5k |
| fork_events_count | int64 | 0-20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7-5.88M |
| extension | string | 30 classes |
| content | string | length 7-5.88M |
| authors | sequence | length 1 |
| author | string | length 0-73 |

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
128e3552fba11da5d11b9eed232b4459e42a308a | 215f92fc89aec2bcdb5e1c111da5dbd5d48375ce | /01demo练习/thdemotwo.py | 9ee992accebdb5b730f35c46b89e5ffa3ca28a0e | [] | no_license | dugzzuli/kerasDemo | 2d5f2d9397ee29a116227fcd53eaf9198c0a582e | 877b9b0654ba48d265d44a3921dcbacaa0da68ed | refs/heads/master | 2021-01-20T11:39:27.068016 | 2017-03-05T09:47:06 | 2017-03-05T09:47:06 | 83,959,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import tensorflow as tf
# Two scalar float32 placeholders and a graph op that adds them (TF 1.x graph API).
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
add = tf.add(a, b)
# Launch a session and evaluate the op, feeding concrete values for the placeholders.
sess = tf.Session()
binding = {a: 1.5, b: 2.5}
c = sess.run(add, feed_dict=binding)
print(c) | [
"[email protected]"
] | |
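The file above exercises the TensorFlow 1.x graph API (`tf.placeholder`, `tf.Session`, `feed_dict`). For comparison, a rough TensorFlow 2.x eager-mode equivalent of the same addition is sketched below; the TF 2 installation is an assumption, not something the dataset row implies.

```python
import tensorflow as tf  # assuming TensorFlow 2.x

a = tf.constant(1.5)
b = tf.constant(2.5)
c = tf.add(a, b)    # eager execution: no Session or feed_dict needed
print(c.numpy())    # 4.0
```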
c525825e7355610203d2e7630a60c974092775c3 | 84cdb06106bd3ab0d953cb449bbb7601e012f843 | /AN_setGlobalFrame.py | 944312dd0bbd33220a5fac9b6f944546378cc88a | [] | no_license | chibi-ta/nuke | 73b956548bb1b885ecddcc3473c983c5eb523f0e | 10f3f610c8559833dc2082b53d02c4c462ac299e | refs/heads/master | 2020-08-15T07:33:50.193365 | 2019-12-16T08:48:22 | 2019-12-16T08:48:22 | 215,301,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | ###############################
# set global frames
#
###############################
| [
"[email protected]"
] | |
b840655eac84395212fa9eeaf096d52cc1716142 | f683b3f8fc87c831d17038d5aeb19811bd95838c | /model.py | 0f8639d8d2a3e5bc914824bb949500068287fd58 | [] | no_license | mpslxz/3d-mri-brain-tumor-segmentation-using-autoencoder-regularization | 530a03c84e1337e2421ea733519f46ffaa4a7b96 | 6d19dfbf1ba4a3bb620721c8c254355fa62e9db0 | refs/heads/master | 2020-05-25T17:13:51.568880 | 2019-05-15T15:35:10 | 2019-05-15T15:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,287 | py | # Keras implementation of the paper:
# 3D MRI Brain Tumor Segmentation Using Autoencoder Regularization
# by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)
# Author of this code: Suyog Jadhav (https://github.com/IAmSUyogJadhav)
import keras.backend as K
from keras.losses import mse
from keras.layers import Conv3D, Activation, Add, UpSampling3D, Lambda, Dense
from keras.layers import Input, Reshape, Flatten, Dropout
from keras.optimizers import adam
from keras.models import Model
try:
from group_norm import GroupNormalization
except ImportError:
import urllib.request
print('Downloading group_norm.py in the current directory...')
url = 'https://raw.githubusercontent.com/titu1994/Keras-Group-Normalization/master/group_norm.py'
urllib.request.urlretrieve(url, "group_norm.py")
from group_norm import GroupNormalization
def green_block(inp, filters, data_format='channels_first', name=None):
"""
green_block(inp, filters, name=None)
------------------------------------
Implementation of the special residual block used in the paper. The block
consists of two (GroupNorm --> ReLu --> 3x3x3 non-strided Convolution)
units, with a residual connection from the input `inp` to the output. Used
internally in the model. Can be used independently as well.
Parameters
----------
`inp`: An keras.layers.layer instance, required
The keras layer just preceding the green block.
`filters`: integer, required
No. of filters to use in the 3D convolutional block. The output
layer of this green block will have this many no. of channels.
`data_format`: string, optional
        The format of the input data. Must be either 'channels_first' or
'channels_last'. Defaults to `channels_first`, as used in the paper.
`name`: string, optional
The name to be given to this green block. Defaults to None, in which
case, keras uses generated names for the involved layers. If a string
is provided, the names of individual layers are generated by attaching
a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed
by _1 or _2.
Returns
-------
`out`: A keras.layers.Layer instance
The output of the green block. Has no. of channels equal to `filters`.
The size of the rest of the dimensions remains same as in `inp`.
"""
inp_res = Conv3D(
filters=filters,
kernel_size=(1, 1, 1),
strides=1,
data_format=data_format,
name=f'Res_{name}' if name else None)(inp)
# axis=1 for channels_first data format
# No. of groups = 8, as given in the paper
x = GroupNormalization(
groups=8,
axis=1 if data_format == 'channels_first' else 0,
name=f'GroupNorm_1_{name}' if name else None)(inp)
x = Activation('relu', name=f'Relu_1_{name}' if name else None)(x)
x = Conv3D(
filters=filters,
kernel_size=(3, 3, 3),
strides=1,
padding='same',
data_format=data_format,
name=f'Conv3D_1_{name}' if name else None)(x)
x = GroupNormalization(
groups=8,
axis=1 if data_format == 'channels_first' else 0,
name=f'GroupNorm_2_{name}' if name else None)(x)
x = Activation('relu', name=f'Relu_2_{name}' if name else None)(x)
x = Conv3D(
filters=filters,
kernel_size=(3, 3, 3),
strides=1,
padding='same',
data_format=data_format,
name=f'Conv3D_2_{name}' if name else None)(x)
out = Add(name=f'Out_{name}' if name else None)([x, inp_res])
return out
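# As noted in the docstring, green_block can also be used on its own, e.g.:
#   y = green_block(x, filters=64, name='enc_64')   # same spatial dims as x, 64 output channels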
# From keras-team/keras/blob/master/examples/variational_autoencoder.py
def sampling(args):
"""Reparameterization trick by sampling from an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
"""
z_mean, z_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_var) * epsilon
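# With the (batch, 128) latent layers defined later in this file, z_var plays the role of a
# log-variance: the draw above is z = z_mean + exp(0.5 * z_var) * eps with eps ~ N(0, I),
# so the sample remains differentiable with respect to z_mean and z_var.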
def loss(input_shape, inp, out_VAE, z_mean, z_var, e=1e-8):
"""
loss(input_shape, inp, out_VAE, z_mean, z_var, e=1e-8)
------------------------------------------------------
Since keras does not allow custom loss functions to have arguments
other than the true and predicted labels, this function acts as a wrapper
that allows us to implement the custom loss used in the paper, involving
outputs from multiple layers.
L = L<dice> + 0.1 ∗ L<L2> + 0.1 ∗ L<KL>
- L<dice> is the dice loss between input and segmentation output.
- L<L2> is the L2 loss between the output of VAE part and the input.
- L<KL> is the standard KL divergence loss term for the VAE.
Parameters
----------
`input_shape`: A 4-tuple, required
The shape of an image as the tuple (c, H, W, D), where c is
the no. of channels; H, W and D is the height, width and depth of the
input image, respectively.
`inp`: An keras.layers.Layer instance, required
The input layer of the model. Used internally.
`out_VAE`: An keras.layers.Layer instance, required
The output of VAE part of the decoder. Used internally.
`z_mean`: An keras.layers.Layer instance, required
The vector representing values of mean for the learned distribution
in the VAE part. Used internally.
`z_var`: An keras.layers.Layer instance, required
The vector representing values of variance for the learned distribution
in the VAE part. Used internally.
`e`: Float, optional
A small epsilon term to add in the denominator to avoid dividing by
zero and possible gradient explosion.
Returns
-------
loss_(y_true, y_pred): A custom keras loss function
This function takes as input the predicted and ground labels, uses them
to calculate the dice loss. Combined with the L<KL> and L<L2 computed
earlier, it returns the total loss.
"""
c, H, W, D = input_shape
n = c * H * W * D
loss_L2 = mse(inp, out_VAE)
loss_KL = (1 / n) * K.sum(
K.square(z_mean) + z_var - K.log(z_var) - 1,
axis=-1
)
def loss_(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(K.abs(y_true_f * y_pred_f), axis=-1)
        # Soft Dice loss (1 - Dice coefficient), so that minimising it maximises overlap.
        loss_dice = 1. - (2. * intersection) / (
            K.sum(K.square(y_true_f), -1) + K.sum(K.square(y_pred_f), -1) + e)
        return loss_dice + 0.1 * loss_L2 + 0.1 * loss_KL
return loss_
def build_model(input_shape=(4, 160, 192, 128)):
"""
build_model(input_shape=(4, 160, 192, 128))
-------------------------------------------
Creates the model used in the BRATS2018 winning solution
by Myronenko A. (https://arxiv.org/pdf/1810.11654.pdf)
Parameters
----------
`input_shape`: A 4-tuple, optional.
Shape of the input image. Must be a 4D image of shape (c, H, W, D),
where, each of H, W and D are divisible by 2^4. Defaults to the crop
size used in the paper, i.e., (4, 160, 192, 128).
Returns
-------
`model`: A keras.models.Model instance
The created model.
"""
c, H, W, D = input_shape
assert len(input_shape) == 4, "Input shape must be a 4-tuple"
assert ~(H % 16) and ~(W % 16) and ~(D % 16), \
"All the input dimensions must be divisible by 16"
# -------------------------------------------------------------------------
# Encoder
# -------------------------------------------------------------------------
## Input Layer
inp = Input(input_shape)
## The Initial Block
x = Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=1,
padding='same',
data_format='channels_first',
name='Input_x1')(inp)
## Dropout (0.2)
x = Dropout(0.2)(x)
## Green Block x1 (output filters = 32)
x1 = green_block(x, 32, name='x1')
x = Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=2,
padding='same',
data_format='channels_first',
name='Enc_DownSample_32')(x1)
## Green Block x2 (output filters = 64)
x = green_block(x, 64, name='Enc_64_1')
x2 = green_block(x, 64, name='x2')
x = Conv3D(
filters=64,
kernel_size=(3, 3, 3),
strides=2,
padding='same',
data_format='channels_first',
name='Enc_DownSample_64')(x2)
## Green Blocks x2 (output filters = 128)
x = green_block(x, 128, name='Enc_128_1')
x3 = green_block(x, 128, name='x3')
x = Conv3D(
filters=128,
kernel_size=(3, 3, 3),
strides=2,
padding='same',
data_format='channels_first',
name='Enc_DownSample_128')(x3)
## Green Blocks x4 (output filters = 256)
x = green_block(x, 256, name='Enc_256_1')
x = green_block(x, 256, name='Enc_256_2')
x = green_block(x, 256, name='Enc_256_3')
x4 = green_block(x, 256, name='x4')
# -------------------------------------------------------------------------
# Decoder
# -------------------------------------------------------------------------
    ## GT (Ground Truth) Part
# -------------------------------------------------------------------------
### Green Block x1 (output filters=128)
x = Conv3D(
filters=128,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_GT_ReduceDepth_128')(x4)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_GT_UpSample_128')(x)
x = Add(name='Input_Dec_GT_128')([x, x3])
x = green_block(x, 128, name='Dec_GT_128')
### Green Block x1 (output filters=64)
x = Conv3D(
filters=64,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_GT_ReduceDepth_64')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_GT_UpSample_64')(x)
x = Add(name='Input_Dec_GT_64')([x, x2])
x = green_block(x, 64, name='Dec_GT_64')
### Green Block x1 (output filters=32)
x = Conv3D(
filters=32,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_GT_ReduceDepth_32')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_GT_UpSample_32')(x)
x = Add(name='Input_Dec_GT_32')([x, x1])
x = green_block(x, 32, name='Dec_GT_32')
### Blue Block x1 (output filters=32)
x = Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=1,
padding='same',
data_format='channels_first',
name='Input_Dec_GT_Output')(x)
### Output Block
out_GT = Conv3D(
filters=3, # No. of tumor classes is 3
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
activation='sigmoid',
name='Dec_GT_Output')(x)
## VAE (Variational Auto Encoder) Part
# -------------------------------------------------------------------------
### VD Block (Reducing dimensionality of the data)
x = GroupNormalization(groups=8, axis=1, name='Dec_VAE_VD_GN')(x4)
x = Activation('relu', name='Dec_VAE_VD_relu')(x)
x = Conv3D(
filters=16,
kernel_size=(3, 3, 3),
strides=2,
padding='same',
data_format='channels_first',
name='Dec_VAE_VD_Conv3D')(x)
# Not mentioned in the paper, but the author used a Flattening layer here.
x = Flatten(name='Dec_VAE_VD_Flatten')(x)
x = Dense(256, name='Dec_VAE_VD_Dense')(x)
### VDraw Block (Sampling)
z_mean = Dense(128, name='Dec_VAE_VDraw_Mean')(x)
z_var = Dense(128, name='Dec_VAE_VDraw_Var')(x)
x = Lambda(sampling, name='Dec_VAE_VDraw_Sampling')([z_mean, z_var])
### VU Block (Upsizing back to a depth of 256)
x = Dense(1 * 10 * 12 * 8)(x)
x = Activation('relu')(x)
x = Reshape((1, 10, 12, 8))(x)
x = Conv3D(
filters=256,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_VAE_ReduceDepth_256')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_VAE_UpSample_256')(x)
### Green Block x1 (output filters=128)
x = Conv3D(
filters=128,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_VAE_ReduceDepth_128')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_VAE_UpSample_128')(x)
x = green_block(x, 128, name='Dec_VAE_128')
### Green Block x1 (output filters=64)
x = Conv3D(
filters=64,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_VAE_ReduceDepth_64')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_VAE_UpSample_64')(x)
x = green_block(x, 64, name='Dec_VAE_64')
### Green Block x1 (output filters=32)
x = Conv3D(
filters=32,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_VAE_ReduceDepth_32')(x)
x = UpSampling3D(
size=2,
data_format='channels_first',
name='Dec_VAE_UpSample_32')(x)
x = green_block(x, 32, name='Dec_VAE_32')
### Blue Block x1 (output filters=32)
x = Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=1,
padding='same',
data_format='channels_first',
name='Input_Dec_VAE_Output')(x)
### Output Block
out_VAE = Conv3D(
filters=4,
kernel_size=(1, 1, 1),
strides=1,
data_format='channels_first',
name='Dec_VAE_Output')(x)
# Build and Compile the model
out = out_GT
model = Model(inp, out) # Create the model
model.compile(
adam(lr=1e-4),
loss(input_shape, inp, out_VAE, z_mean, z_var),
metrics=['accuracy']
)
return model
| [
"[email protected]"
] | |
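A minimal usage sketch for the Keras file above. The import path, batch size, and random arrays are illustrative assumptions; only `build_model` and its default crop size come from the file itself.

```python
import numpy as np
from model import build_model  # assumes the file above is saved as model.py

model = build_model(input_shape=(4, 160, 192, 128))  # 4 MRI modalities, channels-first
model.summary()

# Shape check with random data: the compiled model predicts the 3 tumor sub-region masks.
x = np.random.rand(1, 4, 160, 192, 128).astype('float32')
y = (np.random.rand(1, 3, 160, 192, 128) > 0.5).astype('float32')
model.fit(x, y, batch_size=1, epochs=1)
```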
3731fec82bb9260c5221238d2534ad1e78cd1e48 | b06183c3dafd1632a7ac4ff2774f00df65952598 | /projectpages/migrations/0008_add_default_for_unused_title.py | 35713cf7c1e8db15bd2f5f9c952d73b56226a89f | [] | no_license | xinyil/cdh-web | 6fd85ec0787bd2bbfcebfe5e294b5241a247d0a2 | 1d999332411e08a7b1173fdb990625bdac4ba8cf | refs/heads/master | 2021-01-23T21:47:11.694494 | 2017-08-29T18:37:25 | 2017-08-29T18:37:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-31 18:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projectpages', '0007_project_app_remaining_verbose_names'),
]
operations = [
migrations.AlterField(
model_name='projectpage',
name='title',
field=models.CharField(default='CDH @ Princeton Projects', max_length=500),
),
]
| [
"[email protected]"
] | |
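The migration above would normally be applied through Django's management commands; a hedged, programmatic sketch is shown below (a configured settings module for the cdh-web project is assumed):

```python
import django
from django.core.management import call_command

django.setup()  # assumes DJANGO_SETTINGS_MODULE is already configured
call_command('migrate', 'projectpages', '0008_add_default_for_unused_title')
```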
00bab8ef4a3912126a0038c0f792df4ab9d600f2 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/__init__.py | 0c7bd21e99cb56313e90bd719a179419b589f0d2 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 30,854 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv4-unicast/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__max_prefixes','__prevent_teardown','__shutdown_threshold_pct','__restart_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__shutdown_threshold_pct = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
self.__max_prefixes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
self.__restart_timer = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
self.__prevent_teardown = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'bgp', u'global', u'afi-safis', u'afi-safi', u'ipv4-unicast', u'prefix-limit', u'state']
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """max_prefixes must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
})
self.__max_prefixes = t
if hasattr(self, '_set'):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prevent_teardown must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
})
self.__prevent_teardown = t
if hasattr(self, '_set'):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
'defined-type': "oc-types:percentage",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
})
self.__shutdown_threshold_pct = t
if hasattr(self, '_set'):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """restart_timer must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
})
self.__restart_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = {'max_prefixes': max_prefixes, 'prevent_teardown': prevent_teardown, 'shutdown_threshold_pct': shutdown_threshold_pct, 'restart_timer': restart_timer, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv4-unicast/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__max_prefixes','__prevent_teardown','__shutdown_threshold_pct','__restart_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__shutdown_threshold_pct = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
self.__max_prefixes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
self.__restart_timer = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
self.__prevent_teardown = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'bgp', u'global', u'afi-safis', u'afi-safi', u'ipv4-unicast', u'prefix-limit', u'state']
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """max_prefixes must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
})
self.__max_prefixes = t
if hasattr(self, '_set'):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prevent_teardown must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
})
self.__prevent_teardown = t
if hasattr(self, '_set'):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
'defined-type': "oc-types:percentage",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
})
self.__shutdown_threshold_pct = t
if hasattr(self, '_set'):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """restart_timer must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
})
self.__restart_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = {'max_prefixes': max_prefixes, 'prevent_teardown': prevent_teardown, 'shutdown_threshold_pct': shutdown_threshold_pct, 'restart_timer': restart_timer, }
| [
"[email protected]"
] | |
08e32712de6dfc74682fa18f88f2835ce5668717 | 18b3ad3b0e1f7f10969738251e1201d01dfbc6bf | /backup_files/practice/gen.py | 661be7c0dbbedce50723d0e3a8dbccd475d5b3ee | [] | no_license | sahthi/backup2 | 11d509b980e731c73733b1399a8143780779e75a | 16bed38f0867fd7c766c2a008c8d43b0660f0cb0 | refs/heads/master | 2020-03-21T12:39:56.890129 | 2018-07-09T08:12:46 | 2018-07-09T08:12:46 | 138,565,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | def fruits():
yield 'mango'
yield 'apple'
yield 'banana'
a=fruits()
print next(a)
print next(a)
| [
"[email protected]"
] | |
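A small, hedged sketch of how the generated `state` class above can be exercised on its own; in practice these pyangbind bindings are reached through the full OpenConfig tree rather than instantiated directly.

```python
s = state()                      # the second `state` class defined above
s._set_max_prefixes(1000)        # leaves are config false, hence the "private" setters
print(s.max_prefixes)            # 1000
print(bool(s.prevent_teardown))  # False (the YANG default)
```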
b6d8eb75dbd9e88603c435cc6ffd7e2e57ec9f3a | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/compute/claims.py | e9f0cfb779698b649b32e293be86a6086a17fee7 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,496 | py | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Claim objects for use with resource tracking.
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from patron import context
from patron import exception
from patron.i18n import _
from patron.i18n import _LI
from patron import objects
from patron.objects import base as obj_base
from patron.virt import hardware
LOG = logging.getLogger(__name__)
class NopClaim(object):
"""For use with compute drivers that do not support resource tracking."""
def __init__(self, migration=None):
self.migration = migration
self.claimed_numa_topology = None
@property
def disk_gb(self):
return 0
@property
def memory_mb(self):
return 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.abort()
def abort(self):
pass
def __str__(self):
return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb,
self.disk_gb)
class Claim(NopClaim):
"""A declaration that a compute host operation will require free resources.
Claims serve as marker objects that resources are being held until the
update_available_resource audit process runs to do a full reconciliation
of resource usage.
    This information will be used to help keep the local compute host's
ComputeNode model in sync to aid the scheduler in making efficient / more
correct decisions with respect to host selection.
"""
def __init__(self, context, instance, tracker, resources, overhead=None,
limits=None):
super(Claim, self).__init__()
# Stash a copy of the instance at the current point of time
if isinstance(instance, obj_base.NovaObject):
self.instance = instance.obj_clone()
else:
# This does not use copy.deepcopy() because it could be
# a sqlalchemy model, and it's best to make sure we have
# the primitive form.
self.instance = jsonutils.to_primitive(instance)
self._numa_topology_loaded = False
self.tracker = tracker
if not overhead:
overhead = {'memory_mb': 0}
self.overhead = overhead
self.context = context
# Check claim at constructor to avoid mess code
# Raise exception ComputeResourcesUnavailable if claim failed
self._claim_test(resources, limits)
@property
def disk_gb(self):
return self.instance['root_gb'] + self.instance['ephemeral_gb']
@property
def memory_mb(self):
return self.instance['memory_mb'] + self.overhead['memory_mb']
@property
def numa_topology(self):
if self._numa_topology_loaded:
return self._numa_topology
else:
if isinstance(self.instance, obj_base.NovaObject):
self._numa_topology = self.instance.numa_topology
else:
try:
self._numa_topology = (
objects.InstanceNUMATopology.get_by_instance_uuid(
context.get_admin_context(), self.instance['uuid'])
)
except exception.NumaTopologyNotFound:
self._numa_topology = None
self._numa_topology_loaded = True
return self._numa_topology
def abort(self):
"""Compute operation requiring claimed resources has failed or
been aborted.
"""
LOG.debug("Aborting claim: %s" % self, instance=self.instance)
self.tracker.abort_instance_claim(self.context, self.instance)
def _claim_test(self, resources, limits=None):
"""Test if this claim can be satisfied given available resources and
optional oversubscription limits
This should be called before the compute node actually consumes the
resources required to execute the claim.
:param resources: available local compute node resources
:returns: Return true if resources are available to claim.
"""
if not limits:
limits = {}
# If an individual limit is None, the resource will be considered
# unlimited:
memory_mb_limit = limits.get('memory_mb')
disk_gb_limit = limits.get('disk_gb')
numa_topology_limit = limits.get('numa_topology')
msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d "
"GB")
params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb}
LOG.info(msg % params, instance=self.instance)
reasons = [self._test_memory(resources, memory_mb_limit),
self._test_disk(resources, disk_gb_limit),
self._test_numa_topology(resources, numa_topology_limit),
self._test_pci()]
reasons = reasons + self._test_ext_resources(limits)
reasons = [r for r in reasons if r is not None]
if len(reasons) > 0:
raise exception.ComputeResourcesUnavailable(reason=
"; ".join(reasons))
LOG.info(_LI('Claim successful'), instance=self.instance)
def _test_memory(self, resources, limit):
type_ = _("memory")
unit = "MB"
total = resources['memory_mb']
used = resources['memory_mb_used']
requested = self.memory_mb
return self._test(type_, unit, total, used, requested, limit)
def _test_disk(self, resources, limit):
type_ = _("disk")
unit = "GB"
total = resources['local_gb']
used = resources['local_gb_used']
requested = self.disk_gb
return self._test(type_, unit, total, used, requested, limit)
def _test_pci(self):
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
self.context, self.instance['uuid'])
if pci_requests.requests:
can_claim = self.tracker.pci_tracker.stats.support_requests(
pci_requests.requests)
if not can_claim:
return _('Claim pci failed.')
def _test_ext_resources(self, limits):
return self.tracker.ext_resources_handler.test_resources(
self.instance, limits)
def _test_numa_topology(self, resources, limit):
host_topology = resources.get('numa_topology')
requested_topology = self.numa_topology
if host_topology:
host_topology = objects.NUMATopology.obj_from_db_obj(
host_topology)
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
self.context, self.instance['uuid'])
pci_stats = None
if pci_requests.requests:
pci_stats = self.tracker.pci_tracker.stats
instance_topology = (
hardware.numa_fit_instance_to_host(
host_topology, requested_topology,
limits=limit,
pci_requests=pci_requests.requests,
pci_stats=pci_stats))
if requested_topology and not instance_topology:
if pci_requests.requests:
return (_("Requested instance NUMA topology together with"
" requested PCI devices cannot fit the given"
" host NUMA topology"))
else:
return (_("Requested instance NUMA topology cannot fit "
"the given host NUMA topology"))
elif instance_topology:
self.claimed_numa_topology = instance_topology
def _test(self, type_, unit, total, used, requested, limit):
"""Test if the given type of resource needed for a claim can be safely
allocated.
"""
LOG.info(_LI('Total %(type)s: %(total)d %(unit)s, used: %(used).02f '
'%(unit)s'),
{'type': type_, 'total': total, 'unit': unit, 'used': used},
instance=self.instance)
if limit is None:
# treat resource as unlimited:
LOG.info(_LI('%(type)s limit not specified, defaulting to '
'unlimited'), {'type': type_}, instance=self.instance)
return
free = limit - used
# Oversubscribed resource policy info:
LOG.info(_LI('%(type)s limit: %(limit).02f %(unit)s, '
'free: %(free).02f %(unit)s'),
{'type': type_, 'limit': limit, 'free': free, 'unit': unit},
instance=self.instance)
if requested > free:
return (_('Free %(type)s %(free).02f '
'%(unit)s < requested %(requested)d %(unit)s') %
{'type': type_, 'free': free, 'unit': unit,
'requested': requested})
class ResizeClaim(Claim):
"""Claim used for holding resources for an incoming resize/migration
operation.
"""
def __init__(self, context, instance, instance_type, image_meta, tracker,
resources, overhead=None, limits=None):
self.context = context
self.instance_type = instance_type
self.image_meta = image_meta
super(ResizeClaim, self).__init__(context, instance, tracker,
resources, overhead=overhead,
limits=limits)
self.migration = None
@property
def disk_gb(self):
return (self.instance_type['root_gb'] +
self.instance_type['ephemeral_gb'])
@property
def memory_mb(self):
return self.instance_type['memory_mb'] + self.overhead['memory_mb']
@property
def numa_topology(self):
return hardware.numa_get_constraints(
self.instance_type, self.image_meta)
def _test_pci(self):
pci_requests = objects.InstancePCIRequests.\
get_by_instance_uuid_and_newness(
self.context, self.instance['uuid'], True)
if pci_requests.requests:
claim = self.tracker.pci_tracker.stats.support_requests(
pci_requests.requests)
if not claim:
return _('Claim pci failed.')
def _test_ext_resources(self, limits):
return self.tracker.ext_resources_handler.test_resources(
self.instance_type, limits)
def abort(self):
"""Compute operation requiring claimed resources has failed or
been aborted.
"""
LOG.debug("Aborting claim: %s" % self, instance=self.instance)
self.tracker.drop_resize_claim(
self.context,
self.instance, instance_type=self.instance_type,
image_meta=self.image_meta)
| [
"[email protected]"
] | |
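A hedged sketch of the claim pattern the module above implements. In Nova-derived code the resource tracker constructs these objects; `context`, `instance`, `tracker`, `resources`, `limits`, and `_build_instance` below are assumed names, not part of the file.

```python
# Test the claim up front; the constructor raises ComputeResourcesUnavailable on failure.
claim = Claim(context, instance, tracker, resources, limits=limits)

# Used as a context manager, the claim calls abort() automatically if the build raises,
# returning the held memory/disk/NUMA resources to the tracker.
with claim:
    _build_instance(context, instance)
```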
683f0fedb24262051c20c75fd23485ddd7391ca7 | d411108f449cd6f3d96e16ec5d6f53558f3cce92 | /api/model/list_model.py | eb7c8079943d4824c77f4a630327ca89c01ad41a | [] | no_license | lameay/mm-rest-api | 557a020977933e0f48a84cbcdfbc2095663a14f0 | 869e8165d08f658969a8cb115faf2f2a0405208c | refs/heads/master | 2020-03-19T05:36:13.681720 | 2018-06-05T20:20:57 | 2018-06-05T20:20:57 | 135,947,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | uploads = []
def add_upload_to_hist(file_details):
uploads.append(file_details)
def get_upload_history():
return uploads | [
"[email protected]"
] | |
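Usage of the helpers above is straightforward; a short sketch follows (the import path is inferred from the repository layout and the payload dict is made up):

```python
from api.model.list_model import add_upload_to_hist, get_upload_history

add_upload_to_hist({'filename': 'report.csv', 'size_kb': 12})
print(get_upload_history())  # [{'filename': 'report.csv', 'size_kb': 12}]
```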
ca72386ff7abf199881fcdd9533553feadeba21e | 153fade36c325e603c20b4cadef330ff512d647e | /main.py | a1febf6a280484ad0a8e720c3d782f87bdbf0a8f | [] | no_license | jaideepmurkute/Multi-purpose-Disentangling-Variational-Autoencoders-for-ECG-data | cbed432ddc71824008d06967fc6290e40a029582 | 1411f68439136b478119ec97ec51b5be2f3b4247 | refs/heads/master | 2022-12-12T09:18:33.436800 | 2019-11-09T23:21:01 | 2019-11-09T23:21:01 | 220,714,205 | 2 | 0 | null | 2022-11-22T04:18:17 | 2019-11-09T22:56:23 | Python | UTF-8 | Python | false | false | 21,718 | py | import os
import time
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.init
import torch.optim as optim
from torch.optim.lr_scheduler import ExponentialLR
from gvae_ecg.trainingVAE import train, test, train_unsupervised_classifier
import sys
# from utils import get_data_loader, get_model_reference, plot_training_history, min_max_scaling
from gvae_ecg.utils import *
from gvae_ecg.metrics_ecg import mutual_info_metric_shapes
import shutil
seed = 41
np.random.seed(seed)
torch.manual_seed(seed)
if not torch.cuda.is_available():
print("No GPU w/ CUDA visible ...")
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
print("PyTorch is using GPU: ", torch.cuda.get_device_name(0))
cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if cuda_available else "cpu")
use_gpu_id = 0 # Set which GPU ID to use
if cuda_available:
    if torch.cuda.device_count() == 1:
use_gpu_id = 0
torch.cuda.set_device(use_gpu_id)
def arg_parser():
parser = argparse.ArgumentParser(description='VAE configurations')
parser.add_argument('--model_name', type=str, nargs='?', default='vae_beta_clinical_iid',
help='Unique name for the model')
parser.add_argument('--sample_treatment', type=str, nargs='?', default='iid',
help='Choose from iid or group')
parser.add_argument('--learning_type', type=str, nargs='?', default='unsupervised',
help='supervised or unsupervised')
parser.add_argument('--finetune_encoder', type=bool, nargs='?', default=True,
help='Whether or not to update encoder weights when training classifier for unsupervised models')
parser.add_argument('--architecture_type', type=str, nargs='?', default='cnn',
help='Choose from cnn or mlp')
parser.add_argument('--model_type', type=str, nargs='?', default='other',
help='Choose from ibp OR other(for cnn-vae and ml-vae) OR baseline')
parser.add_argument('--dataset_type', type=str, nargs='?', default='clinical',
help='simulated or clinical')
parser.add_argument('--penalty_type', type=str, nargs='?', default='beta',
help='vanilla or beta')
parser.add_argument('--beta', type=int, default=5, help='beta penalty for VAE training (default: 5)')
parser.add_argument('--validate_during_training', type=bool, default=False,
help='To get test/validation set performance after each epoch.')
parser.add_argument('--test_after_training', type=bool, default=False, help='To get test set performance at the end.')
parser.add_argument('--log_history_w_tensorboard', type=bool, default=False,
help='Log progress with Tensor-board. Currently not supported')
parser.add_argument('--train_classifier', type=bool, default=True,
help='Just train classifier from existing or new model(new model case is same as baseline)')
parser.add_argument('--num_classes', type=int, default=10,
help='Number of heart segments to be classified')
parser.add_argument('--use_lr_schedule', type=bool, nargs='?', default=True,
help='Whether To use learning rate decay based on ReduceLROnPlateau')
parser.add_argument('--best_epoch_tolerance', type=int, default=3,
help='Epochs to wait with no improvement, for Early Stopping event')
parser.add_argument('--epochs', type=int, default=50, help='Max. number of epochs to train VAE')
parser.add_argument('--lr', type=float, default=3e-4, help='VAE learning rate')
parser.add_argument('--unsup_clf_epochs', type=int, default=35,
help='Max. number of epochs to train classifier after unsupervised training')
parser.add_argument('--unsup_clf_lr', type=float, default=1e-3,
help='learning rate for classification task training after unsupervised VAE training')
parser.add_argument('--batch_size', type=int, default=128, help='input batch size for training (default: 512)')
parser.add_argument('--bottleneck_size', type=int, default=100, help='VAE bottleneck size (default: 100)')
parser.add_argument('--optimizer_type', type=str, default='adam', help='Choose optimizer: adam OR SGD')
parser.add_argument('--load_optim_from_checkpoint', type=bool, default=False,
help='Load optimizer - for training continuation')
parser.add_argument('--l2_penalty', type=float, default=0.0, help='L2-weight decay penalty')
parser.add_argument('--SGD_nesterov', type=bool, default=True, help='enables SGD Nesterov momentum')
parser.add_argument('--SGD_momentum', type=float, default=0.9, help='SGD Nesterov momentum value')
parser.add_argument('--samples_to_read', type=int, default=10,
help='How many samples to read. -1 for all samples. Useful for testing changes.')
parser.add_argument('--print_log_every', type=int, default=20,
help='print loss and accuracy values after these many batches during training.')
parser.add_argument('--device', type=object, default=device,
help='Device being used by PyTorch.')
parser.add_argument('--alpha0', type=float, default=10,
help='alpha value for Beta distribution.')
parser.add_argument('--temp', type=float, default=1.,
help='temp')
parser.add_argument('--temp_prior', type=float, default=1.,
help='temp_prior')
return parser.parse_args()
global args
args = arg_parser()
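# Illustrative invocation only (script name and flags assumed from the parser
# above, not prescribed by it):
#   python main.py --learning_type unsupervised --penalty_type beta --epochs 50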
# ---------------------------------------------------------------------------------------------------------------------
def train_model(args, data_dir, plots_store_dir, model_store_dir):
model = get_model_reference(args, model_store_dir, training_flag=True, load_from_checkpoint=False)
optimizer, scheduler = get_optimizer(args, model_store_dir, model)
train_scores = np.zeros(args.epochs)
train_acc = np.zeros(args.epochs)
NLL_log_train = []
KLD_log_train = []
ELBO_log_train = []
XENTROPY_log_train = []
ACCURACY_log_train = []
NLL_log_train_epoch = []
KLD_log_train_epoch = []
ELBO_log_train_epoch = []
XENTROPY_log_train_epoch = []
ACCURACY_log_train_epoch = []
NLL_log_val_epoch = []
KLD_log_val_epoch = []
ELBO_log_val_epoch = []
XENTROPY_log_val_epoch = []
ACCURACY_log_val_epoch = []
NLL_log_val = []
KLD_log_val = []
ELBO_log_val = []
XENTROPY_log_val = []
ACCURACY_log_val = []
all_elbo_plot_log = []
learning_rate_log = []
validation_scores = np.zeros(args.epochs)
validation_acc = np.zeros(args.epochs)
epoch_times = np.zeros(args.epochs)
if args.learning_type == 'supervised' or args.learning_type == 'baseline':
best_valid = -1
# best_valid = sys.maxsize
elif args.learning_type == 'unsupervised':
best_valid = sys.maxsize
train_loader = get_data_loader(args, mode='train', data_dir=data_dir)
if args.validate_during_training:
val_loader = get_data_loader(args, mode='validation', data_dir=data_dir)
start = time.time()
best_epoch_number = -1
print("\n + Starting training ...")
if args.learning_type == 'unsupervised':
best_model = None
for epoch in range(1, args.epochs + 1):
# learning_rate_log.append(scheduler.get_lr())
train_scores[epoch - 1], all_accuracy_train, all_NLL_train, all_KLD_train, all_elbo_train, \
all_xentropy_train, all_elbo_plot, optimizer, scheduler = train(args, train_loader, model, optimizer,
epoch, scheduler)
NLL_log_train_epoch.append(np.mean(all_NLL_train))
KLD_log_train_epoch.append(np.mean(all_KLD_train))
ELBO_log_train_epoch.append(np.mean(all_elbo_train))
XENTROPY_log_train_epoch.append(np.mean(all_xentropy_train))
ACCURACY_log_train_epoch.append(np.mean(all_accuracy_train))
print("==========>Summary(Avg.) of epoch: {}".format(epoch))
print("---->On train set: ")
if args.model_type == 'baseline':
print("XEntropy: {:.5f} \t Accuracy: {:.5f}".format(XENTROPY_log_train_epoch[-1],
ACCURACY_log_train_epoch[-1]))
else:
print("ELBO: {:.5f} \t KLD: {:.5f} \t NLL: {:.5f} \t XEntropy: {:.5f} \t Accuracy: {:.5f}".format(
ELBO_log_train_epoch[-1], KLD_log_train_epoch[-1], NLL_log_train_epoch[-1], XENTROPY_log_train_epoch[-1],
ACCURACY_log_train_epoch[-1]))
NLL_log_train += all_NLL_train
KLD_log_train += all_KLD_train
ELBO_log_train += all_elbo_train
all_elbo_plot_log += all_elbo_plot
if args.learning_type == 'supervised':
train_acc[epoch - 1] = np.mean(all_accuracy_train)
XENTROPY_log_train += all_xentropy_train
ACCURACY_log_train += all_accuracy_train
if args.validate_during_training:
validation_scores[epoch - 1], all_accuracy_val, all_NLL_val, all_KLD_val, all_elbo_val, \
all_xentropy_val = test(args, model_store_dir, val_loader, model, epoch,
store_outputs=False)
NLL_log_val += all_NLL_val
KLD_log_val += all_KLD_val
ELBO_log_val += all_elbo_val
if args.learning_type == 'supervised':
validation_acc[epoch - 1] = np.mean(all_accuracy_val)
XENTROPY_log_val += all_xentropy_val
ACCURACY_log_val += all_accuracy_val
NLL_log_val_epoch.append(np.mean(all_NLL_val))
KLD_log_val_epoch.append(np.mean(all_KLD_val))
ELBO_log_val_epoch.append(np.mean(all_elbo_val))
XENTROPY_log_val_epoch.append(np.mean(all_xentropy_val))
ACCURACY_log_val_epoch.append(np.mean(all_accuracy_val))
epoch_times[epoch - 1] = time.time() - start
if args.validate_during_training:
print("On validation set: ")
print("ELBO: {:.5f} \t KLD: {:.5f} \t NLL: {:.5f} \t XEntropy: {:.5f} \t Accuracy: {:.5f}".format(
ELBO_log_val_epoch[-1], KLD_log_val_epoch[-1], NLL_log_val_epoch[-1], XENTROPY_log_val_epoch[-1],
ACCURACY_log_val_epoch[-1]))
is_best = False
if args.validate_during_training:
if args.learning_type == 'supervised':
print("validation_acc[epoch - 1]: ", validation_acc[epoch - 1])
is_best = validation_acc[epoch - 1] > best_valid
best_valid = max(best_valid, validation_acc[epoch - 1])
elif args.learning_type == 'unsupervised':
is_best = ELBO_log_val_epoch[-1] < best_valid
if is_best:
best_valid = ELBO_log_val_epoch[-1]
else:
if args.learning_type == 'supervised':
is_best = train_acc[epoch - 1] > best_valid
if is_best:
best_valid = train_acc[epoch - 1]
# is_best = np.mean(all_elbo_train) < best_valid
# best_valid = np.mean(all_elbo_train)
            if args.learning_type == 'unsupervised':
is_best = np.mean(all_elbo_train) < best_valid
if is_best:
best_valid = np.mean(all_elbo_train)
best_model = model
if is_best:
best_epoch_number = epoch
torch.save(model.state_dict(), model_store_dir + '/' + args.model_name + '_best.pt')
print("New best epoch found: ", epoch)
# torch.save(model.state_dict(), model_store_dir + '/' + args.model_name + '_last_epoch.pt')
torch.save(model.state_dict(), model_store_dir + '/' + args.model_name + '.pt')
if (epoch - best_epoch_number) == args.best_epoch_tolerance:
print("Best epoch not found since last {} epochs. Stopping training.".format(args.best_epoch_tolerance))
# break
print("Last best Epoch: ", best_epoch_number)
print("-" * 50)
print("model stored at: ", model_store_dir + '/' + args.model_name + '.pt')
print("-" * 40)
print("Best epoch: ", best_epoch_number)
print("Average training loss: ", np.mean(train_scores))
print("Average training accuracy: ", np.mean(train_acc))
if args.validate_during_training:
print("Average validation loss: ", np.mean(validation_scores))
print("Average validation accuracy: ", np.mean(validation_acc))
print("-" * 40)
if args.learning_type == 'unsupervised':
print("Training for classification task ...")
model.train()
classifier_lr = args.unsup_clf_lr
classifier_max_epochs = args.unsup_clf_epochs
optimizer = optim.Adam(model.parameters(), lr=classifier_lr, weight_decay=0.0005)
scheduler = ExponentialLR(optimizer, gamma=0.95, last_epoch=-1)
# scheduler_interval = args.unsup_clf_scheduler_interval
if args.finetune_encoder:
print("Keeping whole network un-frozen. Back-propagating only through the classification branch.")
else:
for name, child in model.named_children():
if 'conv' in name or 'bn' in name:
print("Freezing layer: {}".format(name))
for param in child.parameters():
param.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=classifier_lr)
print("optimizer updated to only compute gradients of the un-frozen layers ...")
xentropy_clf_log_train = []
accuracy_clf_log_train = []
xentropy_clf_log_val = []
accuracy_clf_log_val = []
best_accuracy = -1
best_epoch = 1
for epoch in range(1, classifier_max_epochs + 1):
print("Starting epoch {}".format(epoch))
model, xentropy_clf, accuracy_clf, optimizer = train_unsupervised_classifier(train_loader, model, args.dataset_type,
optimizer, args)
if args.validate_during_training:
# args, model_store_dir, test_loader, model, log_likelihood, epoch, store_outputs=False
_, all_accuracy_val, _, _, _, all_xentropy_val = test(args, model_store_dir, val_loader, model, epoch)
model.train()
xentropy_clf_log_val += all_xentropy_val
accuracy_clf_log_val += all_accuracy_val
xentropy_clf_log_train += xentropy_clf
accuracy_clf_log_train += accuracy_clf
epoch_accuracy = np.mean(accuracy_clf)
if epoch_accuracy > best_accuracy:
best_epoch = epoch
best_accuracy = epoch_accuracy
print("New best classification epoch found: {}. Best Accuracy: {:.5f}".format(epoch, best_accuracy))
torch.save(model.state_dict(), model_store_dir + '/' + args.model_name + '_classification_best.pt')
torch.save(model.state_dict(), model_store_dir + '/' + args.model_name + '_classification.pt')
if (epoch - best_epoch) == args.best_epoch_tolerance:
print("No best epoch found since {} epochs. Existing classifier training ...".
format(args.best_epoch_tolerance))
break
print()
if args.validate_during_training:
num_plots = 2
else:
num_plots = 1
plt.figure()
plt.subplot(num_plots, 1, 1)
plt.plot(accuracy_clf_log_train, color='blue')
plt.plot(accuracy_clf_log_val, color='green')
plt.legend(['Train', 'Validation'])
plt.ylabel("Classification Accuracy")
if args.validate_during_training:
plt.subplot(num_plots, 1, 2)
plt.plot(xentropy_clf_log_train, color='blue')
plt.plot(xentropy_clf_log_val, color='green')
plt.legend(['Train', 'Validation'])
plt.ylabel("Classification X-entropy")
plt.savefig(fname=plots_store_dir+'/unsup_clf_training_log', dpi=400)
all_elbo_plot_log = min_max_scaling(all_elbo_plot_log)
plt.figure()
plt.plot(all_elbo_plot_log)
plt.savefig('all_elbo_plot_log')
for normalize_plot in [True, False]:
plot_training_history(train_scores, validation_scores, plots_store_dir, learning_rate_log,
NLL_log_train, NLL_log_val, KLD_log_train, KLD_log_val, ELBO_log_train,
ELBO_log_val, XENTROPY_log_train, XENTROPY_log_val, ACCURACY_log_train,
ACCURACY_log_val, args.validate_during_training, args.learning_type, NLL_log_train_epoch,
KLD_log_train_epoch, ELBO_log_train_epoch, XENTROPY_log_train_epoch,
ACCURACY_log_train_epoch, NLL_log_val_epoch, KLD_log_val_epoch, ELBO_log_val_epoch,
XENTROPY_log_val_epoch, ACCURACY_log_val_epoch, normalize_plot)
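# train_model leaves its artifacts on disk: the best checkpoint as
# <model_name>_best.pt and the latest one as <model_name>.pt inside
# model_store_dir, plus the training-history plots written to plots_store_dir.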
def test_model(args, model_store_dir, data_dir, dataset_split=''):
model = get_model_reference(args, model_store_dir, training_flag=False, load_from_checkpoint=True)
model.eval()
test_loader = get_data_loader(args, mode=dataset_split, data_dir=data_dir)
print("Evaluating on {} set: ".format(dataset_split))
# args, val_loader, model, log_liklihood_VT, epoch
total_avg_loss, all_accuracy, all_NLL, all_KLD, all_elbo, all_xentropy = test(args, model_store_dir, test_loader, model,
0, store_outputs=True)
print("Avg. NLL: ", np.mean(all_NLL))
print("Avg. KLD: ", np.mean(all_KLD))
print("Avg. ELBO: ", np.mean(all_elbo))
print("Avg. cross-entropy: ", np.mean(all_xentropy))
print("Avg. Accuracy on {} set: {:5f}".format(args.dataset_type, np.mean(all_accuracy)))
if __name__ == '__main__':
# -------------------------------- House-keeping ---------------------------------------------------------------
if args.penalty_type == 'vanilla':
args.beta = 1
if args.dataset_type == 'simulated':
data_store_dir = './data/simulated_dataset'
elif args.dataset_type == 'clinical':
data_store_dir = './data/clinical_dataset'
model_home_dir = args.learning_type + '_' + args.penalty_type + '_' + args.architecture_type
model_store_home = './model_store'
model_store_dir = model_store_home + '/' + args.model_name
# model_store_dir = model_store_home + '/' + args.learning_type+'_'+args.penalty_type+'_'+args.architecture_type.lower()
# plots_store_home = './plots_store'
# plots_store_dir = plots_store_home + '/' + args.model_name
plots_store_dir = model_store_dir + '/plots'
choice = int(input("Enter choice: 1] Train \t 2] Test \n"))
if not os.path.exists(model_store_home):
os.mkdir(model_store_home)
if not os.path.exists(model_store_dir):
os.mkdir(model_store_dir)
elif os.path.exists(model_store_dir):
model_store_dir_files = os.listdir(model_store_dir)
if args.model_name in model_store_dir_files and choice == 1:
new_model_name = args.model_name+'_'+str(np.random.randint(1, 100, 1)[0])
print("Model named {} already exists. Naming new model to be trained as {}.".format(args.model_name,
new_model_name))
            args.model_name = new_model_name  # keep the non-clashing name on args so it is actually used downstream
#if not os.path.exists(plots_store_home):
# os.mkdir(plots_store_home)
if not os.path.exists(plots_store_dir):
os.mkdir(plots_store_dir)
# ------------------------------------------------------------------------------------------------------------
if choice == 1:
files_to_backup = ['./main.py', './trainingVAE.py', './cnn_model.py', './cnn_model_new1.py', './commonModels.py', './metrics_ecg.py', './trainingVAE_clinical.py', './utils.py']
for file_path in files_to_backup:
try:
if os.path.isfile(file_path) or os.path.isdir(file_path):
file_name = file_path.split('/')[-1]
backup_path = model_store_dir+'/'+file_name
print("Copying {} to {}".format(file_path, backup_path))
shutil.copy2(file_path, backup_path)
else:
print("could not backup file {}, File not found at mentioned path ...".format(file_path))
except Exception as e:
print("Exception occurred during creating backup code files... ")
print(e)
train_model(args, data_store_dir, plots_store_dir, model_store_dir)
elif choice == 2:
test_model(args, model_store_dir, data_store_dir, dataset_split='test')
plot_roc_multiclass(args, model_store_dir)
# test_loader = get_data_loader(args, mode='test', data_dir=data_store_dir)
# metric, marginal_entropies, cond_entropies = mutual_info_metric_shapes(model, test_loader, args.batch_size)
# print("metric: ", metric)
# print("marginal_entropies: ", marginal_entropies)
# print("cond_entropies: ", cond_entropies)
else:
print("Invalid choice choice ...")
# ----------------------------------------------------------------------
# File: /src/GUI/objectDel.py  (repo: flammer222222/surgutGAS)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'g.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_objectDelDialog(object):
def setupUi(self, objectDelDialog):
if not objectDelDialog.objectName():
objectDelDialog.setObjectName(u"objectDelDialog")
objectDelDialog.resize(400, 122)
self.verticalLayout_2 = QVBoxLayout(objectDelDialog)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName(u"verticalLayout")
self.delLabel = QLabel(objectDelDialog)
self.delLabel.setObjectName(u"delLabel")
self.delLabel.setAlignment(Qt.AlignCenter)
self.verticalLayout.addWidget(self.delLabel)
self.okButton = QPushButton(objectDelDialog)
self.okButton.setObjectName(u"okButton")
self.verticalLayout.addWidget(self.okButton)
self.cancelButton = QPushButton(objectDelDialog)
self.cancelButton.setObjectName(u"cancelButton")
self.verticalLayout.addWidget(self.cancelButton)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(objectDelDialog)
QMetaObject.connectSlotsByName(objectDelDialog)
# setupUi
def retranslateUi(self, objectDelDialog):
objectDelDialog.setWindowTitle(QCoreApplication.translate("objectDelDialog", u"\u0423\u0434\u0430\u043b\u0438\u0442\u044c \u043e\u0431\u044a\u0435\u043a\u0442", None))
self.delLabel.setText(QCoreApplication.translate("objectDelDialog", u"\u0423\u0434\u0430\u043b\u0438\u0442\u044c \u043e\u0431\u044a\u0435\u043a\u0442?", None))
self.okButton.setText(QCoreApplication.translate("objectDelDialog", u"\u041e\u043a", None))
self.cancelButton.setText(QCoreApplication.translate("objectDelDialog", u"\u041e\u0442\u043c\u0435\u043d\u0430", None))
# retranslateUi
| [
"[email protected]"
] | |
3c504eebb43342f64f0935276470ec1c675b95fe | 79c87411cc51a92c6a802dc0c058468f6e9ed0bf | /aptfinder/scraper/settings.py | e00e467fa9ca087da0c0e1072770e87cf59695e9 | [
"MIT"
] | permissive | steinitzu/aptfinder | 34920b27cc05730925016a39adb27f0b897cbd00 | bea9353a825478ab924fc5ffe1e8e2e4e4a3955c | refs/heads/master | 2021-05-04T09:04:37.373874 | 2016-11-15T14:43:34 | 2016-11-15T14:43:34 | 69,709,417 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,190 | py | # -*- coding: utf-8 -*-
# Scrapy settings for aptscraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'aptfinder.scraper'
SPIDER_MODULES = ['aptfinder.scraper.spiders']
NEWSPIDER_MODULE = 'aptfinder.scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'aptscraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'aptscraper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'aptscraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'aptfinder.scraper.pipelines.SQLAlchemyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 2
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# ----------------------------------------------------------------------
# File: /task/migrations/0007_auto_20210210_2106.py  (repo: woozlic/EmailSender)
# ----------------------------------------------------------------------
# Generated by Django 3.1.5 on 2021-02-10 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('task', '0006_auto_20210210_2023'),
]
operations = [
migrations.AlterField(
model_name='license',
name='date_end',
field=models.DateField(null=True, verbose_name='Дата окончания полномочий по управлению домом'),
),
migrations.AlterField(
model_name='license',
name='date_start',
field=models.DateField(null=True, verbose_name='Дата начала полномочий по управлению домом'),
),
migrations.AlterField(
model_name='license',
name='reason_to_exclude',
field=models.CharField(max_length=250, null=True, verbose_name='Основание исключения'),
),
]
# ----------------------------------------------------------------------
# File: /50 Pow(x, n).py  (repo: Celiacaoyanan/LeetCode)
# ----------------------------------------------------------------------
class Solution(object):
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
if n==0:
return 1
if n<0:
return 1/self.myPow(x,-n)
if n%2==0:
return self.myPow(x*x, n//2)
if n%2==1:
return x*self.myPow(x*x,n//2)
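    # Worked example of the halving recursion above: myPow(2.0, 10) ->
    # myPow(4.0, 5) -> 4.0 * myPow(16.0, 2) -> 4.0 * myPow(256.0, 1)
    # -> 4.0 * 256.0 * myPow(65536.0, 0) = 1024.0, so the depth is O(log n).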
# ----------------------------------------------------------------------
# File: /Main.py  (repo: madevelascom/MineSweeper, MIT)
# ----------------------------------------------------------------------
################################################################
# -*- coding: utf-8 -*-
# Title: Buscaminas
# Developed by: Madelyne Velasco
# SavedVariables: board, match
# Notes: Minesweeper game that supports different size games.
# It can save game on files and load them later.
# TODO: Support several languages
################################################################
__author__ = 'Administrator'
from numpy import zeros, shape
from random import *
import time
import pickle
################################################################
# Function: menu()
# Main menu
################################################################
def menu():
print('========================================')
print('========================================')
print('¡Bienvenidos a Buscaminas!')
print('Seleccione alguna de las opciones para continuar ')
print('1.- Nuevo Juego Inicial (8x8) ')
print('2.- Nuevo Juego Intermedio (16x16)')
print('3.- Nuevo Juego Avanzado (30x16)')
print('4.- Nuevo Juego Personalizado (Tamaño a escoger)')
print('5.- Cargar una partida Guardada')
print('6.- Acerca del juego')
print('0.- Salir')
print('=======================================')
get_option()
################################################################
# Function: get_option()
# Initialize each option of the menu. Calls several functions.
################################################################
def get_option():
INSTRUCTIONS = ("""
OBJETIVO
Encontrar los cuadrados vacíos evitando las minas. ¡Mientras más rápido, mejor!
EL TABLERO
Buscaminas tiene tres tableros predefinidos en varias dificultades:
[*]Principiante: 8x8 con 10 minas
[*]Intermedio: 16x16 con 40 minas
[*]Avanzado: 30x16 con 99 minas
También puedes crear un tablero personalizado de tamaño máximo 30x30 con hasta 841 minas.
CÓMO JUGAR
Las reglas son sencillas:
[*]Descubre una mina y el juego termina.
[*]Descubre un cuadrado vacío y el juego continía
[*]Descubre un número y te dará información sobre cuantas minas se encuentran escondidas en
los cuadrados circundantes.""")
GOODBYE = ("""
_
.-T | _
| | | / |
| | | / /`|
_ | | |/ / /
\`\| '.' / /
\ \`-. '--|
\ ' |
\ .` /
| |""")
option = input('Ingrese alguna de las opciones anteriores para continuar: ')
if option == "6":
option = input(INSTRUCTIONS)
while not option_is_valid(option):
option = input('Entrada incorrecta, escriba 6 para ayuda.Ingrese una opcion del menú válida: ')
if option == "6":
option = input(INSTRUCTIONS)
if option == "1":
x = 8
y = 8
mines = 10
board = create_board(x, y)
match = bomb_maping(x, y, mines)
play_game(board, match, x, y)
elif option == "2":
x = 16
y = 16
mines = 40
board = create_board(x, y)
match = bomb_maping(x, y, mines)
play_game(board, match, x, y)
elif option == "3":
x = 30
y = 16
mines = 99
board = create_board(x, y)
match = bomb_maping(x, y, mines)
play_game(board, match, x, y)
elif option == "4":
x = input('Ingrese el ancho de la cuadrilla (Máximo 30')
y = input('Ingrese el alto de la cuadrilla (Máximo 30')
mines = input ('Ingrese la cantida de minas. Máximo xy/2')
while not per_size_is_valid(x, y, mines):
print('Alguna de las opciones ingresadas no es válida')
x = input('Ingrese el ancho de la cuadrilla (Máximo 30')
y = input('Ingrese el alto de la cuadrilla (Máximo 30')
mines = input ('Ingrese la cantida de minas. Máximo xy/2')
x = int(x)
y = int(y)
mines = int(mines)
board = create_board(x, y)
match = bomb_maping(x, y, mines)
play_game(board, match, x, y)
elif option == "5":
[board,match] = load_game()
if board == [0, 0] or match == [0, 0]:
print('No hay una partida guardada con anterioridad. \n')
get_option()
else:
[x, y] = shape(board)
mines = 0
for i in range (len(match)-1):
for j in range (len(match[i])-1):
if match[i, j] == '-1':
mines += 1
play_game(board, match, x-1, y-1 )
else:
print (GOODBYE)
print ('Gracias por iniciar el juego. Lo esperamos una próxima ocasión.')
################################################################
# Function: option_is_valid(option_input)
# Determines TRUE or FALSE statement for get_option
################################################################
def option_is_valid(option_input):
try:
option_input = int(option_input)
if option_input >= 0 and option_input <=7:
return True
else:
return False
except:
return False
################################################################
# Function: per_size_is_valid(x_size, y_size, mines)
# Determines TRUE or FALSE statement for the custom game.
# Verifies if the given dimensions are between boundaries
################################################################
def per_size_is_valid(x_size, y_size, mines):
try:
x_size = int(x_size)
y_size = int(y_size)
mines = int(mines)
if x_size>0 and x_size <=30 and y_size>0 and y_size<=30 and mines<=x_size*y_size/2:
return True
else:
return False
except:
return False
################################################################
# Function: create_board(x_size, y_size)
# Creates visual board for the player. Size is given by chosen
# option
################################################################
def create_board(x_size, y_size):
board = zeros([x_size+2, y_size+2], dtype = str)
for i in range(1, len(board)-1):
for j in range(1, len(board[i])-1):
board[i,j] = ' '
for i in range(0, x_size+1):
board[i, 0] = i
board[i, y_size+1] = i
for j in range(0, y_size+1):
board [0, j] = j
board [x_size+1, j] = j
return board
################################################################
# Function: bomb_maping(x_size, y_size, mines)
# Creates hidden map of the mines and their surroundings. Size
# is given by chosen option.
################################################################
def bomb_maping(x_size, y_size, mines):
x_size += 2
y_size += 2
pox_mines = []
for i in range(mines):
row = randint(1, x_size-2)
col = randint(1, y_size-2)
new_mine = [row, col]
while new_mine in pox_mines:
row = randint(1, x_size-2)
col = randint(1, y_size-2)
new_mine = [row, col]
pox_mines.append(new_mine)
match_board = zeros((x_size, y_size), dtype = int)
for i in range(len(pox_mines)):
[row, col] = pox_mines[i]
match_board[row, col] = -1
for i in range(len(pox_mines)):
[row, col] = pox_mines[i]
SURROUNDING = ((row-1, col-1),(row-1, col), (row-1, col+1),
(row , col-1), (row , col+1),
(row+1, col-1),(row+1 , col),(row+1, col+1))
for (surr_row, surr_col) in SURROUNDING:
if(surr_row != 0 and surr_row != x_size-1 and surr_col != 0 and surr_col != y_size-1) \
and (match_board[surr_row, surr_col] != -1):
match_board[surr_row, surr_col] += 1
return match_board
################################################################
# Function: get_move(x, y):
# Receives string for the coords of unveiling cell. Range
# between given size of the game
################################################################
def get_move(x, y):
INSTRUCTIONS = ("""
Primero ingresa la fila, luego la columna separadas con un punto (.).
Para añadir una bandera, escribe \"f\" al final de las coordenadas (Ej: 5.4f donde sería la quinta
en la cuarta columna donde iría la bandera. Para salir escriba \"e\" y para guardar \"s\".
\n Ingrese las coordenadas de la celda: """)
global is_ended
is_ended = False
move = input('Ingrese las coordenadas de una celda. Escriba \"H"\ para ayuda: ')
if move == 'H' or move == 'h':
move = input(INSTRUCTIONS)
if move == 'S' or move == 's':
print('El juego ha sido guardado.')
save_game(board_display, mine_camp)
return (0, 0, '3')
if move == 'E' or move == 'e':
question = input('Presione Y para salir, N para continuar o S para salir y guardar: ')
while not end_is_valid(question):
question = input('Presione Y para salir, N para continuar o S para salir y guardar: ')
if question == 'Y' or question == 'y':
is_ended = True
return (0, 0, '2')
elif question == 'N' or question == 'n':
move = input('Ingrese las coordenadas de una celda. Escriba \"H"\ para ayuda: ')
elif question == 'S' or question == 's':
is_ended = True
save_game(board_display, mine_camp)
return (0, 0, '3')
while not move_is_valid(move, x, y):
move = input('Ingrese las coordenadas de una celda. Escriba \"H"\ para ayuda: ')
if move == 'H' or move == 'h':
move = input(INSTRUCTIONS)
if move == 'E' or move == 'e':
question = input('Presione Y para salir, N para continuar o S para salir y continuar: ')
while not end_is_valid(question):
question = input('Presione Y para salir, N para continuar o S para salir y guardar: ')
if question == 'Y' or question == 'y':
is_ended = True
move = ('1.1')
row = 1
col = 1
flag = 2
return (row, col, flag)
elif question == 'N' or question == 'n':
move = input('Ingrese las coordenadas de una celda. Escriba \"H"\ para ayuda: ')
elif question == 'S' or question == 's':
is_ended = True
move = ('1.1')
row = 1
col = 1
flag = 2
save_game(board_display, mine_camp)
return (row, col, flag)
if move == 'S' or move == 's':
save_game(board_display, mine_camp)
move = input('Ingrese las coordenadas de una celda. Escriba \"H"\ para ayuda: ')
if is_ended == False:
chain = len(move)
vec = list(move)
row = 0
col = 0
flag = 0
k = vec.index('.')
if vec[-1] == 'F' or vec[-1] == 'f':
chain -= 1
flag = 1
for i in range(k):
a = int(vec[i])
row += a*10**(k-i-1)
for i in range (k+1, chain):
a = int(vec[i])
col += a*10**(chain-i-1)
else:
flag = 2
return (row, col, flag)
################################################################
# Function: move_is_valid(move_input, x, y)
# Determines if the string gives valid coords or string.
################################################################
def move_is_valid(move_input, x, y):
chain = len(move_input)
vec = list(move_input)
if not ('.' in vec):
return False
else:
k = vec.index('.')
if vec[-1] == 'F' or vec[-1] == 'f':
chain -= 1
row = 0
col = 0
for i in range(k):
if vec[i].isdigit():
a = int(vec[i])
row += a*10**(k-i-1)
else:
return False
for i in range(k+1, chain):
if vec[i].isdigit():
a = int(vec[i])
col += a*10**(chain-i-1)
else:
return False
if row > 0 and row <=x and col > 0 and col <=y:
return True
else:
return False
################################################################
# Function: end_is_valid(end_input)
# Determines if the given input for ending the game is valid
################################################################
def end_is_valid(end_input):
if end_input == 'Y' or end_input == 'y':
return True
elif end_input == 'N' or end_input == 'n':
return True
elif end_input == 'S' or end_input == 's':
return True
else:
return False
################################################################
# Function: is_flagged(cell_content)
# Determines if an specific cell on the game board has a flag
################################################################
def is_flagged(cell_content):
if cell_content == "F":
return True
else:
return False
################################################################
# Function: is_visible(cell_content)
# Determines if an specific cell on the game board was discovered.
################################################################
def is_visible(cell_content):
if cell_content != " ":
return True
else:
return False
################################################################
# Function: is_mine(cell_content)
# Determines if an specific cell on the hidden map has a mine
################################################################
def is_mine(cell_content):
if cell_content == -1:
return True
else:
return False
################################################################
# Function: show(board_display, mine_camp, row, col)
# Discovers the content of the cell. Results may vary depending
# of flags, mines, etc.
################################################################
def show(board_display, mine_camp, row, col):
a = mine_camp[row,col]
b = board_display[row,col]
[x_size, y_size] = shape(board_display)
if is_visible(b) or is_flagged(b):
return
elif is_mine(a) and not is_flagged(b):
return
elif a > 0:
board_display[row,col] = mine_camp[row,col]
return board_display
elif a == 0:
board_display[row,col] = '0'
SURROUNDING = ((row-1, col-1),(row-1, col),(row-1, col+1),
(row , col-1), (row , col+1),
(row+1, col-1),(row+1, col),(row+1, col+1))
for(surr_row, surr_col) in SURROUNDING:
if(surr_row != 0 and surr_row != x_size and surr_col != 0 and surr_col != y_size):
show(board_display, mine_camp, surr_row, surr_col)
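    # When the revealed cell has zero neighbouring mines, the recursion above
    # floods outward and uncovers the whole connected empty region, stopping
    # only at numbered cells (the usual Minesweeper open-area behaviour).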
################################################################
# Function: save_game(board_display, mine_camp)
# Saves the current game to files. Creates two files and not txt
################################################################
def save_game(board_display, mine_camp):
file_display = 'last_game'
file_Mine = 'mine_camp'
display = open(file_display, 'wb')
mines = open(file_Mine, 'wb')
pickle.dump(board_display, display)
pickle.dump(mine_camp, mines)
display.close()
mines.close()
return
################################################################
# Function: load_game()
# Loads game from previously saved game.
################################################################
def load_game():
file_display = 'last_game'
file_Mine = 'mine_camp'
try:
obj1 = open(file_display, 'rb')
obj2 = open(file_Mine, 'rb')
display = pickle.load(obj1)
mines = pickle.load(obj2)
except:
mines = [0, 0]
display = [0, 0]
return (display, mines)
################################################################
# Function: game_is_solved(board_display, x_size, y_size)
# Determines if the game is solved and no mines exploded
################################################################
def game_is_solved(board_display, x_size, y_size):
for i in range(1, x_size):
for j in range(1, y_size):
a = board_display[i, j]
if is_visible(a) or is_flagged(a):
continue
elif a != 'X':
return False
else:
return False
return True
################################################################
# Function: play_game(board_display, mine_camp, mines, x, y):
# Initialize the game. Size varies by the chosen option
################################################################
def play_game(board, mine_c, x, y):
global is_playing
global is_ended
global board_display
global mine_camp
is_playing = True
is_ended = False
board_display = board
mine_camp = mine_c
counter = 0
start_time = time.clock()
while is_playing and not game_is_solved(board_display, x, y) and not is_ended:
counter += 1
print('\n Esta es la jugada No. ', counter)
print(board_display)
(x_pox, y_pox, flag) = get_move(x, y)
a = board_display[x_pox, y_pox]
b = mine_camp[x_pox, y_pox]
if flag == 1:
if not is_flagged(a) and not is_visible(a):
board_display[x_pox, y_pox] = 'F'
elif is_flagged(a) and is_visible(a):
board_display[x_pox, y_pox] = ' '
elif flag == 2 :
is_ended = True
break
else:
if is_visible(a) or is_flagged(a):
pass
elif is_mine(b):
is_playing = False
board_display[x_pox, y_pox] = 'X'
else:
show(board_display, mine_camp, x_pox, y_pox)
print (board_display)
end_time = time.clock()
total_time = int(end_time - start_time)
BOMB = ("""
\|/
.-*-
/
_|_
," ".
(\ / O O \ /)
\| _ |/
\ (_) /
_/.___,\_
(_/ \_)
""")
FROG = ("""
.-. .-.
( o )_( o )
__ / '-' '-' \ __
/ / "
| \ _____, / |
\ \`-._______.-'/ /
_.-` /\) (/\ `-._
(_ / / /.___.\ \ \ _)
(_.(_/ / (_ _) \ \_)._)
(_(_)_) (_(_)_)
""")
if game_is_solved(board_display,x, y) and not is_ended:
print(FROG)
print('Felicidades')
print('Tu tiempo de juego fue de ', total_time, ' segundos.')
elif not is_ended:
print(BOMB)
print('¡Ops! ¡Pisaste una mina!')
print('Tu tiempo de juego fue de ', total_time, ' segundos.')
else:
print('¡Gracias por jugar!. Te esperamos nuevamente. \n')
print('Tu tiempo de juego fue de ', total_time, ' segundos.')
nan = input('Presione ENTER para volver al menú principal \n')
menu()
#Initialize the game
menu()
# ----------------------------------------------------------------------
# File: /fileManager/migrations/0001_initial.py  (repo: ckyle93/SlugChat)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 09:11
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
migrations.CreateModel(
name='FileDB',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fileObj', models.FileField(upload_to=b'./static/uploads/')),
('fileName', models.CharField(default=b'', max_length=20)),
('className', models.CharField(default=b'', max_length=20)),
],
),
migrations.AddField(
model_name='comment',
name='file',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fileManager.FileDB'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.User'),
),
]
# ----------------------------------------------------------------------
# File: /mainWin.py  (repo: ImTheSquid/Image-Duplicate-Detector, MIT)
# ----------------------------------------------------------------------
import sys
from PyQt5.QtGui import QIcon, QPixmap, QImageReader
from PyQt5.QtWidgets import QApplication, QWidget, QTabWidget, QHBoxLayout
from albums.main import Albums
from date_sorter.main import DateSorter
from duplicate_finder.main import DuplicateFinder
class Runner(QWidget):
def __init__(self):
super().__init__()
self.init_gui()
def init_gui(self):
# Init the basic window frame
self.setWindowTitle('Jack\'s Photo Utilities v.2.2')
pix = QPixmap(QImageReader('assets/icon.png').read())
self.setWindowIcon(QIcon(pix))
layout = QHBoxLayout()
tabs = QTabWidget()
duplicate_detector = DuplicateFinder()
tabs.addTab(duplicate_detector, 'Duplicate Finder')
date_sorter = DateSorter()
tabs.addTab(date_sorter, 'Date Sorter')
albums = Albums()
tabs.addTab(albums, 'Albums')
layout.addWidget(tabs)
self.setLayout(layout)
self.show()
if __name__ == '__main__':
app = QApplication([])
win = Runner()
sys.exit(app.exec_())
# ----------------------------------------------------------------------
# File: /DataManager/ibdtb_db.py  (repo: ibd-zhe/tbcrawler)
# ----------------------------------------------------------------------
import psycopg2
ibd_dbinfo = "dbname='ibdtbdata' user='ec2-user' host='54.223.65.44' password='8326022'"
class IbdDB:
def __init__(self):
self.dbinfo = ibd_dbinfo
self.con = psycopg2.connect(self.dbinfo)
self.cur = self.con.cursor()
def insert_item(self, item):
self.cur.execute(
"""INSERT INTO tb_item (itemid, shopid, sku_changed) VALUES (%s, 72076881, TRUE)""", (item['itemid'], )) | [
"[email protected]"
] | |
e675ecdd11aba2438113bae9622a34b521bfcb1d | 908f005cd9c6254a3c6a9bf2934f9321993f1891 | /w3schools/STOP PYTHON FUNCTIONS.py | 437ddf2de0668229574fd9e093f03c340723e470 | [] | no_license | AdrianoKim/ListaDeExerciciosPythonBrasilAdrianoKim | b08efc21fc349af19ca841ec05f335cbf93e0277 | dacb014d874af8ffb71d4b681e683acff58e6fa0 | refs/heads/master | 2023-04-30T05:24:45.616970 | 2021-05-18T17:58:16 | 2021-05-18T17:58:16 | 346,440,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,067 | py | # A function is a block of code which only runs when it is called.
# You can pass data, known as parameters, into a function.
# A function can return data as a result.
# Creating a Function
# In Python a function is defined using the def keyword:
# Example
def my_function():
print("Hello from a function")
# Calling a Function
# To call a function, use the function name followed by parenthesis:
# Example
def my_function():
print("Hello from a function")
my_function()
# Arguments
# Information can be passed into functions as arguments.
# Arguments are specified after the function name, inside the parentheses. You can add as many arguments as you want, just separate them with a comma.
# The following example has a function with one argument (fname). When the function is called, we pass along a first name, which is used inside the function to print the full name:
# Example
def my_function(fname):
print(fname + " Refsnes")
my_function("Emil")
my_function("Tobias")
my_function("Linus")
# Arguments are often shortened to args in Python documentations.
# ADVERTISEMENT
# Parameters or Arguments?
# The terms parameter and argument can be used for the same thing: information that are passed into a function.
# From a function's perspective:
# A parameter is the variable listed inside the parentheses in the function definition.
# An argument is the value that is sent to the function when it is called.
# Number of Arguments
# By default, a function must be called with the correct number of arguments. Meaning that if your function expects 2 arguments, you have to call the function with 2 arguments, not more, and not less.
# Example
# This function expects 2 arguments, and gets 2 arguments:
def my_function(fname, lname):
print(fname + " " + lname)
my_function("Emil", "Refsnes")
# If you try to call the function with 1 or 3 arguments, you will get an error:
# Example
# This function expects 2 arguments, but gets only 1:
def my_function(fname, lname):
print(fname + " " + lname)
# my_function("Emil") error because it have only 1 argument
# Arbitrary Arguments, *args
# If you do not know how many arguments that will be passed into your function, add a * before the parameter name in the function definition.
# This way the function will receive a tuple of arguments, and can access the items accordingly:
# Example
# If the number of arguments is unknown, add a * before the parameter name:
def my_function(*kids):
print("The youngest child is " + kids[2])
my_function("Emil", "Tobias", "Linus")
# Arbitrary Arguments are often shortened to *args in Python documentations.
# Keyword Arguments
# You can also send arguments with the key = value syntax.
# This way the order of the arguments does not matter.
# Example
def my_function(child3, child2, child1):
print("The youngest child is " + child3)
my_function(child1 = "Emil", child2 = "Tobias", child3 = "Linus")
# The phrase Keyword Arguments are often shortened to kwargs in Python documentations.
# Arbitrary Keyword Arguments, **kwargs
# If you do not know how many keyword arguments that will be passed into your function, add two asterisk: ** before the parameter name in the function definition.
# This way the function will receive a dictionary of arguments, and can access the items accordingly:
# Example
# If the number of keyword arguments is unknown, add a double ** before the parameter name:
def my_function(**kid):
print("His last name is " + kid["lname"])
my_function(fname = "Tobias", lname = "Refsnes")
# Arbitrary Keyword Arguments are often shortened to **kwargs in Python documentations.
# Default Parameter Value
# The following example shows how to use a default parameter value.
# If we call the function without argument, it uses the default value:
# Example
def my_function(country = "Norway"):
print("I am from " + country)
my_function("Sweden")
my_function("India")
my_function()
my_function("Brazil")
# Passing a List as an Argument
# You can send any data types of argument to a function (string, number, list, dictionary etc.), and it will be treated as the same data type inside the function.
# E.g. if you send a List as an argument, it will still be a List when it reaches the function:
# Example
def my_function(food):
for x in food:
print(x)
fruits = ["apple", "banana", "cherry"]
my_function(fruits)
# Return Values
# To let a function return a value, use the return statement:
# Example
def my_function(x):
return 5 * x
print(my_function(3))
print(my_function(5))
print(my_function(9))
# The pass Statement
# function definitions cannot be empty, but if you for some reason have a function definition with no content, put in the pass statement to avoid getting an error.
# Example
def myfunction():
pass
# Recursion
# Python also accepts function recursion, which means a defined function can call itself.
# Recursion is a common mathematical and programming concept. It means that a function calls itself.
# This has the benefit of meaning that you can loop through data to reach a result.
# The developer should be very careful with recursion as it can be quite easy to slip into writing a
# function which never terminates, or one that uses excess amounts of memory or processor power. However, when written correctly recursion can be a very efficient and mathematically-elegant approach to programming.
# In this example, tri_recursion() is a function that we have defined to call itself ("recurse").
# We use the k variable as the data, which decrements (-1) every time we recurse. The recursion ends
# when the condition is not greater than 0 (i.e. when it is 0).
# To a new developer it can take some time to work out how exactly this works,
# the best way to find out is to test and modify it.
# Example
# Recursion Example
def tri_recursion(k):
if(k > 0):
result = k + tri_recursion(k - 1)
print(result)
else:
result = 0
return result
print("\n\nRecursion Example Results")
tri_recursion(6)
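# For reference, the call above prints 1, 3, 6, 10, 15, 21 (one value per
# line): each level adds its k to the total returned by the level below it.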
# ----------------------------------------------------------------------
# File: /eventsys/asgi.py  (repo: nischal7777/Event-Management-System)
# ----------------------------------------------------------------------
"""
ASGI config for the eventsys project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eventsys.settings')
application = get_asgi_application()
# ----------------------------------------------------------------------
# File: /asset_management/migrations/0002_auto_20210318_2141.py  (repo: kchikweshe/mysite)
# ----------------------------------------------------------------------
# Generated by Django 3.1.5 on 2021-03-18 21:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('asset_management', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='address',
field=models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='asset_management.address'),
),
migrations.AlterField(
model_name='employee',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
# ----------------------------------------------------------------------
# File: /Python/chapter2/2-6/while-simple.py  (repo: YosukeKira/main)
# ----------------------------------------------------------------------
energy = 3
while energy > 0:
print("+ 走る")
print("| energy=" , energy)
energy -= 1
# ----------------------------------------------------------------------
# File: /Tarea Phyton/base.py  (repo: compilador1980/tarea_phyton)
# ----------------------------------------------------------------------
from Elementos.funciones import *
print suma(5,5)
# ----------------------------------------------------------------------
# File: /worksheets/tasks/problem1.py  (repo: mucahidyazar/python)
# ----------------------------------------------------------------------
# Problem 1
# Take three numbers from the user, multiply them and print the result; try to do the printing with the format method.
x = int(input('x = '))
y = int(input('y = '))
z = int(input('z = '))
print("x+y+z'nin toplami = ", x+y+z) | [
"[email protected]"
] | |
805f1d32454909b54dac3af8aaf26dbb5b664b24 | c3b7ec12befaefc91bcba7f73a10c885969ef39c | /africastalking/Service.py | 65d3ddb4e7c58a21542f3fa01b3b680e0bfbdd5f | [
"MIT"
] | permissive | Dickens-odera/africastalking-python | 02db5fa8b90be3becad335bee547d677f1feb147 | a4e43a89d879bd2892e0efed7f3299e9eb87065c | refs/heads/master | 2020-03-30T07:06:32.500469 | 2018-07-17T13:23:32 | 2018-07-17T13:23:32 | 150,915,743 | 1 | 0 | MIT | 2018-09-30T01:02:06 | 2018-09-30T01:02:06 | null | UTF-8 | Python | false | false | 4,120 | py | import re
import threading
import requests
def validate_amount(amount_str):
try:
parts = amount_str.split(' ')
return len(parts[0]) == 3 and float(parts[1])
except ValueError:
return False
def validate_phone(phone_str):
try:
        return re.match(r'^\+\d{1,3}\d{3,}$', phone_str)
except ValueError:
return False
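# Illustrative behaviour of the two validators above (examples only, not from the original library):
#   validate_amount("KES 100")      -> 100.0  (truthy: 3-letter currency code + numeric part)
#   validate_amount("KES ten")      -> False  (float() raises ValueError, which is caught)
#   validate_phone("+254711222333") -> re.Match object (truthy)
#   validate_phone("0711222333")    -> None   (missing the leading '+' country code)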
class AfricasTalkingException(Exception):
pass
class Service(object):
def __init__(self, username, api_key):
if type(username) is not str:
raise RuntimeError('username has to be of type str.')
if type(api_key) is not str:
raise RuntimeError('api_key has to be of type str.')
self._PRODUCTION_DOMAIN = 'africastalking.com'
self._SANDBOX_DOMAIN = 'sandbox.africastalking.com'
self._username = username
self._api_key = api_key
self._headers = {
'Accept': 'application/json',
'User-Agent': 'africastalking-python/2.0.0',
'ApiKey': self._api_key
}
self._baseUrl = 'https://api.' + self._PRODUCTION_DOMAIN
self._init_service()
def _is_sandbox(self):
return self._username == 'sandbox'
def _make_url(self, path):
return self._baseUrl + path
def _init_service(self):
raise NotImplementedError
@staticmethod
def __make_get_request(url, headers, data, params, callback=None):
res = requests.get(
url=url,
headers=headers,
params=params,
data=data
)
if callback is None or callback == {}:
return res
else:
callback(res)
@staticmethod
def __make_post_request(url, headers, data, params, callback=None):
res = requests.post(
url=url,
headers=headers,
params=params,
data=data,
)
if callback is None or callback == {}:
return res
else:
callback(res)
def _make_request(self, url, method, headers, data, params, callback=None):
method = method.upper()
if callback is None:
if method == 'GET':
res = self.__make_get_request(url=url, headers=headers, data=data, params=params)
elif method == 'POST':
res = self.__make_post_request(url=url, headers=headers, data=data, params=params)
else:
raise AfricasTalkingException('Unexpected HTTP method: ' + method)
if 200 <= res.status_code < 300:
if res.headers.get('content-type') == 'application/json':
return res.json()
else:
return res.text
else:
raise AfricasTalkingException(res.text)
elif not callable(callback):
raise RuntimeError('callback has to be callable. e.g. a function')
else:
def cb(response):
if 200 <= response.status_code < 300:
if response.headers.get('content-type') == 'application/json':
callback(None, response.json())
else:
callback(None, response.text)
else:
callback(AfricasTalkingException(response.text), None)
if method == 'GET':
_target = self.__make_get_request
elif method == 'POST':
_target = self.__make_post_request
else:
raise AfricasTalkingException('Unexpected HTTP method: ' + method)
thread = threading.Thread(target=_target, args=(url, headers, data, params, cb))
thread.start()
return thread
class APIService(Service):
def __init__(self, username, api_key):
super(APIService, self).__init__(username, api_key)
def _init_service(self):
self._baseUrl = 'https://api.'
if self._is_sandbox():
self._baseUrl += self._SANDBOX_DOMAIN
else:
self._baseUrl += self._PRODUCTION_DOMAIN
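# Minimal usage sketch (illustrative only; the '/version' endpoint and key are hypothetical):
#   service = APIService(username='sandbox', api_key='MY_KEY')
#   # synchronous: blocks and returns the parsed JSON (dict) or raw text, raising on non-2xx
#   data = service._make_request(service._make_url('/version'), 'GET',
#                                headers=service._headers, data=None, params={})
#   # asynchronous: returns a threading.Thread immediately; the callback receives (error, result)
#   def cb(error, result):
#       print(error or result)
#   service._make_request(service._make_url('/version'), 'GET',
#                         headers=service._headers, data=None, params={}, callback=cb)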
| [
"[email protected]"
] | |
3dc6d25443509139669e6dbf8c8c93942bb0ab79 | d94aee0c3f625f2897e96d8bdc6c415085b8b28a | /zquery/zquery.py | 4de8c4ded7f358d1ca845bf973bf6cd70b5c4544 | [
"Apache-2.0"
] | permissive | scalershare/zquery | 947901ab8cb7f0861f8f0d1e9e8b61260a7062b0 | 8c27afe25c0e5282b39f678d18521ee7c3d06cef | refs/heads/master | 2021-01-16T18:38:06.734682 | 2016-11-12T14:05:04 | 2016-11-12T14:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | """query via command-line.
Usage:
zquery [(-q|-r|-a)] [--depth=<co>] <user>
zquery question <url>
zquery answer <url>
zquery column <url>
zquery post <url>
zquery collection <url>
Options:
  -q            show the user's questions
  -r            show the user's answers
  -a            show the user's articles
  --depth=<co>  number of entries to show
Example:
zquery -a --depth=15 excited-vczh
zquery excited-vczh
zquery post https://zhuanlan.zhihu.com/p/19780644
"""
from docopt import docopt
from .queryPprint import *
def cli():
"""command-line interface"""
args = docopt(__doc__)
if args["-q"] == True:
pprint_user_ask(args["<user>"], int(args["--depth"]))
elif args["-r"] == True:
pprint_user_answer(args["<user>"], int(args["--depth"]))
elif args["-a"] == True:
pprint_user_article(args["<user>"], int(args["--depth"]))
elif args["post"] == True:
pprint_post(args['<url>'])
elif args["question"] == True:
pprint_question(args['<url>'])
elif args["column"] == True:
pprint_column(args['<url>'])
elif args["answer"] == True:
pprint_answer(args['<url>'])
elif args["collection"] == True:
pprint_collection(args['<url>'])
else:
pprint_user_base(args['<user>'])
if __name__ == '__main__':
cli()
| [
"[email protected]"
] | |
7646204379447b56955aa6b303fab717d7518a76 | 8c16f31749e45bc3ade4488e1d8cdd4dae38ee84 | /musicdb/songs/migrations/0004_auto_20201212_0545.py | 75ff2e7aec44c04f8e04c48ce4c2dca643c3d6c9 | [] | no_license | kcinesser/musicdb_rest_api | d4608ef726fc873440940b2a34a8c47b8cf472b3 | da1de4fe91452562cb8b68674d8668c1e994af96 | refs/heads/master | 2023-01-31T19:07:03.907157 | 2020-12-17T01:53:23 | 2020-12-17T01:53:23 | 322,148,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.1.4 on 2020-12-12 05:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('songs', '0003_auto_20201212_0535'),
]
operations = [
migrations.RenameField(
model_name='song',
old_name='difficult',
new_name='difficulty',
),
]
| [
"[email protected]"
] | |
f7edda16292b591d72e168d21f307677e7e9cd64 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/compute/azure-mgmt-vmwarecloudsimple/generated_samples/list_rg_virtual_machines.py | 3350eb2ce4267329d7b095ad8ffd006d48a546b2 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,585 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.vmwarecloudsimple import VMwareCloudSimple
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-vmwarecloudsimple
# USAGE
python list_rg_virtual_machines.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = VMwareCloudSimple(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.virtual_machines.list_by_resource_group(
resource_group_name="myResourceGroup",
)
for item in response:
print(item)
# x-ms-original-file: specification/vmwarecloudsimple/resource-manager/Microsoft.VMwareCloudSimple/stable/2019-04-01/examples/ListRGVirtualMachines.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7e6593ea570407037b2fff5467b21a19f35f5438 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2396/60686/288174.py | 2d7453bac84ec6803ea496e9177bc52bf1c47762 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | nums = int(input())
list_input = input().split(" ")
for i in range(nums):
list_input[i] = int(list_input[i])
res = []
# Selection sort by segment reversal: each pass finds the minimum of the remaining
# suffix, reverses the segment so that minimum lands at position i, and records the
# (1-based) position where that minimum was found.
for i in range(nums):
    index = list_input.index(min(list_input[i:len(list_input)]))
    # reverse list_input[i:index + 1] in place
    list_temp = list_input[i:index + 1]
    list_temp = list_temp[::-1]
    for j in range(i, len(list_temp) + i):
        list_input[j] = list_temp[j - i]
    res.append(index + 1)
for i in range(len(res)):
print(res[i], end=" ")
| [
"[email protected]"
] | |
8b924c4c2f536c2a4bd0c98da670d5f52b10d4be | 9dd54195eb65db228c3e3e7afad3f3031718262c | /cryptanalyzer/cryptanalyzer/wsgi.py | bdd565cda73ae64afff9d8a54561bbde519bfe9f | [
"ISC"
] | permissive | curioswati/crypt-analyzer | ffc22004d6424623922243d106263e2eb0558049 | 2ae73f58afdeaa6d01410b77bb995f2202eec2d9 | refs/heads/master | 2021-07-21T09:19:59.832497 | 2021-02-24T12:35:55 | 2021-02-24T12:35:55 | 98,267,603 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | """
WSGI config for cryptanalyzer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cryptanalyzer.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
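# Note: ``whitenoise.django.DjangoWhiteNoise`` only exists in whitenoise releases before 4.0;
# later versions expose ``whitenoise.WhiteNoise`` / the WhiteNoiseMiddleware setting instead,
# so this module assumes the older package is pinned.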
| [
"[email protected]"
] | |
2d4c9254e4b6499f0dca587712dccea2c14e09bb | 39d79f3c6836e8d8db8f6a2350e9ae418fdeb8bb | /RecetarioApp/RecipesApp/migrations/0001_initial.py | 34b14d958f1f4e793949a7088cae47e286c5fa86 | [] | no_license | ValentinaAlvarezG/eomma-recipes | e43ec371f5201cf4821a3cc765d6c8e81f83858a | 45a6ebaca2487a2d11c8f2ec2ca46e7083ddbc2a | refs/heads/main | 2023-02-02T07:11:00.172602 | 2020-12-20T18:06:16 | 2020-12-20T18:06:16 | 323,133,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | py | # Generated by Django 3.1.2 on 2020-12-19 14:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('price_per_unit', models.FloatField()),
],
),
migrations.CreateModel(
name='MeasurementUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('portions', models.FloatField()),
],
),
migrations.CreateModel(
name='RecipeIngredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qty', models.FloatField()),
('measurement', models.CharField(max_length=30)),
('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='RecipesApp.ingredient')),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='RecipesApp.recipe')),
],
),
migrations.AddField(
model_name='recipe',
name='ingredients',
field=models.ManyToManyField(through='RecipesApp.RecipeIngredient', to='RecipesApp.Ingredient'),
),
migrations.AddField(
model_name='recipe',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='ingredient',
name='measurement_unit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='RecipesApp.measurementunit'),
),
migrations.AddField(
model_name='ingredient',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Equivalence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('measurement', models.CharField(max_length=100)),
('equivalence', models.FloatField()),
('measurement_unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='RecipesApp.measurementunit')),
],
),
]
| [
"[email protected]"
] | |
1c612e0688255c02261a66a580a34cbb8785ef5e | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_molecular_dipoles12.py | c946740f9db569853440eec47ddb4d64ed167222 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._each490 import _each490
class _molecular_dipoles12(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.Periodic = None
self.Reference = None
self.Reference_point = None
self.EACH = _each490()
self._name = "MOLECULAR_DIPOLES"
self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY', 'Periodic': 'PERIODIC', 'Reference': 'REFERENCE', 'Reference_point': 'REFERENCE_POINT'}
self._subsections = {'EACH': 'EACH'}
self._aliases = {'Ref': 'Reference', 'Ref_point': 'Reference_point'}
self._attributes = ['Section_parameters']
@property
def Ref(self):
"""
See documentation for Reference
"""
return self.Reference
@property
def Ref_point(self):
"""
See documentation for Reference_point
"""
return self.Reference_point
@Ref.setter
def Ref(self, value):
self.Reference = value
@Ref_point.setter
def Ref_point(self, value):
self.Reference_point = value
| [
"[email protected]"
] | |
246a7f0a4d97f481a539bf00d26b59aad288ed45 | bfed97badbe18122b95ca078b6a0275450f4c709 | /mysite/settings.py | b06e6258e2820abfdf66ca65448100a7acc01516 | [] | no_license | shimogapradeep/djangogirls-tutorial | 4ed54a00d9569e36b795c2b9ae325ad5554cd724 | 471e536da7b19caeff10a30eb18167007b24e3b6 | refs/heads/master | 2020-09-10T14:18:25.028378 | 2019-11-14T14:05:27 | 2019-11-14T14:05:27 | 221,713,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x9=*o!fl_7)pe=ijdm%5xv!!6*uyyyc327bw$5ou3tk)2r*sk-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
c5f1a956ced0101d5d8e487eb64c882b5e31f804 | 962126ab3f110e9dc1753e09d2c23bcad5690069 | /src/cosmos_to_caaj/src/main.py | dfd465d6b70ad6628648b9a2649c7ad9762be7a2 | [] | no_license | shuremura/defiscan | 5d592402b80e5b2c33d7628d081a611037f9bee7 | e955cfe6e314690ab29df3164a1e99988e01aa8f | refs/heads/master | 2023-08-28T12:30:38.692797 | 2021-10-11T15:05:50 | 2021-10-11T15:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | import requests
import time
import sys
import logging
from caaj_plugin.caaj_plugin import *
from cosmos_to_caaj.cosmos_plugin import *
from cosmos_to_caaj.transaction import *
from decimal import *
from datetime import datetime as dt
import pandas as pd
import os
logger = logging.getLogger(name=__name__)
logger.addHandler(logging.NullHandler())
getcontext().prec = 50
def set_root_logger():
root_logger = logging.getLogger(name=None)
root_logger.setLevel(logging.INFO)
if not root_logger.hasHandlers():
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%Y-%m-%dT%H:%M:%S")
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(fmt)
root_logger.addHandler(stream_handler)
def output_caaj(caajs, address):
df = pd.DataFrame(caajs)
df = df.sort_values('time')
result_file_name = f'{os.path.dirname(__file__)}/../output/cosmos_caaj_{address}.csv'
df.to_csv(result_file_name, index=False, columns=['time', 'platform', 'transaction_id', 'debit_title', 'debit_amount', 'debit_from', 'debit_to', 'credit_title', 'credit_amount', 'credit_from', 'credit_to', 'comment'])
def main():
cosmos = CosmosPlugin()
caajs = []
print('start cosmos_to_caaj')
address = sys.argv[1]
num_transactions = 50
last_id = 0
while num_transactions >= 50:
time.sleep(5)
response = requests.get(
'https://api.cosmostation.io/v1/account/new_txs/%s' % address,
params={'from': last_id, 'limit': 50})
transactions = response.json()
num_transactions = len(transactions)
for transaction in transactions:
last_id = transaction['header']['id']
if cosmos.can_handle(transaction) == False:
raise ValueError('not cosmos transaction')
caajs.extend(cosmos.get_caajs(transaction, address))
output_caaj(caajs, address)
if __name__== '__main__':
set_root_logger()
main()
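# Usage sketch (inferred from the sys.argv handling above; the address is a placeholder):
#   python main.py cosmos1exampleaddress0000000000000000000
# The script pages through the Cosmostation API 50 transactions at a time and writes
# cosmos_caaj_<address>.csv into the ../output/ directory relative to this file.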
| [
"[email protected]"
] | |
52870074aa71e5403e15d8242cf9e372287fba8d | 4f8e24a29f9e89d47b389ef4aa912789aec00419 | /app/views/search.py | 34fc0565cb30ee7601ecffb7e37f30202ad18d93 | [] | no_license | rahulrathod07/task | e770948c394b081632b31801e13a8781a2ea867c | 30f1575a3363ce07755d2f0ff8a92c3af7963e6c | refs/heads/master | 2023-04-01T21:42:56.124081 | 2021-04-04T16:35:41 | 2021-04-04T16:35:41 | 343,816,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | from app import app
from app.auth import token_required, admin_required
from app import db
from app.models import Movies
from flask import jsonify, request
# Search movies
@app.route('/search', methods=['GET'])
@token_required
def search_movie(current_user):
filters = []
name = request.args.get('name')
if name:
criteria = '%{}%'.format(name)
filters.append(Movies.name.like(criteria))
director = request.args.get('director')
if director:
criteria = '%{}%'.format(director)
filters.append(Movies.director.like(criteria))
imdb_score = request.args.get('imdb_score')
if imdb_score:
filters.append(Movies.popularity >= imdb_score)
popularity = request.args.get('99popularity')
if popularity:
filters.append(Movies.popularity >= popularity)
genre = request.args.get('genre')
if genre:
sanitized_genres = [x.lower().strip() for x in genre.split(',')]
for x in sanitized_genres:
filters.append(Movies.genres.any(name=x))
movies = Movies.query.filter(*filters).all()
if not movies:
return jsonify({'message': 'No movies found.', 'movies': movies})
return jsonify({'message': 'Movies filtered successfully.', 'movies': [movie.serialized for movie in movies]})
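# Illustrative request (parameter names taken from the handler above; host and auth token assumed):
#   GET /search?name=star&director=lucas&imdb_score=7&99popularity=70&genre=action,sci-fi
# Each supplied parameter adds one filter; 'genre' is split on commas, lower-cased and
# stripped before being matched via Movies.genres.any(name=...). The route is protected
# by the token_required decorator.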
| [
"rrahulrathod07.com"
] | rrahulrathod07.com |
05cbf10f9f57b4968c60c2c9250f317ccae6996c | 2eb0f5cfbcbac633c7f052fc1c971c7bd0436779 | /查找/二分查找.py | c984a0a2df12dc1bdf3c24403cc533faff716d5e | [] | no_license | yukkiball/Python-Algorithm | 209781bddac91df6633e0211bed0a7c5a9ee9643 | 2d8fb53074d67d0d78d4e253cafeca1eaf0d4e88 | refs/heads/master | 2020-05-25T11:02:27.083976 | 2019-05-23T12:16:18 | 2019-05-23T12:16:18 | 187,771,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | def search_rec(list, x, lo, hi):
"""递归版二分查找返回小于等于该元素的最大秩"""
if lo > hi:
return lo - 1
mid = (lo + hi) // 2
# if list[mid] > x:
# return search_rec(list, x, lo, mid - 1)
# elif list[mid] < x:
# return search_rec(list, x, mid + 1, hi)
# else:
# return mid
if list[mid] > x:
return search_rec(list, x, lo, mid - 1)
else:
return search_rec(list, x, mid + 1, hi)
def search_iter(list, x, lo, hi):
"""二分查找迭代版返回小于等于该元素的最大秩"""
while lo <= hi:
mid = (lo + hi) >> 1
# if list[mid] < x:
# lo = mid + 1
# elif list[mid] > x:
# hi = mid - 1
# else:
# return mid
if list[mid] > x:
hi = mid - 1
else:
lo = mid + 1
return lo - 1
# Test
l1 = [1, 3, 4, 5, 6, 7]
print(search_rec(l1, 7, 0, len(l1) - 1))
print(search_iter(l1, 3, 0, len(l1) - 1))
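# A few extra checks (illustrative; they only exercise the two functions defined above):
assert search_iter(l1, 0, 0, len(l1) - 1) == -1   # x smaller than every element -> lo - 1 == -1
assert search_iter(l1, 2, 0, len(l1) - 1) == 0    # largest index with element <= 2 is 0 (value 1)
assert search_rec(l1, 8, 0, len(l1) - 1) == 5     # x larger than every element -> last index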
| [
"[email protected]"
] | |
27ef5b55f149267aaed782727e3bcb637e9e1835 | e6372d9ef1eb7c328a19e7119ef97dbc73916477 | /.configs/sway/status.py | b759f0c9f802d86b5139fa9d2441c8144e59a387 | [] | no_license | DOCgould/ChristiansConfigs | 02be8779fc71d60d039ee47aa5c9b96294b326f1 | 3cc279514a499307902e0026b0fc03a50402cac6 | refs/heads/master | 2022-09-15T06:01:51.093997 | 2022-09-05T23:03:30 | 2022-09-05T23:03:30 | 181,237,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | #!/usr/bin/env python3
from datetime import datetime
from psutil import disk_usage, sensors_battery
from psutil._common import bytes2human
from socket import gethostname, gethostbyname
from subprocess import check_output
from sys import stdout
from time import sleep
def write(data):
stdout.write("%s\n" % data)
stdout.flush()
def refresh():
disk = bytes2human(disk_usage("/").free)
ip = gethostbyname(gethostname())
try:
ssid = check_output("iwgetid -r", shell=True).strip().decode("utf-8")
ssid = "(%s)" % ssid
except Exception:
ssid = "None"
bluetooth = check_output(
"systemctl status bluetooth | grep Status",
shell=True).strip().decode("utf-8")
battery = int(sensors_battery().percent)
status = "Charging" if sensors_battery().power_plugged else "Discharging"
date = datetime.now().strftime("%h %d %A %I:%M")
format = "Space: %s | Internet: %s %s | Bluetooth %s | Battery: %s%% %s | Date: %s"
write(format % (disk, ip, ssid, bluetooth, battery, status, date))
if __name__ == "__main__":
refresh()
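    # swaybar's status_command normally expects a refreshed status line at a regular
    # interval; a looping variant (assumption -- the original prints only once) could
    # reuse the already-imported sleep:
    #   while True:
    #       refresh()
    #       sleep(5)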
| [
"[email protected]"
] | |
02b2836472461122ca19ee0c6866835e1f5e0dba | 8131f0053b4a38e4c4769e7e143d51369b23cc95 | /scripts/analyse_dataset.py | 6e72eb45e5190e0663a99f96f8c6760d1160c504 | [] | no_license | yungsalami/linuxtest | 9af756038777c99a505b70f0f66adaa98cc32438 | 0d7442bd78f9899536a109e87a4c4639ade82a58 | refs/heads/master | 2023-06-07T23:42:39.301111 | 2021-07-03T08:06:14 | 2021-07-03T08:06:14 | 379,878,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,342 | py | import click
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import pandas as pd
from project_a5.io import read_particles
import pickle
def reconstruct_direction(features):
# This is a dummy reconstruction.
# Do not change.
return (
features["direction"]
+ np.random.default_rng(1337).normal(size=len(features)) * 0.2
)
def reconstruct_energy(features, path):
# Do not change.
with open(path, "rb") as f:
model = pickle.load(f)
y = model.predict(features)
return y
def reconstruct_particle_id(features):
# We skip particle ID reconstruction for now.
# Do not change.
return np.nan
@click.command()
@click.argument("group-name", type=str)
@click.argument("input-directory", type=click.Path(exists=True, dir_okay=True))
@click.option(
"--output-directory",
"-d",
default=None,
type=click.Path(file_okay=False, dir_okay=True),
)
@click.option("--direction", is_flag=True, default=False)
@click.option("--energy", is_flag=True, default=False)
@click.option("--particle-id", is_flag=True, default=False)
def main(
group_name,
input_directory,
output_directory,
direction,
energy,
particle_id,
):
# set up input/output directories and paths
if output_directory is not None:
output_directory = Path(output_directory) / group_name
output_directory.mkdir(parents=True, exist_ok=True)
input_directory = Path(input_directory) / group_name
energy_regressor_path = input_directory / "energy_regressor.pkl"
output_path = output_directory / "dataset.h5"
# load data
features = pd.read_hdf(input_directory / "features.h5", "events")
particles = read_particles(input_directory / "events.h5")
particles = particles.set_index("event_id")
features.dropna(inplace=True)
particles = particles.loc[features.index.intersection(particles.index)]
if direction:
# For now, use a dummy reconstruction.
print("Reconstructing Direction")
reco_direction = reconstruct_direction(particles)
if energy:
print("Reconstructing Energy")
reco_energy = reconstruct_energy(features, energy_regressor_path)
else:
reco_energy = np.nan
if particle_id:
print("Reconstructing Particle ID")
reco_particle_id = reconstruct_particle_id(features)
else:
reco_particle_id = np.nan
features["reco_direction"] = reco_direction
features["reco_energy"] = reco_energy
features["reco_particle_id"] = reco_particle_id
data = features.join(particles, rsuffix="r", how="inner")
data.to_hdf(output_path, "events")
# ===
# Overview Plots
# ===
print("Plotting")
fig = plt.figure(constrained_layout=True)
gs = plt.GridSpec(2, 2, figure=fig)
axes = [
fig.add_subplot(gs[0, 0]),
fig.add_subplot(gs[0, 1]),
fig.add_subplot(gs[1, 0]),
fig.add_subplot(gs[1, 1]),
]
ax = axes[0]
*_, im = ax.hist2d(
data["direction"], data["reco_direction"], bins=50, norm=LogNorm()
)
ax.set_xlabel("Direction")
ax.set_ylabel("Reconstructed Direction")
fig.colorbar(im, ax=ax, label="Events")
ax = axes[1]
if energy:
*_, im = ax.hist2d(
np.log10(data["energy"]),
np.log10(data["reco_energy"]),
bins=50,
norm=LogNorm(),
)
ax.set_xlabel("Energy")
ax.set_ylabel("Reconstructed Energy")
fig.colorbar(im, ax=ax, label="Events")
else:
ax.text(0.1, 0.5, "No Energy Reconstruction")
ax.axis("off")
ax = axes[2]
if particle_id:
*_, im = ax.hist2d(
data["particle_id"], data["reco_particle_id"], bins=50, norm=LogNorm()
)
ax.set_xlabel("Particle Id")
ax.set_ylabel("Reconstructed Particle Id")
fig.colorbar(im, ax=ax, label="Events")
else:
ax.text(0.1, 0.5, "No Particle ID Reconstruction")
ax.axis("off")
ax = axes[3]
ax.axis("off")
# ---
# Save or Show Plot
# ---
if output_directory is None:
plt.show()
else:
fig.savefig(output_directory / Path(__file__).with_suffix(".pdf").name)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
811b79b6db9f4dacb0b1d77a35e3f002726487bd | 2cf9dc082fc1d61154d31c7e234e75c47ac85262 | /stayfresh/stayfresh/settings.py | b3d419d511aadcd4592d799bbbe5c056f3fb8af6 | [] | no_license | ianthpun/sfproject | dfdc86564effcca2fdf797a66d08853f536b223e | a44274b1a3de5a4e4668233852fb9f73044ee364 | refs/heads/master | 2021-04-06T20:47:18.939484 | 2018-03-14T20:31:21 | 2018-03-14T20:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | """
Django settings for stayfresh project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0a4rkquc+2nm$*)z$+8-^(bc!pm+a9t*m0fue!(ez2pzry#ez*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
CRISPY_TEMPLATE_PACK ='bootstrap3'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sfapp',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stayfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'stayfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL ='login'
LOGIN_URL = 'login/' | [
"[email protected]"
] | |
9de897c9dcbfa33cbc463cd5adaa0f6fe4e75f18 | 195f6b6c52a27b307bc1b35d35f851bf06b45e1d | /papers/TSE_SI_2020/stage_1.py | 5c6ab250ff91d88f5926c8dc55dbb3d8af4ce0b0 | [] | no_license | SoapClancy/Wind | 2f3bf4648584ec0c5ff48fe255b1e9d6c1b9798a | 33bf7c91c3cc6e075026356d51d5a7c8de2de3df | refs/heads/master | 2023-06-12T01:35:21.736828 | 2021-07-04T11:56:10 | 2021-07-04T11:56:10 | 274,250,027 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,164 | py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from Ploting.fast_plot_Func import *
from project_utils import *
from prepare_datasets import WF_RATED_POUT_MAPPER, NUMBER_OF_WT_MAPPER, CLUSTER_TO_WF_MAPPER, WF_TO_CLUSTER_MAPPER, \
Croatia_WF_LOCATION_MAPPER, AVAILABLE_WF_NAMES
import numpy as np
import datetime
from WT_WF_Class import WF
from File_Management.path_and_file_management_Func import *
from File_Management.load_save_Func import *
from Regression_Analysis.DataSet_Class import DeepLearningDataSet
from Filtering.OutlierAnalyser_Class import DataCategoryData
import copy
import pandas as pd
from typing import Callable
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow import keras
from Regression_Analysis.DeepLearning_Class import BayesianConv1DBiLSTM
from prepare_datasets import load_raw_wt_from_txt_file_and_temperature_from_csv
from Ploting.fast_plot_Func import *
import re
from papers.TSE_SI_2020.utils import preds_continuous_var_plot, cal_continuous_var_error, \
turn_preds_into_univariate_pdf_or_cdf_like
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from typing import Union, Sequence
import datetime
from locale import setlocale, LC_ALL
from BivariateAnalysis_Class import MethodOfBins
from matplotlib import cm, colors
from Ploting.adjust_Func import *
from scipy import stats
from UnivariateAnalysis_Class import ECDF, UnivariatePDFOrCDFLike
import json
from scipy.stats import pearsonr, spearmanr, kendalltau
from scipy.io.matlab import savemat, loadmat
setlocale(LC_ALL, "en_US")
sns.set()
tfd = eval("tfp.distributions")
tfpl = eval("tfp.layers")
tfb = eval("tfp.bijectors")
tfp_util = eval("tfp.util")
tfp_math = eval("tfp.math")
tf.keras.backend.set_floatx('float32')
PRED_BY = "mean"
assert PRED_BY in {"mean", "median"}
BATCH_SIZE = 25000
SHARED_DIR_PATH = project_path_ / r"Data\Raw_measurements\TSE_SI_2020_WF\shared_data"
IMPUTE_INDIVIDUAL_DATA_PATH = project_path_ / r"Data\Results\Forecasting\impute_data\individual"
IMPUTE_CLUSTER_DATA_PATH = project_path_ / r"Data\Results\Forecasting\impute_data\cluster"
IMPUTE_ALL_DATA_PATH = project_path_ / r"Data\Results\Forecasting\impute_data\all"
NN_MODEL_PATH = project_path_ / r"Data\Results\Forecasting\NN_model"
NN_MODEL_PREDICTION_PATH = project_path_ / r"Data\Results\Forecasting\NN_model_predictions"
# Prepare data for NN
class EveryThingDataSet(DeepLearningDataSet):
def __init__(self, *args, data: pd.DataFrame, geo_loc: str, use_corr_impute: str = '', **kwargs):
assert type(data) == pd.DataFrame
self.geo_loc = geo_loc
if use_corr_impute == '':
predictor_cols = ('wind speed', 'air density', 'wind direction',)
dependant_cols = ('wind speed', 'air density', 'wind direction',)
quantile_transformed_col = ('wind speed', 'air density', 'wind direction',)
else:
predictor_cols = list(data.columns)
# N.B. WD is before AD, not good for Copula, but be transposed in test phase
dependant_cols = [x for x in data.columns if geo_loc in x]
quantile_transformed_col = tuple(list(data.columns))
super().__init__(
*args,
original_data_set=data,
quantile_transformed_col=quantile_transformed_col,
predictor_cols=tuple(predictor_cols),
dependant_cols=tuple(dependant_cols),
name=self.geo_loc + 'EveryThing' + use_corr_impute + '_training',
transformation_args_folder_path=project_path_ / ''.join(
['Data/Results/Forecasting/NN_model_DataSet_transformation_args/',
self.geo_loc, '/EveryThing', use_corr_impute, '/transformation_args/']
),
**kwargs
)
class OPRDataSet(DeepLearningDataSet):
def __init__(self, *args, data: pd.DataFrame, geo_loc: str, **kwargs):
assert type(data) == pd.DataFrame
self.geo_loc = geo_loc
super().__init__(
*args,
original_data_set=data,
quantile_transformed_col=('wind speed', 'air density', 'wind direction', 'active power output'),
one_hot_transformed_col=('normally operating number',),
predictor_cols=('wind speed', 'air density', 'wind direction', 'active power output',
'normally operating number'),
dependant_cols=('normally operating number',),
name=self.geo_loc + 'OPR' + '_training',
transformation_args_folder_path=project_path_ / ''.join(
['Data/Results/Forecasting/NN_model_DataSet_transformation_args/',
self.geo_loc, '/OPR/transformation_args/']
),
**kwargs
)
def get_natural_resources_or_opr_or_copula_data(geo_loc: str, task: str, only_return_for_nn: bool = True, *,
use_corr_impute: str,
res_name: str):
assert task in {"training", "test"}
assert res_name in {'EveryThing', 'OPR', 'Copula'}
if use_corr_impute != '':
assert use_corr_impute in {'_cluster_', '_all_'}
file_path = SHARED_DIR_PATH / fr"all/{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster {geo_loc} WF.csv"
data = pd.read_csv(file_path, index_col="time stamp")
data.index = pd.DatetimeIndex(data.index)
file_path = IMPUTE_INDIVIDUAL_DATA_PATH / fr"{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster {geo_loc}" \
" WF imputed natural resources.csv"
data_impute = pd.read_csv(file_path, index_col='time stamp')
data_impute.index = pd.DatetimeIndex(data_impute.index)
test_periods = load_pkl_file(SHARED_DIR_PATH / rf"{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster test_periods.pkl")
training_mask = data.index < (test_periods[geo_loc][0] + datetime.timedelta(days=7))
test_mask = data.index >= test_periods[geo_loc][0]
if res_name == 'EveryThing':
data.drop(labels=['active power output', 'normally operating number'], axis=1, inplace=True)
data = data[data_impute.columns]
if use_corr_impute == '':
data.iloc[:data_impute.shape[0]] = data_impute.values
else:
if use_corr_impute == '_cluster_':
file_path = IMPUTE_CLUSTER_DATA_PATH / fr"{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster" \
" imputed natural resources.csv"
else:
file_path = IMPUTE_ALL_DATA_PATH / "all imputed natural resources.csv"
all_impute_data = pd.read_csv(
file_path,
index_col=0,
header=0,
)
all_impute_data.index = pd.DatetimeIndex(all_impute_data.index)
# get extra cols
extra_col_names = []
for i, col_name in enumerate(all_impute_data.columns):
if geo_loc not in col_name:
extra_col_names.append(col_name)
data.columns = [geo_loc + '_' + x for x in data.columns]
data.loc[training_mask] = all_impute_data.loc[data.index[training_mask], data.columns]
data = pd.merge(data, all_impute_data.loc[:, extra_col_names],
left_index=True, right_index=True, how='left')
elif res_name == 'OPR':
data = data[[*data_impute.columns, 'active power output', 'normally operating number']]
data.loc[:data_impute.index[-1], data_impute.columns] = data_impute.values
else:
to_delete_mask = np.isnan(data['wind speed'].values)
data_impute.loc[to_delete_mask[:data_impute.shape[0]], 'wind speed'] = np.nan
data = data[[*data_impute.columns, 'active power output', 'normally operating number']]
data.loc[:data_impute.index[-1], data_impute.columns] = data_impute.values
# data[training_mask].to_csv(
# "./training/" +
# f"{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster {geo_loc} WF imputed natural resources {use_corr_impute}.csv"
# )
# data[test_mask].to_csv(
# f"./test/{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster {geo_loc} WF imputed natural resources {use_corr_impute}.csv"
# )
mask = training_mask if task == "training" else test_mask
# scatter(data.iloc[:, 0], data.iloc[:, 3], title='all', x_lim=(-0.5, 29.5), y_lim=(-0.05, None))
# scatter(data[training_mask].iloc[:, 0], data[training_mask].iloc[:, 3], title='training',
# x_lim=(-0.5, 29.5), y_lim=(-0.05, None))
# scatter(data[test_mask].iloc[:, 0], data[test_mask].iloc[:, 3], title='test',
# x_lim=(-0.5, 29.5), y_lim=(-0.05, None))
# scatter(data[np.bitwise_and(test_mask,
# data['normally operating number'] == NUMBER_OF_WT_MAPPER[geo_loc])].iloc[:, 0],
# data[np.bitwise_and(test_mask,
# data['normally operating number'] == NUMBER_OF_WT_MAPPER[geo_loc])].iloc[:, 3],
# title='test full',
# x_lim=(-0.5, 29.5), y_lim=(-0.05, None))
# series(data[test_mask]['normally operating number'])
data = data[mask]
if res_name == 'Copula':
wf_obj = WF(
data=data,
obj_name=f'{WF_TO_CLUSTER_MAPPER[geo_loc]} cluster {geo_loc} WF',
rated_active_power_output=WF_RATED_POUT_MAPPER[geo_loc],
predictor_names=('wind speed', 'wind direction', 'air density'),
dependant_names=('active power output',),
number_of_wind_turbine=NUMBER_OF_WT_MAPPER[geo_loc],
)
return wf_obj
if res_name == 'EveryThing':
data_set = EveryThingDataSet(data=data, geo_loc=geo_loc, use_corr_impute=use_corr_impute)
else:
data_set = OPRDataSet(data=data, geo_loc=geo_loc)
data_set_windowed = data_set.windowed_dataset(
x_window_length=datetime.timedelta(days=7),
y_window_length=datetime.timedelta(hours=1),
x_y_start_index_diff=datetime.timedelta(days=7),
window_shift=datetime.timedelta(hours=1),
batch_size=BATCH_SIZE
)
if only_return_for_nn:
return data_set_windowed[0]
else:
return data_set, data_set_windowed
class TSE2020SIBayesianConv1DBiLSTM(BayesianConv1DBiLSTM):
def __init__(self, input_shape, output_shape, *, dense_hypers_units):
super().__init__(
input_shape=input_shape, output_shape=output_shape, batch_size=BATCH_SIZE,
conv1d_hypers_filters=9, conv1d_hypers_padding="same", conv1d_hypers_kernel_size=3,
maxpool1d_hypers_padding="valid", maxpool1d_hypers_pool_size=2,
bilstm_hypers_units=42,
use_encoder_decoder=False,
dense_hypers_units=dense_hypers_units
)
def get_distribution_layer(self, dtype=tf.float32):
pass
class NaturalResourcesBayesianConv1DBiLSTM(TSE2020SIBayesianConv1DBiLSTM):
def __init__(self, input_shape, output_shape):
super(NaturalResourcesBayesianConv1DBiLSTM, self).__init__(
input_shape,
output_shape,
dense_hypers_units=tfpl.MixtureSameFamily.params_size(
num_components=3,
component_params_size=tfpl.MultivariateNormalTriL.params_size(3)
)
)
def get_distribution_layer(self, dtype=tf.float32):
dist_layer = tfpl.MixtureSameFamily(num_components=3,
component_layer=tfpl.MultivariateNormalTriL(3),
convert_to_tensor_fn=tfd.Distribution.sample)
return dist_layer
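# Worked parameter count for the mixture head above (standard tfp params_size formulas,
# stated here only for clarity):
#   MultivariateNormalTriL.params_size(3) = 3 (loc) + 3 * 4 / 2 (lower-triangular scale) = 9
#   MixtureSameFamily.params_size(3, 9)   = 3 (mixture logits) + 3 * 9 = 30 dense units
# The OneHotCategorical head used below simply needs `category_number` units.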
class OPRBayesianConv1DBiLSTM(TSE2020SIBayesianConv1DBiLSTM):
def __init__(self, input_shape, output_shape, category_number):
self.category_number = category_number
super(OPRBayesianConv1DBiLSTM, self).__init__(
input_shape,
output_shape,
dense_hypers_units=tfpl.OneHotCategorical.params_size(self.category_number)
)
def get_distribution_layer(self, dtype=tf.float32):
dist_layer = tfpl.OneHotCategorical(self.category_number)
return dist_layer
def get_nn_model(input_shape, output_shape, *, res_name, category_number: int = None):
if res_name == 'EveryThing':
model = NaturalResourcesBayesianConv1DBiLSTM(input_shape, output_shape)
else:
assert category_number is not None
model = OPRBayesianConv1DBiLSTM(input_shape, output_shape, category_number)
return model
def train_nn_model(geo_loc: str, res_name, *, use_corr_impute: str, continue_training: bool):
print("★" * 79)
print(f"Train {geo_loc} WF {res_name}{use_corr_impute}")
print("★" * 79)
# Get data
training_data_for_nn = get_natural_resources_or_opr_or_copula_data(geo_loc, "training",
res_name=res_name,
use_corr_impute=use_corr_impute)
test_data_for_nn = get_natural_resources_or_opr_or_copula_data(geo_loc, "test",
res_name=res_name,
use_corr_impute=use_corr_impute)
# Define NLL. NB, KL part and its weight/scale factor has been passed through divergence_fn or regularizer
def nll(y_true, y_pred):
return -y_pred.log_prob(y_true)
# Build model
model = get_nn_model(input_shape=training_data_for_nn.element_spec[0].shape[1:],
output_shape=training_data_for_nn.element_spec[1].shape[1:],
res_name=res_name,
category_number=NUMBER_OF_WT_MAPPER[geo_loc] + 1)
model = model.build()
try_to_find_folder_path_otherwise_make_one(NN_MODEL_PATH / f'{geo_loc}/{res_name}{use_corr_impute}')
# Define Callbacks
class SaveCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
if epoch % 150 == 0:
model.save_weights(NN_MODEL_PATH / f'{geo_loc}/{res_name}{use_corr_impute}/epoch_{epoch}.h5')
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=50000)
# Compile model
if res_name == 'EveryThing':
metrics = ['mse']
else:
metrics = ['accuracy']
model.compile(loss=nll, optimizer=tf.keras.optimizers.Adam(0.00001), metrics=metrics,
experimental_run_tf_function=False)
model.summary()
_debug_training_data_for_nn = list(training_data_for_nn.as_numpy_iterator())
_debug_training_data_for_nn_x = _debug_training_data_for_nn[0][0][[0]]
_debug_training_data_for_nn_y = _debug_training_data_for_nn[0][1][[0]]
if continue_training:
model.load_weights(NN_MODEL_PATH / f'{geo_loc}/{res_name}{use_corr_impute}/to_continue.h5')
model.fit(
training_data_for_nn, verbose=1, epochs=100_005,
validation_data=test_data_for_nn, validation_freq=50, validation_batch_size=BATCH_SIZE // 2,
callbacks=[SaveCallback(), early_stopping]
)
model.save_weights(NN_MODEL_PATH / f'{geo_loc}/{res_name}{use_corr_impute}/final.h5')
return model
def test_nn_model(geo_loc: str, res_name: str, ensemble_size: int = 3000, *,
use_corr_impute: str, use_training_set: bool = False):
if use_training_set:
assert ensemble_size == 1
# Get data
test_data_set, test_data_windowed = get_natural_resources_or_opr_or_copula_data(
geo_loc,
"training" if use_training_set else "test",
False,
res_name=res_name,
use_corr_impute=use_corr_impute
)
# Build and get model
tester = get_nn_model(input_shape=test_data_windowed[0].element_spec[0].shape[1:],
output_shape=test_data_windowed[0].element_spec[1].shape[1:],
res_name=res_name,
category_number=NUMBER_OF_WT_MAPPER[geo_loc] + 1)
tester = tester.build()
tester.load_weights(NN_MODEL_PATH / f'{geo_loc}/{res_name}{use_corr_impute}/final.h5')
# Select samples to test
test_samples_x, test_samples_y = [], []
gen = test_data_windowed[1]
for i, sample in enumerate(gen(True)):
test_samples_x.append(sample[0].numpy())
test_samples_y.append(sample[1].numpy())
test_samples_x = np.array(test_samples_x)
test_samples_y = np.array(test_samples_y)
# Formal test
prediction_results = np.full((ensemble_size, *test_samples_y.shape), np.nan)
for i in range(ensemble_size):
prediction_results[i] = tester(test_samples_x).sample().numpy()
# Inverse transform
if res_name == 'EveryThing':
# N.B. WD is before AD, not good for Copula, but be transposed in test phase (see EveryThingDataSet)
if use_corr_impute != '':
inverse_transform_names = [(f'{geo_loc}_wind speed', 'quantile'),
(f'{geo_loc}_wind direction', 'quantile'),
(f'{geo_loc}_air density', 'quantile')]
else:
inverse_transform_names = [('wind speed', 'quantile'),
('air density', 'quantile'),
('wind direction', 'quantile')]
else:
inverse_transform_names = [('normally operating number', 'one_hot')
for _ in range(NUMBER_OF_WT_MAPPER[geo_loc] + 1)]
test_samples_y_inv = test_data_set.inverse_transform(test_samples_y, inverse_transform_names)
prediction_results_inv = np.full((ensemble_size, *test_samples_y_inv.shape), np.nan)
for i in range(prediction_results_inv.shape[0]):
prediction_results_inv[i] = test_data_set.inverse_transform(prediction_results[i], inverse_transform_names)
if res_name == 'EveryThing':
# N.B. WD is before AD, not good for Copula, but be transposed in test phase (see EveryThingDataSet)
if use_corr_impute != '':
temp = copy.deepcopy(test_samples_y_inv[:, :, 1])
test_samples_y_inv[:, :, 1] = copy.deepcopy(test_samples_y_inv[:, :, 2])
test_samples_y_inv[:, :, 2] = temp
temp = copy.deepcopy(prediction_results_inv[:, :, :, 1])
prediction_results_inv[:, :, :, 1] = copy.deepcopy(prediction_results_inv[:, :, :, 2])
prediction_results_inv[:, :, :, 2] = temp
# Save
save_path = NN_MODEL_PREDICTION_PATH / fr"{geo_loc}/{res_name}{use_corr_impute}"
try_to_find_folder_path_otherwise_make_one(save_path)
for j in range(2):
error = test_samples_y_inv[:, 0, j] - prediction_results_inv[0, :, 0, j]
if j == 0:
savemat(save_path / "training_set_ws_err.mat", {"error": error})
else:
savemat(save_path / "training_set_ad_err.mat", {"error": error})
    save_pkl_file(save_path / ("training_set_predictions.pkl" if use_training_set else "test_set_predictions.pkl"),
                  {
                      "test_samples_y_inv": test_samples_y_inv,
                      "prediction_results_inv": prediction_results_inv
                  })
temp = load_pkl_file(save_path / "test_set_predictions.pkl")
for j in range(temp['test_samples_y_inv'].shape[-1]):
ax = series(temp['test_samples_y_inv'][:, 0, j], color='red')
if PRED_BY == 'mean':
ax = series(np.mean(temp['prediction_results_inv'], axis=0)[:, 0, j], ax=ax, color='royalblue')
else:
ax = series(np.median(temp['prediction_results_inv'], axis=0)[:, 0, j], ax=ax, color='royalblue')
series(temp['prediction_results_inv'][:300, :, 0, j].T, color='grey', linewidth=0.5, ax=ax, alpha=0.1,
zorder=-1)
ax = series(temp['test_samples_y_inv'][:, 0, j], color='red')
ax = series(np.percentile(temp['prediction_results_inv'], 2.5, axis=0)[:, 0, j], ax=ax,
color='green', linestyle='--')
series(np.percentile(temp['prediction_results_inv'], 97.5, axis=0)[:, 0, j], ax=ax,
color='green', linestyle='--')
# hist(temp['prediction_results_inv'][:, 0, 0, 0])
def get_natural_resources_results(wf_name: str, use_corr_impute: str):
pred_natural_resources = load_pkl_file(
NN_MODEL_PREDICTION_PATH / fr"{wf_name}/EveryThing{use_corr_impute}/test_set_predictions.pkl"
)
return pred_natural_resources
def get_opr_results(wf_name: str):
pred_opr = load_pkl_file(
NN_MODEL_PREDICTION_PATH / fr"{wf_name}/OPR/test_set_predictions.pkl"
)
return pred_opr
def plot_natural_resources_results(wf_name: str, use_corr_impute: str):
pred_natural_resources = get_natural_resources_results(wf_name, use_corr_impute)
actual = pred_natural_resources['test_samples_y_inv']
preds = pred_natural_resources['prediction_results_inv']
preds_continuous_var_plot(wf_name=wf_name,
preds_samples=preds[:, :, 0, 0].T,
target_pout=actual[:, 0, 0],
name='WS')
preds_continuous_var_plot(wf_name=wf_name,
preds_samples=preds[:, :, 0, 1].T,
target_pout=actual[:, 0, 1],
name='AD')
preds_continuous_var_plot(wf_name=wf_name,
preds_samples=preds[:, :, 0, 2].T,
target_pout=actual[:, 0, 2],
name='WD')
def plot_opr_results(wf_name: str):
pred_opr = get_opr_results(wf_name)
actual = pred_opr['test_samples_y_inv']
preds = pred_opr['prediction_results_inv']
z = np.full((NUMBER_OF_WT_MAPPER[wf_name] + 1, actual.shape[0],), 0, dtype=float)
cmap = cm.get_cmap('binary')
for i in range(actual.shape[0]):
temp = np.histogram(preds[:, i, 0, 0], np.arange(-0.5, NUMBER_OF_WT_MAPPER[wf_name] + 1))
z[:, i] = temp[0] / np.sum(temp[0])
z = (np.round(z, 1) * 10).astype(int)
x = np.arange(-0.5, actual.shape[0], 1)
y = np.arange(-0.5, NUMBER_OF_WT_MAPPER[wf_name] + 1)
plt.figure(figsize=(6, 5 * 0.551), constrained_layout=True)
ax = plt.gca()
norm = colors.Normalize(vmin=- 0.1, vmax=10)
for i in range(9, 0, -1):
ax.fill_between(np.arange(1000, 1010),
np.arange(1000, 1010),
np.arange(1010, 1020),
facecolor=cmap(norm(i)),
edgecolor='none',
label=f"{i * 10}%")
ax.pcolormesh(x, y, z,
cmap=cmap,
norm=norm,
edgecolor='none',
zorder=-1),
plt.xlabel('Time [Hour]', fontsize=10)
plt.xlim(-1, 169)
plt.xticks(np.arange(0, 168 + 1, 24), np.arange(0, 168 + 1, 24), fontsize=10)
plt.ylabel(f'{wf_name} WF Operating Regime', fontsize=10)
plt.ylim(-0.5, NUMBER_OF_WT_MAPPER[wf_name] + 0.5)
plt.yticks(np.arange(0, NUMBER_OF_WT_MAPPER[wf_name] + 1, 4),
np.arange(0, NUMBER_OF_WT_MAPPER[wf_name] + 1, 4), fontsize=10)
step(np.arange(0, 168), actual.flatten(), color='red', ax=ax,
linestyle='-.', linewidth=1.2, alpha=0.95, label='Actual')
step(np.arange(0, 168), stats.mode(np.squeeze(preds)).mode.flatten(), ax=ax,
color='royalblue', linewidth=1.2, alpha=0.95, label='Pred.')
plt.grid(True, color='gold', alpha=0.25)
ax = adjust_legend_in_ax(ax, protocol='Outside center right')
def cal_natural_resources_errors(wf_name: str):
file_path = project_path_ / fr"Data\Results\Forecasting\errors\stage1"
try_to_find_folder_path_otherwise_make_one(file_path)
use_corr_impute = ("", "_cluster_")
ans_dict = {"own": dict(),
"cluster": dict()}
cols = ['WS', 'AD', 'WD_cos', 'WD_sin']
for i, now_use_corr_impute in enumerate(use_corr_impute):
pred_natural_resources = get_natural_resources_results(wf_name, now_use_corr_impute)
actual = pred_natural_resources['test_samples_y_inv']
preds = pred_natural_resources['prediction_results_inv']
for j, now_col in enumerate(cols):
if now_col == 'WD_cos':
dist_objs = [UnivariatePDFOrCDFLike.init_from_samples_by_ecdf(x)
for x in np.cos(np.deg2rad(preds[:, :, 0, 2].T))]
target = np.cos(np.deg2rad(actual[:, 0, 2]))
name = 'WD'
elif now_col == 'WD_sin':
dist_objs = [UnivariatePDFOrCDFLike.init_from_samples_by_ecdf(x)
for x in np.sin(np.deg2rad(preds[:, :, 0, 2].T))]
target = np.sin(np.deg2rad(actual[:, 0, 2]))
name = 'WD'
else:
dist_objs = [UnivariatePDFOrCDFLike.init_from_samples_by_ecdf(x) for x in preds[:, :, 0, j].T]
target = actual[:, 0, j]
name = now_col
temp = cal_continuous_var_error(target=target,
model_output=dist_objs,
name=name)
if now_use_corr_impute == '':
ans_dict["own"][now_col] = temp
else:
ans_dict["cluster"][now_col] = temp
for val in ans_dict.values():
val['WD'] = {}
for ele in ('mae', 'rmse', 'pinball_loss', 'crps'):
val['WD'][ele] = (val['WD_cos'][ele] + val['WD_sin'][ele]) / 2
def recursion_round(node):
for node_key, node_val in node.items():
if isinstance(node_val, dict):
recursion_round(node_val)
else:
node[node_key] = f"{np.round(node_val, 3):.3f}"
ans_dict_round = copy.deepcopy(ans_dict)
recursion_round(ans_dict_round)
with open(file_path / f"{wf_name}_natural_resources.json", 'w') as json_file:
json.dump(ans_dict_round, json_file)
df = pd.DataFrame(columns=['mae', 'rmse', 'pinball_loss', 'crps'],
index=pd.MultiIndex.from_tuples([('own', 'WS'),
('cluster', 'WS'),
('own', 'AD'),
('cluster', 'AD'),
('own', 'WD'),
('cluster', 'WD')]))
for i in ('own', 'cluster'):
for j in ('WS', 'AD', 'WD'):
for k in ['mae', 'rmse', 'pinball_loss', 'crps']:
df.loc[(i, j), k] = ans_dict[i][j][k]
df.to_csv(file_path / f"{wf_name}_natural_resources.csv")
def cal_opr_errors(wf_name: str):
file_path = project_path_ / fr"Data\Results\Forecasting\errors\stage1"
try_to_find_folder_path_otherwise_make_one(file_path)
pred_natural_resources = get_opr_results(wf_name)
actual = pred_natural_resources['test_samples_y_inv']
preds = pred_natural_resources['prediction_results_inv']
z = np.full((NUMBER_OF_WT_MAPPER[wf_name] + 1, actual.shape[0],), 0, dtype=float)
for i in range(actual.shape[0]):
z[:, i] = np.histogram(preds[:, i, 0, 0], np.arange(-0.5, NUMBER_OF_WT_MAPPER[wf_name] + 1))[0] / preds.shape[0]
error = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
err_val = error(list(actual.flatten().astype(int)),
list(z.T),
).numpy()
acc = 0
for i in range(actual.shape[0]):
now_true = int(actual[i, 0, 0])
now_pred = preds[:, i, 0, 0].astype(int)
# acc += np.sum(now_pred + 1 == now_true)
acc += np.sum(np.any([now_pred + x == now_true for x in range(-4, 5, 1)], axis=0))
return np.mean(err_val), acc / (preds.shape[0] * preds.shape[1])
def cal_corr():
    # Spearman correlation between each WF's own measurements and the corresponding
    # cluster-level series (wind direction is compared through its cos and sin components).
    ans = pd.DataFrame()
    for name in ['Bruska', 'Jelinak', 'Lukovac']:
        data, _ = get_natural_resources_or_opr_or_copula_data(name, "training", False,
                                                              res_name='EveryThing',
                                                              use_corr_impute='_cluster_')
        cluster_df = pd.DataFrame()
        for i in range(3):
            arr_a = data.data.iloc[:, i].values
            arr_b = data.data.iloc[:, i + 3].values
            if i != 1:
                coef, p_val = spearmanr(arr_a, arr_b)
                now_ans = pd.DataFrame(
                    data=[[coef, p_val]],
                    index=[WF_TO_CLUSTER_MAPPER[name]],
                    columns=[data.data.columns[i] + ' coefficient', data.data.columns[i] + ' p-value']
                )
                cluster_df = pd.concat([cluster_df, now_ans], axis=1)
            else:
                coef, p_val = spearmanr(np.cos(np.deg2rad(arr_a)), np.cos(np.deg2rad(arr_b)))
                now_ans = pd.DataFrame(
                    data=[[coef, p_val]],
                    index=[WF_TO_CLUSTER_MAPPER[name]],
                    columns=['wind direction cos coefficient', 'wind direction cos p-value']
                )
                cluster_df = pd.concat([cluster_df, now_ans], axis=1)
                coef, p_val = spearmanr(np.sin(np.deg2rad(arr_a)), np.sin(np.deg2rad(arr_b)))
                now_ans = pd.DataFrame(
                    data=[[coef, p_val]],
                    index=[WF_TO_CLUSTER_MAPPER[name]],
                    columns=['wind direction sin coefficient', 'wind direction sin p-value']
                )
                cluster_df = pd.concat([cluster_df, now_ans], axis=1)
        ans = pd.concat([ans, cluster_df], axis=0)
    return ans
if __name__ == "__main__":
# train_nn_model("Glunca", 'EveryThing', continue_training=True, use_corr_impute='')
# train_nn_model('Glunca', 'OPR', continue_training=False)
# train_nn_model("Jelinak", 'EveryThing', continue_training=True)
# train_nn_model('Jelinak', 'OPR', continue_training=True, use_corr_impute='')
# train_nn_model("Zelengrad", 'EveryThing', continue_training=False)
# train_nn_model("Zelengrad", 'OPR', continue_training=True)
# train_nn_model("Bruska", 'EveryThing', continue_training=False)
# train_nn_model("Bruska", 'OPR', continue_training=False)
# train_nn_model("Lukovac", 'EveryThing', continue_training=False, use_corr_impute='')
# train_nn_model("Lukovac", 'OPR', continue_training=True)
# train_nn_model("Katuni", 'EveryThing', continue_training=False, use_corr_impute='')
# train_nn_model("Katuni", 'OPR', continue_training=True, use_corr_impute='')
pass
# train_nn_model("Glunca", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
# train_nn_model("Jelinak", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
# train_nn_model("Zelengrad", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
# train_nn_model("Bruska", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
# train_nn_model("Lukovac", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
# train_nn_model("Katuni", 'EveryThing', continue_training=True, use_corr_impute='_cluster_')
pass
# get_natural_resources_or_opr_or_copula_data('Katuni', 'training', use_corr_impute='', res_name='EveryThing')
test_nn_model('Katuni', 'EveryThing', ensemble_size=1, use_corr_impute='_cluster_', use_training_set=True)
# for final_name in AVAILABLE_WF_NAMES:
# # plot_natural_resources_results(final_name, '')
# # plot_natural_resources_results(final_name, '_cluster_')
# # plot_opr_results(final_name)
# cal_natural_resources_errors(final_name)
# plot_natural_resources_results('Bruska', '')
# plot_natural_resources_results('Bruska', '_cluster_')
# cal_natural_resources_errors('Bruska')
# print(cal_opr_errors('Bruska'))
# print(cal_opr_errors('Jelinak'))
# cal_natural_resources_errors('Lukovac')
# plot_opr_results('Jelinak')
# print(cal_opr_errors('Jelinak'))
# for now_wf in AVAILABLE_WF_NAMES:
# plot_opr_results(now_wf)
# print(f"{now_wf} cross_entropy = {cal_opr_errors(now_wf):.3f}")
# cal_corr()
# ==== file: /04_CNN_model.py  (repo: marofmar/Udacity_ComputerVisionNanodegree) ====
"""
Tue 5 Nov 2019
CNN to detect facial keypoints
Changes in this version:
    1. x.view(@@@, -1) changed to x.view(-1, @@@)
    2. expanded from two convolutional layers to five
    3. replaced the single fully connected layer with three
Hopefully this new architecture works better than before!
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
'''
Output width of a convolution layer, assuming zero padding:
    (w - f) / s + 1
w: width of the image (assuming a square input)
f: filter size (f x f, square)
s: stride
(padding is taken as 0 here, which is why it does not appear in calc below)
'''
def calc(w,f,s):
return (w-f)/s +1
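# Quick sanity check (illustrative, not part of the original file): a 5x5 filter with
# stride 1 on a 224-pixel-wide input gives calc(224, 5, 1) == 220.0.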
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
#*****************************************************
#self.conv1 = nn.Conv2d(1, 32, 5)
self.layer1 = nn.Sequential(nn.Conv2d(1, 32, 5),#ch, out, kernel
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2)) # kernel_size, stride, padding
# 28
self.layer2 = nn.Sequential(nn.Conv2d(32, 64, 5),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2)) # 60
self.layer3 = nn.Sequential(nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2,1)) # 62
self.layer4 = nn.Sequential(nn.Conv2d(64, 32, 3),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2)) # 30
self.layer5 = nn.Sequential(nn.Conv2d(32, 16, 3),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2)) # 14
self.fc1 = nn.Linear(16*14*14,64)
self.fc2 = nn.Linear(64, 128)
self.fc3 = nn.Linear(128,136)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = x.view(-1, 16*14*14)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
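# The 136 outputs correspond to 68 facial keypoints, each regressed as an (x, y) pair;
# fc1's input size 16*14*14 assumes an input resolution that leaves a 14x14 feature map
# after the five conv/pool stages above.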
# ==== file: /Priya_Notebooks/api_keys.py  (repo: yash5OG/GamingVizs-PriyaYash, license: MIT) ====
client_id = "YOUR API KEY HERE"
# ==== file: /pages/locators/MainPageLocators.py  (repo: zhulanov-an/pythonQASelenium) ====
class MainPageLocators:
MAIN_SLIDER = {'css': '#slideshow0'}
IMAGES_IN_MAIN_SLIDER = {'xpath': '//*[@id="slideshow0"]//img'}
FEATURED_ITEMS = {'css': '#content > div.row > div'}
BRAND_PAGINATION_ITEMS = {'xpath': '//*[@id="content"]/div[3]/div[2]/span'}
MAIN_PAGINATION_ITEMS = {'xpath': '//*[@id="content"]/div[1]/div[2]/span'}
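# Hedged usage sketch: one way dict-style locators like those above are commonly resolved
# with Selenium. The helper below is illustrative only and not part of the original
# page-object framework; `driver` is assumed to be a selenium WebDriver instance.
from selenium.webdriver.common.by import By

_STRATEGY_MAP = {'css': By.CSS_SELECTOR, 'xpath': By.XPATH}

def find_by_locator(driver, locator):
    """Resolve a {'css': ...} or {'xpath': ...} locator dict to a WebElement."""
    strategy, value = next(iter(locator.items()))
    return driver.find_element(_STRATEGY_MAP[strategy], value)

# Example: find_by_locator(driver, MainPageLocators.MAIN_SLIDER)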
# ==== file: /venv/Lib/site-packages/openmdao/drivers/tests/test_doe_driver.py  (repo: ManojDjs/Heart-rate-estimation) ====
"""
Test DOE Driver and Generators.
"""
import unittest
import os
import shutil
import tempfile
import csv
import json
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_distributed import DistParab
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver, printoptions
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class ParaboloidArray(om.ExplicitComponent):
"""
Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3.
Where x and y are xy[0] and xy[1] respectively.
"""
def setup(self):
self.add_input('xy', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def compute(self, inputs, outputs):
"""
f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = inputs['xy'][0]
y = inputs['xy'][1]
outputs['f_xy'] = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
class ParaboloidDiscrete(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=10, tags='xx')
self.add_discrete_input('y', val=0, tags='yy')
self.add_discrete_output('f_xy', val=0, tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = int(f_xy)
class ParaboloidDiscreteArray(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=np.ones((2, )), tags='xx')
self.add_discrete_input('y', val=np.ones((2, )), tags='yy')
self.add_discrete_output('f_xy', val=np.ones((2, )), tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
        discrete_outputs['f_xy'] = f_xy.astype(int)
class TestErrors(unittest.TestCase):
def test_generator_check(self):
prob = om.Problem()
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.FullFactorialGenerator)
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: FullFactorialGenerator")
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.Problem())
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but an instance of Problem was found.")
def test_lhc_criterion(self):
with self.assertRaises(ValueError) as err:
om.LatinHypercubeGenerator(criterion='foo')
self.assertEqual(str(err.exception),
"Invalid criterion 'foo' specified for LatinHypercubeGenerator. "
"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', "
"'cm', 'correlation', 'corr', None].")
@use_tempdirs
class TestDOEDriver(unittest.TestCase):
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
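        # The list above holds the 3x3 full-factorial points over [0, 1] for x and y
        # (x varies fastest), with f_xy = (x-3)**2 + x*y + (y+4)**2 - 3 at each point.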
def test_no_generator(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 0)
def test_list(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(cases)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_list_errors(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# data does not contain a list
cases = {'desvar': 1.0}
with self.assertRaises(RuntimeError) as err:
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
self.assertEqual(str(err.exception), "Invalid DOE case data, "
"expected a list but got a dict.")
# data contains a list of non-list
cases = [{'desvar': 1.0}]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n{'desvar': 1.0}")
# data contains a list of list, but one has the wrong length
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.y', 1., 'foo']]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n"
"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]")
# data contains a list of list, but one case has an invalid design var
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"'p2.z' is not a valid design variable:\n"
"[['p1.x', 1.0], ['p2.z', 1.0]]")
# data contains a list of list, but one case has multiple invalid design vars
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.y', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"['p1.y', 'p2.z'] are not valid design variables:\n"
"[['p1.y', 1.0], ['p2.z', 1.0]]")
def test_csv(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for (var, val) in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_csv_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))
model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))
model.add_subsystem('comp1', Paraboloid())
model.add_subsystem('comp2', Paraboloid())
model.connect('p1.x', 'comp1.x', src_indices=[0])
model.connect('p2.y', 'comp1.y', src_indices=[0])
model.connect('p1.x', 'comp2.x', src_indices=[1])
model.connect('p2.y', 'comp2.y', src_indices=[1])
model.add_design_var('p1.x', lower=0.0, upper=1.0)
model.add_design_var('p2.y', lower=0.0, upper=1.0)
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = [
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 16)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['p1.x'][0], expected_case['p1.x'][0])
self.assertEqual(outputs['p2.y'][0], expected_case['p2.y'][0])
self.assertEqual(outputs['p1.x'][1], expected_case['p1.x'][1])
self.assertEqual(outputs['p2.y'][1], expected_case['p2.y'][1])
def test_csv_errors(self):
# test invalid file name
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator(1.23)
self.assertEqual(str(err.exception),
"'1.23' is not a valid file name.")
# test file not found
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator('nocases.csv')
self.assertEqual(str(err.exception),
"File not found: nocases.csv")
# create problem and a list of DOE cases
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# test CSV file with an invalid design var
header = [var for var, _ in cases[0]]
header[-1] = 'foobar'
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"'foobar' is not a valid design variable.")
# test CSV file with invalid design vars
header = [var + '_bad' for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"%s are not valid design variables." %
str(header))
# test CSV file with invalid values
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([np.ones((2, 2)) * val for _, val in case])
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts = {'legacy': '1.13'}
else:
opts = {}
with printoptions(**opts):
# have to use regex to handle differences in numpy print formats for shape
            msg = r"Error assigning p1.x = \[ 0. 0. 0. 0.\]: could not broadcast " \
                  r"input array from shape \(4.*\) into shape \(1.*\)"
with self.assertRaisesRegex(ValueError, msg):
prob.run_driver()
def test_uniform(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# all values should be between -10 and 10, check expected values for seed = 0
expected = [
{'x': np.array([0.97627008]), 'y': np.array([4.30378733])},
{'x': np.array([2.05526752]), 'y': np.array([0.89766366])},
{'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
{'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
{'x': np.array([9.27325521]), 'y': np.array([-2.33116962])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 5)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y'):
assert_near_equal(outputs[name], expected_case[name], 1e-4)
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_full_factorial_factoring(self):
class Digits2Num(om.ExplicitComponent):
"""
            Makes a 4-digit number from two 2-element vectors.
            For single-digit integers it always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 16)
# Testing uniqueness. If all elements are unique, it should be the same length as the
# number of cases
self.assertEqual(len(set(objs)), 16)
def test_full_factorial_array(self):
prob = om.Problem()
model = prob.model
model.set_input_defaults('xy', np.array([0., 0.]))
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=np.array([-10., -50.]), upper=np.array([10., 50.]))
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'xy': np.array([-10., -50.])},
{'xy': np.array([0., -50.])},
{'xy': np.array([10., -50.])},
{'xy': np.array([-10., 0.])},
{'xy': np.array([0., 0.])},
{'xy': np.array([10., 0.])},
{'xy': np.array([-10., 50.])},
{'xy': np.array([0., 50.])},
{'xy': np.array([10., 50.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['xy'][0], expected_case['xy'][0])
self.assertEqual(outputs['xy'][1], expected_case['xy'][1])
def test_full_fact_dict_levels(self):
        # Specify levels for only one DV; the other one uses the default.
prob = om.Problem()
model = prob.model
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
# size = prob.comm.size
# rank = prob.comm.rank
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels={"y": 3}))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 6)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_case['x'])
self.assertEqual(outputs['y'], expected_case['y'])
self.assertEqual(outputs['f_xy'], expected_case['f_xy'])
def test_generalized_subset(self):
# All DVs have the same number of levels
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels=2, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.0]), 'y': np.array([0.0]), 'f_xy': np.array([22.0])},
{'x': np.array([1.0]), 'y': np.array([1.0]), 'f_xy': np.array([27.0])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 2)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_generalized_subset_dict_levels(self):
        # Number of levels specified individually for each DV (scalars).
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 3, 'y': 6}, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.])},
{'x': np.array([0.]), 'y': np.array([0.4]), 'f_xy': np.array([25.36])},
{'x': np.array([0.]), 'y': np.array([0.8]), 'f_xy': np.array([29.04])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.])},
{'x': np.array([1.]), 'y': np.array([0.4]), 'f_xy': np.array([20.76])},
{'x': np.array([1.]), 'y': np.array([0.8]), 'f_xy': np.array([24.84])},
{'x': np.array([0.5]), 'y': np.array([0.2]), 'f_xy': np.array([20.99])},
{'x': np.array([0.5]), 'y': np.array([0.6]), 'f_xy': np.array([24.71])},
{'x': np.array([0.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertAlmostEqual(outputs[name][0], expected_case[name][0])
def test_generalized_subset_array(self):
# Number of levels specified individually for all DVs (arrays).
class Digits2Num(om.ExplicitComponent):
"""
            Makes a 4-digit number from two 2-element vectors.
            For single-digit integers it always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 5, 'y': 8}, reduction=14))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 104) # The number can be verified with standalone pyDOE2
# Testing uniqueness. If all elements are unique, it should be the same length as the number of cases
self.assertEqual(len(set(objs)), 104)
def test_plackett_burman(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_box_behnken(self):
upper = 10.
center = 1
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
indep.add_output('z', 0.0)
model.add_subsystem('comp', om.ExecComp('a = x**2 + y - z'), promotes=['*'])
model.add_design_var('x', lower=0., upper=upper)
model.add_design_var('y', lower=0., upper=upper)
model.add_design_var('z', lower=0., upper=upper)
model.add_objective('a')
prob.driver = om.DOEDriver(om.BoxBehnkenGenerator(center=center))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
        # The Box-Behnken design for 3 factors involves three blocks; in each block,
        # 2 factors are varied through the 4 possible combinations of high & low.
        # It also includes center points (all factors at their central values),
        # so the case count here is 3 blocks * 4 runs + 1 center point = 13.
        # ref: https://en.wikipedia.org/wiki/Box-Behnken_design
self.assertEqual(len(cases), (3*4)+center)
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([5.]), 'z': np.array([5.])},
]
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'z'):
self.assertEqual(outputs[name], expected_case[name])
def test_latin_hypercube(self):
samples = 4
bounds = np.array([
[-1, -10], # lower bounds for x and y
[1, 10] # upper bounds for x and y
])
xlb, xub = bounds[0][0], bounds[1][0]
ylb, yub = bounds[0][1], bounds[1][1]
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=xlb, upper=xub)
model.add_design_var('y', lower=ylb, upper=yub)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.options['generator'] = om.LatinHypercubeGenerator(samples=4, seed=0)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
{'x': np.array([0.2118274]), 'y': np.array([9.458865])},
{'x': np.array([0.71879361]), 'y': np.array([3.22947057])},
{'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['x']
y = outputs['y']
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['x'], 1e-4)
assert_near_equal(y, expected_case['y'], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_array(self):
samples = 4
bounds = np.array([
[-10, -50], # lower bounds for x and y
[10, 50] # upper bounds for x and y
])
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('xy', np.array([50., 50.])), promotes=['*'])
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=bounds[0], upper=bounds[1])
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=4, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
xlb, xub = bounds[0][0], bounds[1][0]
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
ylb, yub = bounds[0][1], bounds[1][1]
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'xy': np.array([-1.98618312, -32.12026584])},
{'xy': np.array([2.118274, 47.29432502])},
{'xy': np.array([7.18793606, 16.14735283])},
{'xy': np.array([-7.25593248, -11.37792043])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['xy'][0]
y = outputs['xy'][1]
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['xy'][0], 1e-4)
assert_near_equal(y, expected_case['xy'][1], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_center(self):
samples = 4
upper = 10.
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
model.add_subsystem('comp', Paraboloid())
model.connect('indep.x', 'comp.x')
model.connect('indep.y', 'comp.y')
model.add_design_var('indep.x', lower=0., upper=upper)
model.add_design_var('indep.y', lower=0., upper=upper)
model.add_objective('comp.f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=samples, criterion='c'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), samples)
# the sample space for each variable (0 to upper) should be divided into
# equal size buckets and each variable should have a value in each bucket
bucket_size = upper / samples
all_buckets = set(range(samples))
x_buckets_filled = set()
y_buckets_filled = set()
        # with criterion of 'center', each value should be in the center of its bucket
valid_values = [round(bucket_size * (bucket + 1 / 2), 3) for bucket in all_buckets]
for case in cases:
outputs = cr.get_case(case).outputs
x = float(outputs['indep.x'])
y = float(outputs['indep.y'])
x_buckets_filled.add(int(x/bucket_size))
y_buckets_filled.add(int(y/bucket_size))
self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_record_bug(self):
# There was a bug that caused values to be recorded in driver_scaled form.
prob = om.Problem()
model = prob.model
ivc = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
ivc.add_output('x', val=1.)
model.add_subsystem('obj_comp', om.ExecComp('y=2*x'), promotes=['*'])
model.add_subsystem('con_comp', om.ExecComp('z=3*x'), promotes=['*'])
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.recording_options['includes'] = ['*']
model.add_design_var('x', lower=0., upper=10., ref=3.0)
model.add_constraint('z', lower=2.0, scaler=13.0)
model.add_objective('y', scaler=-1)
prob.setup(check=True)
prob.run_driver()
cr = om.CaseReader("cases.sql")
final_case = cr.list_cases('driver', out_stream=None)[-1]
outputs = cr.get_case(final_case).outputs
assert_near_equal(outputs['x'], 10.0, 1e-7)
assert_near_equal(outputs['y'], 20.0, 1e-7)
assert_near_equal(outputs['z'], 30.0, 1e-7)
def test_discrete_desvar_list(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_discrete_desvar_alltypes(self):
# Make sure we can handle any allowed type for discrete variables.
class PassThrough(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val='abc')
self.add_discrete_output('y', val='xyz')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
discrete_outputs['y'] = discrete_inputs['x']
prob = om.Problem()
model = prob.model
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 'abc')
model.add_subsystem('parab', PassThrough(), promotes=['*'])
model.add_design_var('x')
model.add_constraint('y')
my_obj = Paraboloid()
samples = [[('x', 'abc'), ],
[('x', None), ],
[('x', my_obj, ), ]
]
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = ['abc', None]
for case, expected_value in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_value)
# Can't read/write objects through SQL case.
self.assertEqual(prob['y'], my_obj)
def test_discrete_array_output(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_discrete_output('x', np.ones((2, ), dtype=int))
        indeps.add_discrete_output('y', np.ones((2, ), dtype=int))
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x', np.array([5, 1]))
model.add_design_var('y', np.array([1, 4]))
model.add_objective('f_xy')
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.add_recorder(recorder)
prob.recording_options['record_inputs'] = True
prob.setup()
prob.run_driver()
prob.record("end")
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('problem', out_stream=None)
case = cr.get_case('end')
inputs = case.inputs
outputs = case.outputs
for name in ('x', 'y'):
self.assertTrue(isinstance(inputs[name], np.ndarray))
            self.assertEqual(inputs[name].shape, (2,))
self.assertTrue(isinstance(outputs[name], np.ndarray))
            self.assertEqual(outputs[name].shape, (2,))
def test_discrete_arraydesvar_list(self):
prob = om.Problem()
model = prob.model
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', np.array([5, 1])), ('y', np.array([1, 4]))],
[('x', np.array([3, 2])), ('y', np.array([6, -3]))],
[('x', np.array([-1, 0])), ('y', np.array([3, 5]))],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
        prob.set_val('x', np.ones((2, ), dtype=int))
        prob.set_val('y', np.ones((2, ), dtype=int))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': np.array([5, 1]), 'y': np.array([1, 4]), 'f_xy': np.array([31, 69])},
{'x': np.array([3, 2]), 'y': np.array([6, -3]), 'f_xy': np.array([115, -7])},
{'x': np.array([-1, 0]), 'y': np.array([3, 5]), 'f_xy': np.array([59, 87])},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name][0], expected_case[name][0])
self.assertEqual(outputs[name][1], expected_case[name][1])
def test_discrete_desvar_csv(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = '\n'.join([" x , y",
"5, 1",
"3, 6",
"-1, 3",
])
# this file contains design variable inputs in CSV format
with open('cases.csv', 'w') as f:
f.write(samples)
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_desvar_indices(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2',
x=np.array([1., 2., 3.]),
y=np.zeros(3)), promotes=['*'])
prob.model.add_design_var('x', lower=7.0, upper=11.0, indices=[0])
prob.model.add_objective('y', index=0)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.setup()
prob.run_driver()
# Last value in fullfactorial DOE is 11, which gives 121.
assert_near_equal(prob.get_val('y'), np.array([121., 4., 9.]))
def test_multidimensional_inputs(self):
# Create a subsystem with multidimensional array inputs
matmul_comp = om.ExecComp('z = matmul(x,y)',
x=np.ones((3, 3)),
y=np.ones((3, 3)),
z=np.ones((3, 3)))
# Single execution test
prob = om.Problem()
prob.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob.setup()
prob['x'] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
prob['y'] = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
prob.run_model()
# DOE test
prob2 = om.Problem()
prob2.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob2.model.add_design_var('x')
prob2.model.add_design_var('y')
prob2.model.add_objective('z')
prob2.setup()
case_list = [
[('x', prob['x']), ('y', prob['y'])]
]
prob2.driver = om.DOEDriver(case_list)
prob2.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob2.run_driver()
prob2.cleanup()
cr = om.CaseReader("cases.sql")
outputs = cr.get_case(0).outputs
for name in ('x', 'y', 'z'):
assert_near_equal(outputs[name], prob[name])
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDOE(unittest.TestCase):
N_PROCS = 4
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_indivisible_error(self):
prob = om.Problem()
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 3
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"The total number of processors is not evenly divisible by the "
"specified number of processors per model.\n Provide a number of "
"processors that is a multiple of 3, or specify a number "
"of processors per model that divides into 4.")
def test_minprocs_error(self):
prob = om.Problem(FanInGrouped())
# require 2 procs for the ParallelGroup
prob.model._proc_info['sub'] = (2, None, 1.0)
# run cases on all procs
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 1
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"<model> <class FanInGrouped>: MPI process allocation failed: can't meet "
"min_procs required for the following subsystems: ['sub']")
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3), procs_per_model=1,
run_parallel=True)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = self.expected_fullfact3
size = prob.comm.size
rank = prob.comm.rank
# cases will be split across files for each proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // size + (rank < len(expected) % size))
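        # e.g. 9 expected cases over 4 procs: rank 0 records 3 cases, ranks 1-3 record 2 each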
for n in range(num_cases):
outputs = cr.get_case(cases[n]).outputs
idx = n * size + rank # index of expected case
self.assertEqual(outputs['x'], expected[idx]['x'])
self.assertEqual(outputs['y'], expected[idx]['y'])
self.assertEqual(outputs['f_xy'], expected[idx]['f_xy'])
# total number of cases recorded across all procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_parallel_2x2(self):
# run cases in parallel with 2 procs per model
# (cases will be split between the 2 parallel model instances)
run_parallel = True
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
from openmdao.utils.mpi import multi_proc_exception_check
with multi_proc_exception_check(prob.comm):
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
num_cases = 0
# we can run two models in parallel on our 4 procs
num_models = prob.comm.size // procs_per_model
# a separate case file will be written by rank 0 of each parallel model
# (the top two global ranks)
rank = prob.comm.rank
if rank < num_models:
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver')
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models+(rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
for name in ('x1', 'x2', 'c3.y'):
self.assertEqual(outputs[name], expected[idx][name])
else:
self.assertFalse("Cases from rank %d are being written" % rank in output)
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_parallel_4x1(self):
# run cases in parallel with 1 proc per model
# (cases will be split between the 4 serial model instances)
run_parallel = True
procs_per_model = 1
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
rank = prob.comm.rank
# there will be a separate case file for each proc, containing the cases
# run by the instance of the model that runs in serial mode on that proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
# we are running 4 models in parallel, each using 1 proc
num_models = prob.comm.size // procs_per_model
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models + (rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x1'], expected[idx]['x1'])
self.assertEqual(outputs['x2'], expected[idx]['x2'])
self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_serial_2x2(self):
# do not run cases in parallel, but with 2 procs per model
# (all cases will run on each of the 2 parallel model instances)
run_parallel = False
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
num_cases = 0
rank = prob.comm.rank
# we are running the model on two sets of two procs
num_models = prob.comm.size // procs_per_model
if rank < num_models:
# a separate case file will be written by rank 0 of each parallel model
# (the top two global ranks)
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc... each proc will run all cases
num_cases = len(cases)
self.assertEqual(num_cases, len(expected))
for idx, case in enumerate(cases):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x1'], expected[idx]['x1'])
self.assertEqual(outputs['x2'], expected[idx]['x2'])
self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])
# total number of cases recorded will be twice the number of cases
# (every case will be recorded on all pairs of procs)
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), num_models*len(expected))
def test_fan_in_grouped_serial_4x1(self):
# do not run cases in parallel, with 1 proc per model
# (all cases will run on each of the 4 serial model instances)
run_parallel = False
procs_per_model = 1
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
rank = prob.comm.rank
# we are running the model on all four procs
num_models = prob.comm.size // procs_per_model
# there will be a separate case file for each proc, containing the cases
# run by the instance of the model that runs in serial mode on that proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
# we are running 4 models in parallel, each using 1 proc
num_models = prob.comm.size // procs_per_model
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected))
for idx, case in enumerate(cases):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x1'], expected[idx]['x1'])
self.assertEqual(outputs['x2'], expected[idx]['x2'])
self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])
# total number of cases recorded will be 4x the number of cases
# (every case will be recorded on all procs)
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), num_models*len(expected))
@use_tempdirs
class TestDOEDriverFeature(unittest.TestCase):
def setUp(self):
import json
import numpy as np
self.expected_csv = '\n'.join([
" x , y",
"0.0, 0.0",
"0.5, 0.0",
"1.0, 0.0",
"0.0, 0.5",
"0.5, 0.5",
"1.0, 0.5",
"0.0, 1.0",
"0.5, 1.0",
"1.0, 1.0",
])
with open('cases.csv', 'w') as f:
f.write(self.expected_csv)
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
values = []
cases = []
for case in expected:
values.append((case['x'], case['y'], case['f_xy']))
# converting ndarray to list enables JSON serialization
cases.append((('x', list(case['x'])), ('y', list(case['y']))))
self.expected_text = "\n".join([
"x: %5.2f, y: %5.2f, f_xy: %6.2f" % vals_i for vals_i in values
])
self.expected_json = json.dumps(cases).replace(']]],', ']]],\n')
with open('cases.json', 'w') as f:
f.write(self.expected_json)
def test_uniform(self):
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.set_val('x', 0.0)
prob.set_val('y', 0.0)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 5)
values = []
for case in cases:
outputs = cr.get_case(case).outputs
values.append((outputs['x'], outputs['y'], outputs['f_xy']))
print("\n".join(["x: %5.2f, y: %5.2f, f_xy: %6.2f" % xyf for xyf in values]))
def test_csv(self):
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
prob.set_val('x', 0.0)
prob.set_val('y', 0.0)
# this file contains design variable inputs in CSV format
with open('cases.csv', 'r') as f:
self.assertEqual(f.read(), self.expected_csv)
# run problem with DOEDriver using the CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
values = []
for case in cases:
outputs = cr.get_case(case).outputs
values.append((outputs['x'], outputs['y'], outputs['f_xy']))
self.assertEqual("\n".join(["x: %5.2f, y: %5.2f, f_xy: %6.2f" % xyf for xyf in values]),
self.expected_text)
def test_list(self):
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
import json
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
prob.set_val('x', 0.0)
prob.set_val('y', 0.0)
# load design variable inputs from JSON file and decode into list
with open('cases.json', 'r') as f:
json_data = f.read()
self.assertEqual(json_data, self.expected_json)
case_list = json.loads(json_data)
self.assertEqual(case_list, json.loads(json_data))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(case_list)
# a ListGenerator was created
self.assertEqual(type(prob.driver.options['generator']), om.ListGenerator)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
values = []
for case in cases:
outputs = cr.get_case(case).outputs
values.append((outputs['x'], outputs['y'], outputs['f_xy']))
self.assertEqual("\n".join(["x: %5.2f, y: %5.2f, f_xy: %6.2f" % xyf for xyf in values]),
self.expected_text)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDOEFeature(unittest.TestCase):
N_PROCS = 2
def setUp(self):
import numpy as np
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
# expect odd cases on rank 0 and even cases on rank 1
values = []
for idx, case in enumerate(expected):
if idx % 2 == rank:
values.append((case['x'], case['y'], case['f_xy']))
self.expect_text = "\n"+"\n".join([
"x: %5.2f, y: %5.2f, f_xy: %6.2f" % xyf for xyf in values
])
def test_full_factorial(self):
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from mpi4py import MPI
prob = om.Problem()
prob.model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
prob.model.add_design_var('x', lower=0.0, upper=1.0)
prob.model.add_design_var('y', lower=0.0, upper=1.0)
prob.model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 1
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
self.assertEqual(MPI.COMM_WORLD.size, 2)
# check recorded cases from each case file
rank = MPI.COMM_WORLD.rank
filename = "cases.sql_%d" % rank
self.assertEqual(filename, "cases.sql_%d" % rank)
# SqliteCaseReader will automatically look for cases.sql_meta if
# metadata_filename is not specified, but test by explicitly
# using it here.
cr = om.CaseReader(filename, metadata_filename = 'cases.sql_meta')
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 5 if rank == 0 else 4)
values = []
for case in cases:
outputs = cr.get_case(case).outputs
values.append((outputs['x'], outputs['y'], outputs['f_xy']))
self.assertEqual("\n"+"\n".join(["x: %5.2f, y: %5.2f, f_xy: %6.2f" % xyf for xyf in values]),
self.expect_text)
del cr
# Test for missing metadata db file error
try:
cr_test = om.CaseReader(filename, metadata_filename = 'nonexistant_filename')
found_metadata = True
except IOError:
found_metadata = False
self.assertFalse(found_metadata, "No error from SqliteCaseReader for missing metadata file.")
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDOEFeature2(unittest.TestCase):
N_PROCS = 4
def setUp(self):
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.00])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.00])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.00])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.50])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.50])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.50])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.00])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.00])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.00])},
]
# expect odd cases on rank 0 and even cases on rank 1
values = []
for idx, case in enumerate(expected):
if idx % 2 == rank:
values.append((case['x1'], case['x2'], case['c3.y']))
self.expect_text = "\n"+"\n".join([
"x1: %5.2f, x2: %5.2f, c3.y: %6.2f" % vals_i for vals_i in values
])
def test_fan_in_grouped(self):
import openmdao.api as om
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from mpi4py import MPI
prob = om.Problem(FanInGrouped())
prob.model.add_design_var('x1', lower=0.0, upper=1.0)
prob.model.add_design_var('x2', lower=0.0, upper=1.0)
prob.model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
# the FanInGrouped model uses 2 processes, so we can run
# two instances of the model at a time, each using 2 of our 4 procs
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = procs_per_model = 2
prob.setup()
prob.run_driver()
prob.cleanup()
# a separate case file will be written by rank 0 of each parallel model
# (the top two global ranks)
rank = prob.comm.rank
num_models = prob.comm.size // procs_per_model
if rank < num_models:
filename = "cases.sql_%d" % rank
cr = om.CaseReader(filename)
cases = cr.list_cases('driver')
values = []
for case in cases:
outputs = cr.get_case(case).outputs
values.append((outputs['x1'], outputs['x2'], outputs['c3.y']))
self.assertEqual("\n"+"\n".join(["x1: %5.2f, x2: %5.2f, c3.y: %6.2f" % (x1, x2, y) for x1, x2, y in values]),
self.expect_text)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDistribDOE(unittest.TestCase):
N_PROCS = 4
def test_doe_distributed_var(self):
size = 3
prob = om.Problem()
model = prob.model
ivc = om.IndepVarComp()
ivc.add_output('x', np.ones((size, )))
ivc.add_output('y', np.ones((size, )))
ivc.add_output('a', -3.0 + 0.6 * np.arange(size))
model.add_subsystem('p', ivc, promotes=['*'])
model.add_subsystem("parab", DistParab(arr_size=size, deriv_type='dense'), promotes=['*'])
model.add_subsystem('sum', om.ExecComp('f_sum = sum(f_xy)',
f_sum=np.ones((size, )),
f_xy=np.ones((size, ))),
promotes_outputs=['*'])
model.promotes('sum', inputs=['f_xy'], src_indices=om.slicer[:])
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_objective('f_sum', index=-1)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=2))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 2
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# check recorded cases from each case file
rank = prob.comm.rank
if rank == 0:
filename0 = "cases.sql_0"
values = []
cr = om.CaseReader(filename0)
cases = cr.list_cases('driver')
for case in cases:
outputs = cr.get_case(case).outputs
values.append(outputs)
# 2**6 cases, half on each rank
self.assertEqual(len(values), 32)
x_inputs = [list(val['x']) for val in values]
for n1 in [-50.]:
for n2 in [-50., 50.]:
for n3 in [-50., 50.]:
self.assertEqual(x_inputs.count([n1, n2, n3]), 8)
elif rank == 1:
filename0 = "cases.sql_1"
values = []
cr = om.CaseReader(filename0)
cases = cr.list_cases('driver')
for case in cases:
outputs = cr.get_case(case).outputs
values.append(outputs)
# 2**6 cases, half on each rank
self.assertEqual(len(values), 32)
x_inputs = [list(val['x']) for val in values]
for n1 in [50.]:
for n2 in [-50., 50.]:
for n3 in [-50., 50.]:
self.assertEqual(x_inputs.count([n1, n2, n3]), 8)
if __name__ == "__main__":
unittest.main()
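# Note (editorial comment, not part of the original test file): the test classes above
# declare N_PROCS, so they are meant to run under MPI.  They are normally launched via
# OpenMDAO's testflo runner, which reads N_PROCS and allocates that many ranks, or
# manually with an MPI launcher, e.g. `mpirun -n 4 python <this_file>`; the exact
# command line here is illustrative only.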
| [
"[email protected]"
] | |
a008a2484cd85352027f8158be9a8d986d35cb96 | c5ec5d92c1b97cdebc24cfa2144d58e5455f3c1a | /Tasks/03_Improved_ANN/ann_utils.py | fb817f497c532d2a88145c025f5f9d6b48e6fbed | [] | no_license | Silver-Taurus/DeepLearning_at_FLT | f6f22635942e4436e5186bda5fc59be6f190f884 | 1ced64e62a360adfbde9cceb73de2139e16543c1 | refs/heads/master | 2020-04-30T22:33:40.741004 | 2019-08-26T04:08:46 | 2019-08-26T04:08:46 | 177,122,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,211 | py | ''' Utility funtions for Artificial Neural Network '''
import numpy as np
def function_dispatcher(default_fn):
''' Decorator for dispatching the function from the registry '''
registry = {}
registry['Default'] = default_fn
def decorated_function(fn):
''' function is decorated to give the desired function back
but with an additional property '''
return registry.get(fn, registry['Default'])
def register(act_fn_name):
        ''' Decorator factory (or parameterized decorator) that will be a
        property of our decorated function and, when called, returns the
        registering decorator '''
def register_decorator(act_fn):
''' decorator to register the function in the registry and
return the function back as it is '''
registry[act_fn_name] = act_fn
return act_fn
return register_decorator
decorated_function.register = register
return decorated_function
def validate(features, targets, alpha, epochs):
''' Function to validate the parameters '''
assert features.ndim == 2, ('Features entered should be a numpy array of rank 2')
assert targets.ndim == 2 and targets.shape[0] == 1, ('Targets should be a numpy array of rank 2 and dimensions 1 * m \n(m: samples)')
assert alpha > 0 and alpha <= 1, ('Learning rate should be between 0 and 1')
assert epochs > 0, ('Epochs should be greater than 1')
def create(X, layers_units):
''' Function to create the layer inputs, initial weights and bias matrix '''
layer_inputs = [X.shape[0]] + layers_units[:-1]
layer_samples = X.shape[1]
W = []; b = []; Z = []; A = []; dWs = []; dbs = []; V_dWs = []; V_dbs = []
for inputs, units in zip(layer_inputs, layers_units):
W.append(np.random.randn(inputs, units)*np.sqrt(2 / inputs)) # Careful Weight Initialisation
b.append(np.random.randn(units, 1))
Z.append(np.zeros((units, layer_samples)))
A.append(np.zeros((units, layer_samples)))
dWs.append(np.zeros((inputs, units)))
dbs.append(np.zeros((units, 1)))
# Adding momentum to the gradient descent
V_dWs.append(np.zeros((inputs, units)))
V_dbs.append(np.zeros((units, 1)))
return W, b, Z, A, dWs, dbs, V_dWs, V_dbs
def propagate(X, Y, W, b, Z, A, alpha, l, beta, bs, iterations, dWs, dbs, V_dWs, V_dbs, layers):
''' Function to perform both forward pass and backward pass '''
# Adding mini-batch technique
for i in range(iterations):
Z, A = batch_forward_propagation(X, W, b, Z, A, bs, i, layers)
dWs, dbs, V_dWs, V_dbs = batch_backward_propagation(X, Y, W, Z, A, l, beta, bs, i, dWs, dbs, V_dWs, V_dbs, layers)
W, b = optimize(W, b, alpha, V_dWs, V_dbs, layers)
C = cost_function(A[-1], Y)
return W, b, Z, A, dWs, dbs, V_dWs, V_dbs, C
def batch_forward_propagation(X, W, b, Z, A, bs, i, layers):
''' Function for forward propagating a batch '''
Z_batch = [z[:, i*bs: (i + 1)*bs] for z in Z]
A_batch = [a[:, i*bs: (i + 1)*bs] for a in A]
Z_batch, A_batch = forward_propagation(X[:, i*bs: (i + 1)*bs], W, b, Z_batch, A_batch, layers)
Z = [np.concatenate((z[:, :i*bs], zb, z[:, (i + 1)*bs:]), axis=1) for zb,z in zip(Z_batch, Z)]
A = [np.concatenate((a[:, :i*bs], ab, a[:, (i + 1)*bs:]), axis=1) for ab,a in zip(A_batch, A)]
return Z, A
def forward_propagation(X, W, b, Z, A, layers):
''' Function for Forward Propagation '''
A_cache = [X]
for num in range(layers):
activation = 'sigmoid' if num == layers - 1 else 'tanh'
Z[num], A[num] = process_layer(W[num], b[num], A_cache.pop(), activation)
A_cache.append(A[num])
return Z, A
def process_layer(W, b, A_cache, activation):
''' Function to process a layer of NN '''
z = np.dot(W.T, A_cache) + b
a = activation_function(activation)(z)
return z, a
def loss_function(Y_hat, Y):
''' Function to return the residual (loss or error) '''
return -(Y * np.log(Y_hat) + (1 - Y) * (np.log(1 - Y_hat)))
def cost_function(Y_hat, Y):
''' Function to return the cost '''
return (1 / Y.shape[1]) * np.sum(loss_function(Y_hat, Y))
def batch_backward_propagation(X, Y, W, Z, A, l, beta, bs, i, dWs, dbs, V_dWs, V_dbs, layers):
''' Function for backward propagating a batch '''
Z_batch = [z[:, i*bs: (i + 1)*bs] for z in Z]
A_batch = [a[:, i*bs: (i + 1)*bs] for a in A]
X_batch = X[:, i*bs: (i + 1)*bs]
Y_batch = Y[:, i*bs: (i + 1)*bs]
return backward_propagation(X_batch, Y_batch, W, Z_batch, A_batch, l, beta, dWs, dbs, V_dWs, V_dbs, layers)
def backward_propagation(X, Y, W, Z, A, l, beta, dWs, dbs, V_dWs, V_dbs, layers):
''' Function to backpropagate using gradient descent '''
dZ_cache = [A[-1] - Y]
for num in range(layers - 1, -1, -1):
dZ = dZ_cache.pop()
a = X if num == 0 else A[num-1]
dWs[num] = (1 / a.shape[1]) * (np.dot(dZ, a.T).T) + (l / X.shape[1]) * W[num] # Adding Regularisation
dbs[num] = (1 / a.shape[1]) * np.sum(dZ, axis=1, keepdims=True)
V_dWs[num] = beta * V_dWs[num] + (1 - beta) * dWs[num]
V_dbs[num] = beta * V_dbs[num] + (1 - beta) * dbs[num]
if num - 1 >= 0:
dZ_cache.append(np.dot(W[num], dZ) * dtanh(Z[num-1]))
return dWs, dbs, V_dWs, V_dbs
def optimize(W, b, alpha, V_dWs, V_dbs, layers):
''' Function to optimize (or update) the weights and bias '''
for num in range(layers):
W[num] = W[num] - alpha * V_dWs[num]
b[num] = b[num] - alpha * V_dbs[num]
return W, b
@function_dispatcher
def activation_function(fn):
return AttributeError('No such function Exists!!!')
@activation_function.register('tanh')
def activate_tanh(Z):
''' Function to return the activated values '''
return np.tanh(Z)
@activation_function.register('sigmoid')
def activate_sigmoid(Z):
''' Function to return the activated value matrix '''
return 1 / (1 + np.exp(-Z))
def dtanh(Z):
    ''' Derivative of tanh, i.e. 1 - tanh(Z)**2, used during backpropagation '''
return 1 - activate_tanh(Z)**2
def dsigmoid(Z):
    ''' Derivative of the sigmoid: exp(-Z)*sigmoid(Z)**2 equals sigmoid(Z)*(1 - sigmoid(Z)) '''
    return np.exp(-Z) * (activate_sigmoid(Z) ** 2)
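# Usage sketch (illustrative, not part of the original module): the registry-based
# dispatcher above returns the function registered under a given name and falls back
# to the default function object for unknown names.
if __name__ == '__main__':
    sample = np.array([[0.0, 1.0], [-1.0, 2.0]])
    tanh_fn = activation_function('tanh')        # resolves to activate_tanh
    sigmoid_fn = activation_function('sigmoid')  # resolves to activate_sigmoid
    print(tanh_fn(sample))
    print(sigmoid_fn(sample))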
| [
"[email protected]"
] | |
f94664ca4600fb82ec724d82af43ec97bca0c16f | 27bfd3bc212c3451e454ca9089d2e13625ce04e7 | /dynamic-programming/longest_palindrome_subsequence_DP.py | 3c52b9d141d6671993cd45714f4d1bec45c454f6 | [] | no_license | RobRcx/algorithm-design-techniques | de6e33891ca9ced725ebe2c24cbc160bd274b232 | 7f3526fe7d80bbb525daf58ac8f536eb4e8695ec | refs/heads/master | 2021-06-12T22:01:41.420464 | 2021-03-21T10:24:40 | 2021-03-21T10:24:40 | 164,210,600 | 0 | 0 | null | 2019-01-05T11:55:25 | 2019-01-05T11:55:24 | null | UTF-8 | Python | false | false | 979 | py | # Copyright (c) June 02, 2017 CareerMonk Publications and others.
# E-Mail : [email protected]
# Creation Date : 2017-06-02 06:15:46
# Last modification : 2017-06-02
# Modified by : Narasimha Karumanchi
# Book Title : Algorithm Design Techniques
# Warranty : This software is provided "as is" without any
# warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
def longest_palindrome_subsequence(S):
    n = len(S)
    L = [[0 for x in range(n)] for x in range(n)]
    # palindromes of length 1: every single character, including the last index
    for i in range(0, n):
        L[i][i] = 1
    # palindromes of length k, for k = 2..n
    for k in range(2, n+1):
        for i in range(0, n-k+1):
            j = i+k-1
            if S[i] == S[j] and k == 2:
                L[i][j] = 2
            elif S[i] == S[j]:
                L[i][j] = 2 + L[i+1][j-1]
            else:
                L[i][j] = max(L[i+1][j], L[i][j-1])
    #print L
    return L[0][n-1]
print longest_palindrome_subsequence("Career Monk Publications")
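# Quick sanity check (illustrative, not part of the original file): for the classic
# example string "BBABCBCAB" the longest palindromic subsequence is "BABCBAB",
# so longest_palindrome_subsequence("BBABCBCAB") evaluates to 7.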
| [
"[email protected]"
] | |
fa0472b95809dcfb8b5cd1a12645261c8e213dba | 71d463e7c263b53ab2909ea8b439092aec3161c1 | /HW2/hw2_p2_data/a.py | e7517d607c02718d74beca165a8f3b457301d434 | [] | no_license | RahulMaganti47/Learning-in-Robotics | 09c4a9f41b6eb26676d0e231e3842cb31aaf59be | 7e04c83ec23597c2ad6bea237101e0257cb1cb96 | refs/heads/main | 2023-06-23T12:09:00.407961 | 2021-07-23T17:51:58 | 2021-07-23T17:51:58 | 349,591,922 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | import sys
import numpy as np
import math
import matplotlib.pyplot as plt
def sample_state_noise():
epsilon_k = np.random.normal(0, 1)
return epsilon_k
def sample_obs_noise():
nu_k = np.random.normal(0, math.sqrt(1/2))
return nu_k
def propagate_system_state(x):
return -1*x + sample_state_noise()
def propagate_system_obs(x):
return math.sqrt(pow(x,2) + 1) + sample_obs_noise()
def generate_ground_truth_data(iters, x_curr):
obs_dataset = []
state_dataset = [x_curr]
for _ in range(iters):
x_next = propagate_system_state(x_curr)
obs = propagate_system_obs(x_next)
obs_dataset.append(obs)
state_dataset.append(x_next)
x_curr = x_next
return np.array(obs_dataset), np.array(state_dataset)
class EKF():
def __init__(self, state_0, var_0, actual_obs):
self.mean_k_k = state_0
self.cov_k_k = np.diag([.1, .1])
self.actual_obs = actual_obs
self.A = np.zeros((2, 2))
def propagate_mean(self):
#print(self.mean_k_k)
#print(self.A)
self.A[0] = np.array([self.mean_k_k[1].item(), self.mean_k_k[0].item()])
self.A[1] = np.array([0, 1])
mean_knext_k = [email protected]_k_k
return mean_knext_k.astype('float64') #(2, 1)
def propagate_covariance(self):
R = np.diag([2, 0.])
cov_knext_k = [email protected][email protected]
return cov_knext_k + R #(2, 2)
def compute_jac_obs(self, mean_k_next_k):
C = np.zeros((1, 2))
elem = mean_k_next_k[0] / math.sqrt(pow(mean_k_next_k[0], 2) + 1)
C[0, :] = np.array([elem, 0.], dtype='float64')
return C
def compute_obs_estimate(self, C, mean_k_next_k):
return np.matmul(C.reshape(1, 2), mean_k_next_k.reshape(2, 1))
def compute_fake_obs(self, obs_actual, obs_estimate, C, mean_k_next_k):
y_knext_prime = obs_actual - obs_estimate + C@mean_k_next_k
return y_knext_prime
def compute_kalman_gain(self, cov_knext_k, C):
information = 1. / (C@[email protected] + 1/2)
K = [email protected]@information
return K
def update(self, y_knext_prime, mean_knext_k, kalman_gain, C, cov_knext_k):
innovation = y_knext_prime - np.matmul(C,mean_knext_k)
mean_updated = mean_knext_k + np.matmul(kalman_gain, innovation)
self.mean_k_k = mean_updated
cov_updated = (np.identity(2) - kalman_gain@C)@cov_knext_k
self.cov_k_k = cov_updated
return mean_updated, cov_updated
def one_iteration(self, i):
mean_knext_k = self.propagate_mean()
#print(mean_knext_k.shape)
#sys.exit()
cov_knext_k = self.propagate_covariance()
#print(cov_knext_k.shape)
C = self.compute_jac_obs(mean_knext_k)
#print(C.shape)
obs_estimate = self.compute_obs_estimate(C, mean_knext_k)
#print(obs_estimate)
y_knext_prime = self.compute_fake_obs(self.actual_obs[i], obs_estimate, C, mean_knext_k)
K = self.compute_kalman_gain(cov_knext_k, C)
state_estimate_i, covariance_estimate_i = self.update(y_knext_prime, mean_knext_k, K, C, cov_knext_k)
print(covariance_estimate_i)
#sys.exit()
return state_estimate_i, covariance_estimate_i
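# Editorial note on the linearization used by the class above (not part of the original
# file): the augmented state is s = [x, a]^T with assumed model x_{k+1} = a_k * x_k and
# a_{k+1} = a_k, so the state Jacobian is A = [[a, x], [0, 1]] (rows built in
# propagate_mean), and the measurement y = sqrt(x^2 + 1) linearizes to
# C = [x / sqrt(x^2 + 1), 0] (built in compute_jac_obs).  Note that propagating the mean
# as A @ mean yields [2*a*x, a]; a textbook EKF would push the mean through the nonlinear
# model itself, i.e. [a*x, a], and keep A only for the covariance propagation.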
def main(ekf, iters):
x_estimates = []
a_estimates = []
a_estimate_cov = []
for i in range(iters):
x_estimate_i, covariance_estimate_i = ekf.one_iteration(i)
x_estimates.append(x_estimate_i[0])
a_estimates.append(x_estimate_i[1])
a_estimate_cov.append(covariance_estimate_i[0, 0])
return np.array(x_estimates), np.array(a_estimates), np.array(a_estimate_cov)
def plot_results(ground_truth_data, state_estimates, a_estimates, a_estimates_cov):
fig, axs = plt.subplots(2)
fig.suptitle("Estimated States vs. Ground Truth Values")
axs[0].scatter(np.arange(ground_truth_data.shape[0]), ground_truth_data, color="blue", label="Ground Truth State")
axs[0].scatter(np.arange(100), state_estimates, color="green", label="Estimated State")
axs[1].plot(np.array([1.] * 100), color="blue", label="Expected a value")
axs[1].scatter(np.arange(100), a_estimates + a_estimates_cov.reshape(100, 1), color="green", label="Estimated a value")
for a in axs:
a.legend()
a.grid()
plt.show()
if __name__=="__main__":
iters = 100
x_0 = np.random.normal(1., math.sqrt(2))
x_a = x_0
actual_obs, actual_state = generate_ground_truth_data(iters, x_0)
x_0 = 2
a_0 = 1.
state_0 = np.array([x_0, a_0]).reshape(2, 1)
initial_cov = np.diag([2, 2])
ekf = EKF(state_0, initial_cov, actual_obs)
x_estimates, a_estimates, a_estimates_cov = main(ekf, iters)
#plot_results(actual_state, x_estimates, a_estimates, a_estimates_cov)
| [
"[email protected]"
] | |
a04df4572c182738280b576591b01d087a4ddb20 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/iam/admin/v1/iam-admin-v1-py/google/iam/admin_v1/__init__.py | 5de06b42015db50300075194884fb9a7fce4ea19 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,044 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.iam import IAMClient
from .services.iam import IAMAsyncClient
from .types.audit_data import AuditData
from .types.iam import CreateRoleRequest
from .types.iam import CreateServiceAccountKeyRequest
from .types.iam import CreateServiceAccountRequest
from .types.iam import DeleteRoleRequest
from .types.iam import DeleteServiceAccountKeyRequest
from .types.iam import DeleteServiceAccountRequest
from .types.iam import DisableServiceAccountRequest
from .types.iam import EnableServiceAccountRequest
from .types.iam import GetRoleRequest
from .types.iam import GetServiceAccountKeyRequest
from .types.iam import GetServiceAccountRequest
from .types.iam import LintPolicyRequest
from .types.iam import LintPolicyResponse
from .types.iam import LintResult
from .types.iam import ListRolesRequest
from .types.iam import ListRolesResponse
from .types.iam import ListServiceAccountKeysRequest
from .types.iam import ListServiceAccountKeysResponse
from .types.iam import ListServiceAccountsRequest
from .types.iam import ListServiceAccountsResponse
from .types.iam import PatchServiceAccountRequest
from .types.iam import Permission
from .types.iam import QueryAuditableServicesRequest
from .types.iam import QueryAuditableServicesResponse
from .types.iam import QueryGrantableRolesRequest
from .types.iam import QueryGrantableRolesResponse
from .types.iam import QueryTestablePermissionsRequest
from .types.iam import QueryTestablePermissionsResponse
from .types.iam import Role
from .types.iam import ServiceAccount
from .types.iam import ServiceAccountKey
from .types.iam import SignBlobRequest
from .types.iam import SignBlobResponse
from .types.iam import SignJwtRequest
from .types.iam import SignJwtResponse
from .types.iam import UndeleteRoleRequest
from .types.iam import UndeleteServiceAccountRequest
from .types.iam import UndeleteServiceAccountResponse
from .types.iam import UpdateRoleRequest
from .types.iam import UploadServiceAccountKeyRequest
from .types.iam import RoleView
from .types.iam import ServiceAccountKeyAlgorithm
from .types.iam import ServiceAccountKeyOrigin
from .types.iam import ServiceAccountPrivateKeyType
from .types.iam import ServiceAccountPublicKeyType
__all__ = (
'IAMAsyncClient',
'AuditData',
'CreateRoleRequest',
'CreateServiceAccountKeyRequest',
'CreateServiceAccountRequest',
'DeleteRoleRequest',
'DeleteServiceAccountKeyRequest',
'DeleteServiceAccountRequest',
'DisableServiceAccountRequest',
'EnableServiceAccountRequest',
'GetRoleRequest',
'GetServiceAccountKeyRequest',
'GetServiceAccountRequest',
'IAMClient',
'LintPolicyRequest',
'LintPolicyResponse',
'LintResult',
'ListRolesRequest',
'ListRolesResponse',
'ListServiceAccountKeysRequest',
'ListServiceAccountKeysResponse',
'ListServiceAccountsRequest',
'ListServiceAccountsResponse',
'PatchServiceAccountRequest',
'Permission',
'QueryAuditableServicesRequest',
'QueryAuditableServicesResponse',
'QueryGrantableRolesRequest',
'QueryGrantableRolesResponse',
'QueryTestablePermissionsRequest',
'QueryTestablePermissionsResponse',
'Role',
'RoleView',
'ServiceAccount',
'ServiceAccountKey',
'ServiceAccountKeyAlgorithm',
'ServiceAccountKeyOrigin',
'ServiceAccountPrivateKeyType',
'ServiceAccountPublicKeyType',
'SignBlobRequest',
'SignBlobResponse',
'SignJwtRequest',
'SignJwtResponse',
'UndeleteRoleRequest',
'UndeleteServiceAccountRequest',
'UndeleteServiceAccountResponse',
'UpdateRoleRequest',
'UploadServiceAccountKeyRequest',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
7d17bc3781d764fb67f047208d884d5c0ed93d71 | e949b67f3d8a5758817b4861783c6136e33d8c99 | /python/hello1.py | 1c3f8b5025ee568ef9c8f9da94bb5b92c1c8befa | [] | no_license | edt11x/edt-jnk | 1976c6b4cbb1cfe38db731bd520a3d1eb31995bf | e9ceb4fa645c2513b7699cea9fcb31944476efba | refs/heads/master | 2023-08-17T11:15:20.938437 | 2023-08-16T18:53:47 | 2023-08-16T18:53:47 | 20,622,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # File: hello1.py
from Tkinter import *
root = Tk()
w = Label(root, text="Hello World!")
w.pack()
root.mainloop()
| [
"[email protected]"
] | |
ec250829ed80000d1b7ebf7e8e7e6777e2d66ffc | f91cd3d990349e88402aa67117dc3480468f96b2 | /test2/Applications/MonsterKeyboard/keyboardlight-gui.py | 533230a3fd690071c0188fe7fa15f237b4728699 | [
"MIT"
] | permissive | emartisoft/MonsterKeyboardLight | 3832cd495745dcc23bd3c9c5d99bcd44ff868bd3 | fb6e00dad854d541af766d821829cf56ad80b02e | refs/heads/main | 2023-04-20T17:25:13.240851 | 2021-05-10T15:45:01 | 2021-05-10T15:45:01 | 361,874,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,358 | py |
# pip install hidapi
"""
Distributed under the terms of the MIT License.
Created on Apr, 25, 2021
@author: emarti, Murat Ozdemir
@comment: Set mono color for Monster Laptop Keyboard Light
@usage: python3 keyboardlight.py <RED> <GREEN> <BLUE> <BRIGHTNESS>
<RED> <GREEN> <BLUE> 0...255
<BRIGHTNESS> 0...50
"""
import hid
import sys
from tkinter import *
if (len(sys.argv)-1) != 4:
print ("Set mono color for Monster Laptop Keyboard Light")
print ("Usage: python3 keyboardlight.py <RED> <GREEN> <BLUE> <BRIGHTNESS>")
print ("")
print ("<RED> <GREEN> <BLUE> 0...255")
print ("<BRIGHTNESS> 0...50")
print ("example:")
print ("to set color white => RED=255, GREEN=255, BLUE=255")
print ("to set brightness => BRIGHTNESS=20")
print ("python3 keyboardlight.py 255 255 255 20")
sys.exit()
splashWindow = Tk()
x=(splashWindow.winfo_screenwidth() - 640)/2
y=(splashWindow.winfo_screenheight() - 275)/2
splashWindow.geometry('640x275+%d+%d' % (x, y))
splashWindow.title("Monster Keyboard Mono Light")
#splashWindow.resizable(False, False)
mk_png = PhotoImage(file='/Applications/MonsterKeyboard/monsterkeyboard.png')
splashLabel = Label(splashWindow, text = "Monster Keyboard Mono Light", image=mk_png)
splashLabel.pack()
splashWindow.overrideredirect(1)
splashWindow.overrideredirect(0)
def setMonoLight():
vendor_id = 0x048d
product_id = 0xce00
R=sys.argv[1]
#print ("RED: %s" % R)
G=sys.argv[2]
#print ("GREEN: %s" % G)
B=sys.argv[3]
#print ("BLUE: %s" % B)
BR=sys.argv[4]
#print ("BRIGHTNESS: %s" % BR)
brightness=int(BR)
if brightness > 50:
brightness=50
monsterKeyboardLight = hid.device()
monsterKeyboardLight.open(vendor_id, product_id)
#print("Manufacturer: %s" % h.get_manufacturer_string())
# Manufacturer: ITE Tech. Inc.
#print("Product: %s" % h.get_product_string())
# Product: ITE Device(8291)
monsterKeyboardLight.set_nonblocking(0x01)
for x in range(0x08):
monsterKeyboardLight.send_feature_report([0x14, 0x00, x+1, int(R), int(G), int(B), 0x00, 0x00])
monsterKeyboardLight.send_feature_report([0x08, 0x02, x+1, 0x05, brightness, 0x08, 0x00, 0x01])
monsterKeyboardLight.close()
splashWindow.destroy()
splashWindow.after(3000, setMonoLight)
mainloop()
| [
"[email protected]"
] | |
5499832ef749fd1db5ad1554b8f3cb177bfd27b1 | 96aa84a2a838ea4b7c19340db5483e05d1be533b | /gde/migrations/0001_initial.py | d4a4a3c326c757bf9de4adb5c590640a8a0ad874 | [] | no_license | sadj123/Ecommerce-Videogames | e4aa81066217152d1c6e431c6a7e5322fee3c2b4 | a18debd160e20a6aa36ecdcb6f36969b54120037 | refs/heads/master | 2023-01-27T23:30:46.766613 | 2020-12-04T05:05:25 | 2020-12-04T05:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,669 | py | # Generated by Django 3.1.2 on 2020-11-18 22:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=20, unique=True)),
],
),
migrations.CreateModel(
name='Dispatcher',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
('telephone', models.CharField(default='no hay', max_length=20, unique=True)),
('plate', models.CharField(default='NA', max_length=10, unique=True)),
('weight', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('height', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('blood_type', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], default='A', max_length=20)),
('rh_type', models.CharField(choices=[('+', '+'), ('-', '-')], default='+', max_length=20)),
],
),
migrations.CreateModel(
name='Format',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('formats', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('genre', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Platform',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('platform', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Shopping_cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('expiration_date', models.DateField()),
('total', models.FloatField()),
],
),
migrations.CreateModel(
name='State_location',
fields=[
('State', models.CharField(max_length=50, primary_key=True, serialize=False)),
('Zip', models.PositiveSmallIntegerField(unique=True)),
],
),
migrations.CreateModel(
name='Videogame',
fields=[
('videogame_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('rating', models.CharField(choices=[('Early childhood', 'EC'), ('Every one', 'E'), ('Every one 10+', 'E10+'), ('Teen', 'T'), ('Mature', 'M'), ('Rating Pending', 'RP'), ('Adults', 'A')], default='Rating Pending', max_length=20)),
('release_date', models.DateField()),
('engine', models.CharField(choices=[('Unity', 'Unity'), ('Unreal', 'Unreal'), ('GameMaker', 'GameMaker'), ('Godot', 'Godot'), ('AppGameKit', 'AppGameKit'), ('CryEngine', 'CryEngine'), ('Amazon lumberyard', 'Amazon lumberyard'), ('RPG Maker', 'RPG Maker'), ('Lib GDX', 'Lib GDX')], max_length=50)),
('production_cost', models.DecimalField(decimal_places=2, max_digits=18)),
('unit_price', models.DecimalField(decimal_places=2, max_digits=18)),
('administrator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(default=2, editable=False, on_delete=django.db.models.deletion.PROTECT, to='gde.category')),
('formats', models.ManyToManyField(to='gde.Format')),
('genre', models.ManyToManyField(to='gde.Genre')),
('language', models.ManyToManyField(to='gde.Language')),
('platform', models.ManyToManyField(to='gde.Platform')),
],
),
migrations.CreateModel(
name='Store_location',
fields=[
('city', models.CharField(max_length=50, primary_key=True, serialize=False)),
('country', models.CharField(max_length=50)),
('state', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.state_location')),
],
),
migrations.CreateModel(
name='Store',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
('street_number', models.CharField(max_length=50)),
('street_name', models.CharField(max_length=50)),
('phone', models.CharField(max_length=20)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.store_location')),
],
),
migrations.CreateModel(
name='Package',
fields=[
('package_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('unit_price', models.DecimalField(decimal_places=2, max_digits=18)),
('administrator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(default=1, editable=False, on_delete=django.db.models.deletion.PROTECT, to='gde.category')),
('videogames', models.ManyToManyField(to='gde.Videogame')),
],
),
migrations.CreateModel(
name='Dlc',
fields=[
('dlc_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('Release_date', models.DateField()),
('unit_price', models.DecimalField(decimal_places=2, max_digits=18)),
('administrator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(default=3, editable=False, on_delete=django.db.models.deletion.PROTECT, to='gde.category')),
('videogame', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gde.videogame')),
],
),
migrations.CreateModel(
name='Checkout',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Delivery_date', models.DateField()),
('cart', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='gde.shopping_cart')),
('dispatcher', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.dispatcher')),
('store', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.store')),
],
),
migrations.CreateModel(
name='Shopping_cart_videogames',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('units', models.IntegerField()),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gde.shopping_cart')),
('videogame', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.videogame')),
],
options={
'unique_together': {('cart', 'videogame')},
},
),
migrations.CreateModel(
name='Shopping_cart_packages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('units', models.IntegerField()),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gde.shopping_cart')),
('package', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.package')),
],
options={
'unique_together': {('cart', 'package')},
},
),
migrations.CreateModel(
name='Shopping_cart_dlc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('units', models.IntegerField()),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gde.shopping_cart')),
('dlc', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='gde.dlc')),
],
options={
'unique_together': {('cart', 'dlc')},
},
),
]
| [
"[email protected]"
] | |
da9a232684d1049a1740290d61218c564a143248 | 88c7f00aa5c92bcea2272de6ccd952d4fa428aa7 | /archived_material/lecture/6,7-Classes/wheel.py | 0c0936f56e29505aec6a9673e3561227e79968d9 | [] | no_license | XZeusJ/6.189_brief_intro_to_python_2011 | ee0f449fa133c13b5a0005a440249afa242ddddb | acde28fbddc780bab9cb2af46ffcf3bdc3f89d40 | refs/heads/master | 2020-05-26T11:58:54.426142 | 2019-05-23T12:18:22 | 2019-05-23T12:18:22 | 188,224,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | from graphics import *
class Wheel():
def __init__(self, center, wheel_radius, tire_radius):
self.tire_circle = Circle(center, tire_radius)
self.wheel_circle = Circle(center, wheel_radius)
def draw(self, win):
self.tire_circle.draw(win)
self.wheel_circle.draw(win)
def move(self, dx, dy):
self.tire_circle.move(dx, dy)
self.wheel_circle.move(dx, dy)
def set_color(self, wheel_color, tire_color):
self.tire_circle.setFill(tire_color)
self.wheel_circle.setFill(wheel_color)
def undraw(self):
self.tire_circle .undraw()
self.wheel_circle .undraw()
def get_size(self):
return self.tire_circle.getRadius()
def get_center(self):
return self.tire_circle.getCenter()
# Define a main function; if you want to display graphics, run main()
# after you load code into your interpreter
def main():
# create a window with width = 700 and height = 500
new_win = GraphWin('Wheel', 700, 500)
# What we'll need for the wheel...
wheel_center = Point(200, 200) # The wheel center is a Point at (200, 200)
tire_radius = 100 # The radius of the outer tire is 100
# Make a wheel object
new_wheel = Wheel(wheel_center, 0.6*tire_radius, tire_radius)
# Set its color
new_wheel.set_color('red', 'black')
# And finally, draw it
new_wheel.draw(new_win)
# Run the window loop (must be the *last* line in your code)
new_win.mainloop()
# Comment this call to main() when you import this code into
# your car.py file - otherwise the Wheel will pop up when you
# try to run your car code.
main() | [
"[email protected]"
] | |
072a29b2727e1ec364850d4fb199936c6ff2e08e | 2e553f34f8449b24c873ba7960bd28f3204d0884 | /beam/data2.py | 8715173dff69db3cf0d0e7bed132ed94adfae2a6 | [] | no_license | lambert8809/MSG_Machine_learning | d3b26b462192559e2d68aa1a1368bec5adb6b599 | 90f661a1fce06dedd2a71b887c019c59240c310c | refs/heads/master | 2020-04-25T04:37:56.569845 | 2019-03-21T22:17:40 | 2019-03-21T22:17:40 | 172,516,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | # Create database using LHS samlping.
# Usage: call load_data(parameter), parameter is the size of
# samples in one dimension (same size in two dimensions)
from pyDOE import *
import numpy as np
import matplotlib.pyplot as plt
def load_data(sample_size):
sampling = lhs(2,samples=sample_size,criterion='center')
idata = np.zeros((sample_size,2)) #[x,q]
idata[:,0] = 10*sampling[:,0]
idata[:,1] = 10*sampling[:,1]+5
    np.random.shuffle(idata) # randomize the sample order before splitting
trainD, validateD, testD = np.split(idata,[int(0.7*len(idata)),int(0.9*len(idata))])
#x = 10*sampling[:,0]
#q = 10*sampling[:,1]+5
trainL = np.zeros((len(trainD),2))
validateL = np.zeros((len(validateD),2))
testL = np.zeros((len(testD),2))
#u = np.zeros(1000)
#k = np.zeros(1000)
Ei = 1e6
L = 10
for i in range(len(trainD)):
# multiply 1000 to convert m to mm
trainL[i,0] = -1000*trainD[i,1]*L**4/24/Ei*(trainD[i,0]/L-trainD[i,0]**2/L**2)**2
# multiply 1000 to convert the similar order of u
trainL[i,1] = -1000*trainD[i,1]*(L**2-6*L*trainD[i,0]+6*trainD[i,0]**2)/12/Ei
for i in range(len(validateD)):
# multiply 1000 to convert m to mm
validateL[i,0] = -1000*validateD[i,1]*L**4/24/Ei*(validateD[i,0]/L-validateD[i,0]**2/L**2)**2
# multiply 1000 to convert the similar order of u
validateL[i,1] = -1000*validateD[i,1]*(L**2-6*L*validateD[i,0]+6*validateD[i,0]**2)/12/Ei
for i in range(len(testD)):
# multiply 1000 to convert m to mm
testL[i,0] = -1000*testD[i,1]*L**4/24/Ei*(testD[i,0]/L-testD[i,0]**2/L**2)**2
# multiply 1000 to convert the similar order of u
testL[i,1] = -1000*testD[i,1]*(L**2-6*L*testD[i,0]+6*testD[i,0]**2)/12/Ei
return trainD, validateD, testD, trainL, validateL, testL
if __name__ == "__main__":
trainD, validateD, testD, trainL, validateL, testL = load_data(1000)
# Plot data for a quick check
f1 = plt.figure()
f2 = plt.figure()
ax1 = f1.add_subplot(111)
ax1.scatter(trainD[:,0], trainL[:,0])
ax2 = f2.add_subplot(111)
ax2.scatter(trainD[:,0], trainL[:,1])
plt.show()
| [
"[email protected]"
] | |
306fbeb749b5805bb67d154dbca42ba66c88db19 | a0159623c490437013a4896c4b77c73e2f836236 | /add-key-dictionary.py | 2dcf5aeb3b32bc66ee85c289368112e8d79bcf9b | [] | no_license | AjaiKumarMisra/DS_18_July_2021 | 53bcab99b5b40faef50849190de731038a9c707e | 688afa2a43d2e5672494c50f09620e85b2b1f550 | refs/heads/main | 2023-06-27T12:05:26.250838 | 2021-08-02T18:49:12 | 2021-08-02T18:49:12 | 389,232,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py |
distN={
"name":"Ajai Kumar Misra",
"age":"39",
"email":"[email protected]"
}
distN["company"]="FICCI"
distN.update({"name":"Ajai k Misra"})
print(distN) | [
"[email protected]"
] | |
fe88afb172d6cbadfd20c2a3bd761390bdd93769 | 5cfaabb4e3443b661401165991b3e5ab57606301 | /app.py | 848c4ee8e27d31e8fd751c89fd2b95b0b040154b | [] | no_license | subaquatic-pierre/azure-pieplines-exercise | 83cb5dcf6d135c1fc921758b738e0eae3ee36beb | 20064717eb1e4d2197e550d21fcfd27a12a5b738 | refs/heads/master | 2022-12-16T14:05:42.905439 | 2020-09-21T10:42:54 | 2020-09-21T10:42:54 | 297,267,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | from flask import Flask, request, jsonify
from flask.logging import create_logger
import logging
import pandas as pd
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
LOG = create_logger(app)
LOG.setLevel(logging.INFO)
def scale(payload):
"""Scales Payload"""
    LOG.info("Scaling Payload: %s", payload)
scaler = StandardScaler().fit(payload)
scaled_adhoc_predict = scaler.transform(payload)
return scaled_adhoc_predict
@app.route("/")
def home():
html = """
<h1>Awesome WebApp life !</h1>
    <p>The world is an amazing place, each day we can do something to make it a better place</p>
<h3>Sklearn Prediction Home</h3>
"""
return html.format(format)
# The prediction value is logged inside predict() before the response is returned
@app.route("/predict", methods=["POST"])
def predict():
"""Performs an sklearn prediction
input looks like:
{
"CHAS":{
"0":0
},
"RM":{
"0":6.575
},
"TAX":{
"0":296.0
},
"PTRATIO":{
"0":15.3
},
"B":{
"0":396.9
},
"LSTAT":{
"0":4.98
}
result looks like:
{ "prediction": [ 20.35373177134412 ] }
"""
try:
clf = joblib.load("boston_housing_prediction.joblib")
    except Exception:
        LOG.error("Model boston_housing_prediction.joblib could not be loaded")
        return "Model not loaded"
json_payload = request.json
    LOG.info("JSON payload: %s", json_payload)
inference_payload = pd.DataFrame(json_payload)
    LOG.info("Inference payload DataFrame: %s", inference_payload)
scaled_payload = scale(inference_payload)
    prediction = list(clf.predict(scaled_payload))
    LOG.info("Prediction value: %s", prediction)
    return jsonify({"prediction": prediction})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
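# Example request against the /predict route above (illustrative only; assumes the
# app is running locally on the port passed to app.run and that the model file exists):
#   curl -X POST http://localhost:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"CHAS":{"0":0},"RM":{"0":6.575},"TAX":{"0":296.0},"PTRATIO":{"0":15.3},"B":{"0":396.9},"LSTAT":{"0":4.98}}'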
| [
"[email protected]"
] | |
50f661a722b51db91632e8d66450621f803372c6 | 47586d7e5954e8dbdf2101cc3208e50c1fc80db5 | /TP7/productoServices.py | 2f48cc6c28b45c8ade2ff2229053bf768f2ac514 | [] | no_license | MatiasTomasRobbio/ComputacionMatiasRobbio | 124a1bd5d9c0148a7bf3664924a9b580a3bb189d | 0ce0a9a1796cfaaa0651a58867c71c6ac9f85402 | refs/heads/master | 2023-01-01T01:06:05.044894 | 2020-10-26T17:36:47 | 2020-10-26T17:36:47 | 274,050,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,771 | py | from repositorios import Repositorios
class ProductoService:
    # Returns the productosList repository
def get_productosList(self):
return Repositorios.productosList
    # params: a producto object
    # returns: the new id key
def add_producto(self, producto):
lastKey = -1
for key in Repositorios.productosList:
lastKey = key
id_new = int(lastKey) + 1
Repositorios.productosList[id_new] = producto.__dict__
return id_new
    # params: the id key of the producto to delete
def delete_producto(self, id):
if id not in Repositorios.productosList:
raise ValueError("El id a elminar no existe")
del Repositorios.productosList[id]
    # Updates a producto in the productosList repository
    # params: legajo key, producto object
    def update_producto(self, legajo, producto):
        if legajo not in Repositorios.productosList:
            raise ValueError("El legajo no existe")
        Repositorios.productosList.update({legajo: producto.__dict__})
def insertion_sort_precio(self, lista__ordenada, tipo_orden):
lista__ordenada_ordenada = lista__ordenada.copy()
for i in range(1, len(lista__ordenada_ordenada)):
actual = lista__ordenada_ordenada[i]
j = i
            # Shift elements one position to make room for the current value
if tipo_orden == 'ascendente':
while j > 0 and \
lista__ordenada_ordenada[j-1]["_precio"] > actual["_precio"]:
lista__ordenada_ordenada[j] = lista__ordenada_ordenada[j-1]
j = j-1
if tipo_orden == 'descendente':
while j > 0 and \
lista__ordenada_ordenada[j-1]["_precio"] < actual["_precio"]:
lista__ordenada_ordenada[j] = lista__ordenada_ordenada[j-1]
j = j-1
            # insert the current element into its final position
lista__ordenada_ordenada[j] = actual
return lista__ordenada_ordenada
def busqueda_binaria(self, lista__ordenada, x):
lista__ordenada = self.\
            insertion_sort_precio(lista__ordenada, "ascendente")
        # """Binary search
        # Precondition: lista__ordenada is sorted
        # Returns -1 if x is not in lista__ordenada;
        # returns p such that lista__ordenada[p] == x, if
        # x is in lista__ordenada
        # """
        # Searches the whole lista__ordenada by splitting it into segments,
        # treating the complete list as the segment that starts at 0
        # and ends at len(lista__ordenada) - 1.
        izq = 0  # izq holds the start index of the segment
        der = len(lista__ordenada) - 1  # der holds the end index of the segment
        # a segment is empty when izq > der:
while izq <= der:
            # the midpoint of the segment
medio = (izq+der) // 2
            # if the middle element equals the value searched for, return it
if lista__ordenada[medio]["_precio"] == x:
return lista__ordenada[medio]
            # if the value at the midpoint is greater than x, keep searching
            # in the left segment [izq, medio-1], discarding the right half
elif lista__ordenada[medio]["_precio"] > x:
der = medio-1
            # otherwise, keep searching in the right segment [medio+1, der],
            # discarding the left half
else:
izq = medio+1
            # if the loop did not exit, iterate again on the new segment
        # the loop ended without success: the value was not found
raise ValueError("El precio buscado \
no esta en la lista__ordenada ingresada")
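# Usage sketch (illustrative only, not part of the original file; the "_precio"
# field name matches the dictionaries handled by the two methods above):
if __name__ == "__main__":
    service = ProductoService()
    productos = [{"_precio": 30.0}, {"_precio": 10.0}, {"_precio": 20.0}]
    print(service.insertion_sort_precio(productos, "ascendente"))  # sorted by ascending price
    print(service.busqueda_binaria(productos, 20.0))               # returns {"_precio": 20.0}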
| [
"[email protected]"
] | |
5c0c9406f68e39173548029af6c2099576c9d1a3 | c3bd260d2525900843a6616a1ae4693c1c2a0aea | /can-you-get-the-loop.py | 00261747a50e41e221cc207ec9013cd1f7be51a5 | [] | no_license | kurtportelli/can-you-get-the-loop | 3534473a9d468e8ec7c4af596c5c80716336e8a3 | 5ccc8fcc736ccd5d21ee352a9d2e3ac7f55e4d2e | refs/heads/master | 2020-12-31T09:38:50.334988 | 2020-02-07T17:09:00 | 2020-02-07T17:09:00 | 238,981,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def loop_size(node):
index = 0
nodes = {}
while node not in nodes:
nodes[node] = index
node = node.next
index += 1
return index - nodes[node]
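# Local test sketch (illustrative; the kata normally supplies the Node class, so this
# minimal stub is only an assumption for running the function stand-alone):
if __name__ == "__main__":
    class Node(object):
        def __init__(self):
            self.next = None
    chain = [Node() for _ in range(10)]
    for a, b in zip(chain, chain[1:]):
        a.next = b
    chain[-1].next = chain[3]   # tail points back to index 3 -> loop of 7 nodes
    assert loop_size(chain[0]) == 7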
| [
"[email protected]"
] | |
6ff94af8d40fd1672f612fbb0d90444bc127f82e | c1726886f451e793e8574f278d2ebb6e77829c68 | /users_auto.py | 8af41d73cc2b8b0e1617aca9e734d28aecbb3d0d | [] | no_license | Fernando-Devtrix/actividades | a0988d6664a12121b601b125509edfc9019194cc | 8bb5dca52a396cc02575a0a0c793d672fa1db1f0 | refs/heads/master | 2020-06-04T18:35:47.973083 | 2019-06-16T03:41:23 | 2019-06-16T03:41:23 | 192,146,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py |
import sqlite3
conexion = sqlite3.connect('usuarios_auto.db')
cursor = conexion.cursor()
# cursor.execute('''CREATE TABLE usuarios (
# id INTEGER PRIMARY KEY,
# dni VARCHAR(9) UNIQUE,
# nombre VARCHAR(100),
# edad INTEGER,
# email VARCHAR(100))''')
# usuarios = [('53456', 'Luis', 27, '[email protected]'),
# ('23435', 'Juan', 51, '[email protected]'),
# ('12433', 'Carlos', 38, '[email protected]'),
# ('44443', 'Jon', 19, '[email protected]')]
# cursor.executemany("INSERT INTO usuarios VALUES (null,?,?,?,?)", usuarios)
# cursor.execute("INSERT INTO usuarios VALUES ('11111111A', 'Fernando', 31, '[email protected]')")
# cursor.execute("SELECT * FROM usuarios WHERE id=2")
# cursor.execute("UPDATE usuarios SET nombre='Tyrion Lannister' WHERE dni=44443")
# cursor.execute("SELECT * FROM usuarios WHERE dni=44443")
# cursor.execute("INSERT INTO usuarios VALUES (null, '8235875', 'Luis Fernando', 21, '[email protected]')")
for usuario in cursor.execute("SELECT * FROM usuarios"):
print("[{}] {}".format(usuario[0], usuario[1]))
# cursor.execute("DELETE FROM usuarios WHERE dni='8235875'")
# print()
# for usuario in cursor.execute("SELECT * FROM usuarios"):
# print(usuario)
# user = cursor.fetchone()
# print(user)
# usuarios = cursor.fetchall()
# for usuario in usuarios:
# print(usuario)
conexion.commit()
conexion.close() | [
"[email protected]"
] | |
47db460dd47e5a15b0838e9b4937338781832ff0 | 74abb79304446a740e5cee5d48293d1765b623b9 | /web/temple/wsgi.py | 671834cd5438e83a38a3e0012a4596a010858a1d | [] | no_license | jinji-liuhaoda/bigbang | eab19115656a7db6ac58426abfc85247c6c37e81 | b56a4bac47a4f6cf9a8a3165e362c48d18d4cfdd | refs/heads/master | 2021-01-20T19:05:24.528361 | 2016-06-02T04:13:28 | 2016-06-02T04:13:28 | 60,245,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for temple project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "temple.settings")
application = get_wsgi_application()
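# Example (assumed, not stated in the original project) of serving this WSGI app:
#   gunicorn temple.wsgi:application --bind 0.0.0.0:8000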
| [
"[email protected]"
] | |
98dc950bfe28aefc3283378dc7d8e3a624ba9b55 | 911e7b25961067339c31957ff41ebdb3c355d948 | /_utils/python/libs_my/version.py | ab61a1c831c80f48a5d6641192c416395f2946c8 | [] | no_license | qlong2012/notes-1 | c93efcc9a70f786929ef7e4c053e266e2bf354ad | 78913e8235845d4a94dd19f730d607df754da7fe | refs/heads/master | 2020-05-20T01:05:04.678662 | 2019-04-25T10:06:37 | 2019-04-25T10:06:53 | 185,303,355 | 1 | 0 | null | 2019-05-07T02:10:14 | 2019-05-07T02:10:14 | null | UTF-8 | Python | false | false | 3,812 | py | #!python
# -*- coding:utf-8 -*-
"""
Created on 2014/8/29
Updated on 2019/1/18
@author: Holemar
This module is intended for monitoring and debugging.
"""
import os
import sys
import time
import types
import logging
from . import str_util, html_util
__all__=('init', 'get_version')
logger = logging.getLogger('libs_my.version')
# Default request values
CONFIG = {
    'version' : None, # {string} version number
    'db_fun': None, # {Function|list<Function>} function(s) that check whether the database connection is alive (take no args, directly callable, e.g. mysql_util.ping)
}
def init(**kwargs):
"""
    Set the default parameter values used by the get and post functions
    :param {string} version: version number
    :param {Function|list<Function>} db_fun: function(s) that check whether the database connection is alive (take no args, directly callable, e.g. mysql_util.ping)
"""
global CONFIG
CONFIG.update(kwargs)
#def get_version(version, db_fun=None, **kwargs):
def get_version(*args, **kwargs):
'''
    Report the code version; the return value is a dict.
    :param {string} version: version number of this system
    :param {Function|list<Function>} db_fun: function(s) that check whether the database connection is healthy (take no args, directly callable, e.g. mysql_util.ping)
        To check several databases (e.g. redis + mysql), pass a list with one function per database
    :return {dict}: {
        "result":{int} return code, # 0: success, -1: database error, 500: program error
        "reason":{string} description of the error / normal state,
        "version":{string} version number of this program,
        "update_time":{string} last update time of this program, # format: "yyyy-MM-dd HH:mm:ss"
        "now": {string} current system time, # format: "yyyy-MM-dd HH:mm:ss"
        "use_time": {string} time this call took to respond, in seconds
    }
    :example
        version_info = version.get_version(version="agw 1.2.0", db_fun=[cache_redis.ping, mysql_util.ping])
'''
global CONFIG
try:
start_time = time.time()
version = args[0] if len(args) >= 1 else kwargs.pop('version', CONFIG.get('version'))
db_fun = args[1] if len(args) >= 2 else kwargs.pop('db_fun', CONFIG.get('db_fun'))
        # check whether the database connection is alive
db_success = True
if db_fun:
if isinstance(db_fun, (list,tuple,set)):
for fun in db_fun:
db_success = db_success & fun()
elif isinstance(db_fun, types.FunctionType):
db_success = db_fun()
res = {
            'result' : 0 if db_success else -1, # success/failure status, 0: success, -1: database error
            'reason':u'Success' if db_success else u'Database error',
            'version' : version,
            'update_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(__file__))), # last modification time of this file
            'now' : time.strftime('%Y-%m-%d %H:%M:%S'), # system time, returned so the server clock can be sanity-checked
}
use_time = time.time() - start_time
res["use_time"] = "%.4f" % use_time # 使用时间
# 调用时有传其他参数,则格式化成方便人查看的模式返回
if kwargs:
res = html_util.to_html(str_util.to_human(res))
return res
    except Exception as e:
        logger.error(u"[red]Exception while querying the version[/red], %s: %s", e.__class__.__name__, e, exc_info=True, extra={'color':True})
        return {"result":500, "reason":u'Exception during the query, %s: %s' % (e.__class__.__name__, e) }
    # The alternative below for fetching the error info is deprecated; it behaves the same as the code above
    #info = sys.exc_info()
    #logger.error(u"Exception while querying the version, %s: %s" % (info[0].__name__, info[1]), exc_info=True, extra={'color':True})
    #return {"result":500, "reason":u'Exception during the query, %s: %s' % (info[0].__name__, info[1]) }
| [
"[email protected]"
] | |
d08183227f9802ee80dd53b9c054887622ea54c2 | 56281839fc4602886d15020a808f34a7e09ca0f2 | /documentation/proposed-api/python.py | 92e69122496f90c0dc05d1852c8b530763b9c0ca | [
"MIT"
] | permissive | JonathanWilbur/wildboar-logging-protocol | 10e33927e5b9f34f68ad5dec1f57e4e4da65e0d8 | 3eda8ec8da9549d9b737f60ef25d111d880e00ac | refs/heads/master | 2020-05-01T13:35:35.826511 | 2019-03-25T01:53:08 | 2019-03-25T01:53:08 | 177,495,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/python
wl = WELP.WelpLog()
wl.log(
containsPasswords=True,
severity=WELP.MEDIUM,
urgency=WELP.PATIENT,
concerns=[ WELP.CONFIDENTIALITY, WELP.LEGALITY ],
message={
client: "test-client.domain.local",
authFailureReason: "PASSWORD",
username: "bob",
password: "example"
}) | [
"[email protected]"
] | |
93af6aa3c7d9823be507deb2296b8dbbc0b637b1 | e7bbaec2ad3fe63323c81a0a6fe92d0a6838aec5 | /annotation/__init__.py | 13a0eb1de348d6a39a10612bae3e869732e2b61c | [] | no_license | CleitonDeLima/hatedetector | f18b19e0d6e7a3f98824573e8d87e80e89d53cdb | 0127daf7291547c423b70a71c3090ef4543bda11 | refs/heads/master | 2020-03-27T09:20:46.589348 | 2018-08-27T18:49:27 | 2018-08-27T18:49:27 | 146,331,843 | 1 | 0 | null | 2018-08-27T17:33:02 | 2018-08-27T17:33:02 | null | UTF-8 | Python | false | false | 55 | py | default_app_config = 'annotation.apps.AnnotationConfig' | [
"[email protected]"
] | |
55ca8c36c3bcf51394d3e8aa1301fde53694101c | 7af06a197e0a05750f15aab936de307337a37d2d | /venv/Scripts/easy_install-3.7-script.py | ba3ff659746e41869474bada634464119d33e86d | [] | no_license | SemikAlexander/Racer | ab83387d444e58668909ec134b1d8d675d55b4ab | 1c37cc6ead191de59ae734c0c78a2eb8fc83c17d | refs/heads/master | 2020-06-30T10:20:27.406719 | 2019-08-06T08:05:56 | 2019-08-06T08:05:56 | 200,800,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | #!C:\Users\Admin\Documents\Python\Project\MyFirstGame\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
3af05b1f21c8a6962e058ae733c46f9bbcb52dbf | 4a67ca308faab57797d2684e87aba8dffa9f09f8 | /GetIps/GetIps/items.py | 2aef12470a852aee47fb3a10550dfa63d285ed1c | [] | no_license | KeepCodeing/ScrapyProject | 827cf82b550d6ebdd6f088b1e20f74c8e8354e59 | 91b2be73bc44dc65cb588a39ade7dcf957ee4224 | refs/heads/master | 2020-05-27T06:05:20.174495 | 2019-07-29T12:38:24 | 2019-07-29T12:38:24 | 188,514,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GetipsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
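    # Hypothetical fields for an IP/proxy scraper (illustration only; these
    # names are not defined anywhere in the original project):
    # ip = scrapy.Field()
    # port = scrapy.Field()
    # protocol = scrapy.Field()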
| [
"[email protected]"
] | |
970fe942fbd6d146bc70b89fc6dded2601906c84 | e079006d43a545baf2e63ef20dfefd6e778f42c2 | /venv/Scripts/pip3-script.py | 5f89c76539fae0a6888fd5dac4d72b3f0c6589eb | [] | no_license | wailaifeike/myAdmin | 34a41f1f8c7b04defa92d1ed3872888ff5295445 | 8f79f8232767bae73d0fd0c326232ca33203c7e2 | refs/heads/master | 2020-04-07T11:10:29.225049 | 2018-11-20T01:55:46 | 2018-11-20T01:55:46 | 158,315,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #!E:\python_code\KingAdmin\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
cce5ba0b7220124f87ae4dac8555b8803462f6c1 | f0c4f7053853766d6b91f9c7fda810a0007b2694 | /comments/migrations/0001_initial.py | d017bf6a3c978ef19d3c047ec6a7f3dc565a9023 | [] | no_license | mrzhang0320/django-blog | bc146eb1fd815d5ed30be286ad3ad78823abbd0c | 8d760a51192fc3a01acfccceac1e6c024a99e227 | refs/heads/master | 2021-04-12T08:56:50.089791 | 2018-04-19T11:00:36 | 2018-04-19T11:00:36 | 126,841,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-03-26 13:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0003_auto_20180326_2133'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255)),
('url', models.URLField(blank=True)),
('text', models.TextField()),
('created_time', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
| [
"[email protected]"
] | |
79c85f25e82618ee3cc74889b888b34cd05c7a0d | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_hostpool.py | a65bf56113a7255d2eb0c0c644b60eb76e096b90 | [] | no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 14,508 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_hostpool
version_added: '2.9'
short_description: Manage Azure HostPool instance.
description:
- 'Create, update and delete instance of Azure HostPool.'
options:
resource_group_name:
description:
- The name of the resource group. The name is case insensitive.
required: true
type: str
host_pool_name:
description:
- The name of the host pool within the specified resource group
required: true
type: str
location:
description:
- The geo-location where the resource lives
type: str
friendly_name:
description:
- Friendly name of HostPool.
type: str
description:
description:
- Description of HostPool.
type: str
host_pool_type:
description:
- HostPool type for desktop.
type: str
choices:
- Personal
- Pooled
personal_desktop_assignment_type:
description:
- PersonalDesktopAssignment type for HostPool.
type: str
choices:
- Automatic
- Direct
custom_rdp_property:
description:
- Custom rdp property of HostPool.
type: str
max_session_limit:
description:
- The max session limit of HostPool.
type: integer
load_balancer_type:
description:
- The type of the load balancer.
type: str
choices:
- BreadthFirst
- DepthFirst
- Persistent
ring:
description:
- The ring number of HostPool.
type: integer
validation_environment:
description:
- Is validation environment.
type: bool
registration_info:
description:
- The registration info of HostPool.
type: dict
suboptions:
expiration_time:
description:
- Expiration time of registration token.
type: str
token:
description:
- The registration token base64 encoded string.
type: str
registration_token_operation:
description:
- The type of resetting the token.
type: str
choices:
- Delete
- None
- Update
vm_template:
description:
- VM template for sessionhosts configuration within hostpool.
type: str
sso_context:
description:
- Path to keyvault containing ssoContext secret.
type: str
preferred_app_group_type:
description:
- >-
The type of preferred application group type, default to Desktop
Application Group
type: str
choices:
- None
- Desktop
- RailApplications
force:
description:
- Force flag to delete sessionHost.
type: bool
state:
description:
- Assert the state of the HostPool.
- >-
Use C(present) to create or update an HostPool and C(absent) to delete
it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
- name: HostPool_Create
azure_rm_hostpool:
host_pool_name: hostPool1
resource_group_name: resourceGroup1
- name: HostPool_Delete
azure_rm_hostpool:
force: true
host_pool_name: hostPool1
resource_group_name: resourceGroup1
- name: HostPool_Update
azure_rm_hostpool:
host_pool_name: hostPool1
resource_group_name: resourceGroup1
'''
RETURN = '''
tags:
description:
- Resource tags.
returned: always
type: dictionary
sample: null
location:
description:
- The geo-location where the resource lives
returned: always
type: str
sample: null
friendly_name:
description:
- Friendly name of HostPool.
returned: always
type: str
sample: null
description:
description:
- Description of HostPool.
returned: always
type: str
sample: null
host_pool_type:
description:
- HostPool type for desktop.
returned: always
type: str
sample: null
personal_desktop_assignment_type:
description:
- PersonalDesktopAssignment type for HostPool.
returned: always
type: str
sample: null
custom_rdp_property:
description:
- Custom rdp property of HostPool.
returned: always
type: str
sample: null
max_session_limit:
description:
- The max session limit of HostPool.
returned: always
type: integer
sample: null
load_balancer_type:
description:
- The type of the load balancer.
returned: always
type: str
sample: null
ring:
description:
- The ring number of HostPool.
returned: always
type: integer
sample: null
validation_environment:
description:
- Is validation environment.
returned: always
type: bool
sample: null
registration_info:
description:
- The registration info of HostPool.
returned: always
type: dict
sample: null
contains:
expiration_time:
description:
- Expiration time of registration token.
returned: always
type: str
sample: null
token:
description:
- The registration token base64 encoded string.
returned: always
type: str
sample: null
registration_token_operation:
description:
- The type of resetting the token.
returned: always
type: str
sample: null
vm_template:
description:
- VM template for sessionhosts configuration within hostpool.
returned: always
type: str
sample: null
application_group_references:
description:
- List of applicationGroup links.
returned: always
type: list
sample: null
sso_context:
description:
- Path to keyvault containing ssoContext secret.
returned: always
type: str
sample: null
preferred_app_group_type:
description:
- >-
The type of preferred application group type, default to Desktop
Application Group
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.desktop import DesktopVirtualizationAPIClient  # garbled class identifier joined; exact import path is an assumption
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMHostPool(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(
type='str',
required=True
),
host_pool_name=dict(
type='str',
required=True
),
location=dict(
type='str',
disposition='/location'
),
friendly_name=dict(
type='str',
disposition='/friendly_name'
),
description=dict(
type='str',
disposition='/description'
),
host_pool_type=dict(
type='str',
disposition='/host_pool_type',
choices=['Personal',
'Pooled']
),
personal_desktop_assignment_type=dict(
type='str',
disposition='/personal_desktop_assignment_type',
choices=['Automatic',
'Direct']
),
custom_rdp_property=dict(
type='str',
disposition='/custom_rdp_property'
),
max_session_limit=dict(
type='integer',
disposition='/max_session_limit'
),
load_balancer_type=dict(
type='str',
disposition='/load_balancer_type',
choices=['BreadthFirst',
'DepthFirst',
'Persistent']
),
ring=dict(
type='integer',
disposition='/ring'
),
validation_environment=dict(
type='bool',
disposition='/validation_environment'
),
registration_info=dict(
type='dict',
disposition='/registration_info',
options=dict(
expiration_time=dict(
type='str',
disposition='expiration_time'
),
token=dict(
type='str',
disposition='token'
),
registration_token_operation=dict(
type='str',
disposition='registration_token_operation',
choices=['Delete',
'None',
'Update']
)
)
),
vm_template=dict(
type='str',
disposition='/vm_template'
),
sso_context=dict(
type='str',
disposition='/sso_context'
),
preferred_app_group_type=dict(
type='str',
disposition='/preferred_app_group_type',
choices=['None',
'Desktop',
'RailApplications']
),
force=dict(
type='bool'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group_name = None
self.host_pool_name = None
self.force = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMHostPool, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
        self.mgmt_client = self.get_mgmt_svc_client(DesktopVirtualizationAPIClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2019-12-10-preview')
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
return self.results
def create_update_resource(self):
try:
response = self.mgmt_client.host_pools.create_or_update(resource_group_name=self.resource_group_name,
host_pool_name=self.host_pool_name,
host_pool=self.body)
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the HostPool instance.')
self.fail('Error creating the HostPool instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
try:
response = self.mgmt_client.host_pools.delete(resource_group_name=self.resource_group_name,
host_pool_name=self.host_pool_name,
force=self.force)
except CloudError as e:
self.log('Error attempting to delete the HostPool instance.')
self.fail('Error deleting the HostPool instance: {0}'.format(str(e)))
return True
def get_resource(self):
try:
response = self.mgmt_client.host_pools.get(resource_group_name=self.resource_group_name,
host_pool_name=self.host_pool_name)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMHostPool()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6da2b8053f343db28341456672ae46faaa81215e | 5275a2bb918731ec4ff7d2397f7892da3a5095b4 | /create_indices.py | bf319b488d922195e036ed81b8a75eec3b4f64f9 | [] | no_license | SinhaPrateek/ms_aichallenge | f632590d20af2f84de247aae50837f29bc01f00a | 9e90cfe7865a28d6eb2887312b8d18c69061bd3f | refs/heads/master | 2023-03-14T08:43:07.097282 | 2020-02-15T09:02:39 | 2020-02-15T09:02:39 | 346,091,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | test_df = pd.read_csv("/data/downloaded/eval2_unlabelled.tsv",sep = '\t', header = None)
test_df = test_df.rename(columns = {0:'index',3:4})
test_df.to_csv('data/test_eval2.tsv',sep = '\t',index = False)
query_series = test_df.loc[:,'index']
query_unique = query_series.unique()
query_unique_df = pd.DataFrame(query_unique)
query_unique_df.to_csv('data/query_ids/test.ids',header = None,index = False)
| [
"[email protected]"
] | |
1c1e5812836bde266ae9d4377ad91980e1b3dbd8 | 893037fa36fc97cfb094eadc8bed90a97132ff78 | /0521/coin.py | fe14e4a6c87d1db4cdd24ad0185f262ff732aefd | [] | no_license | bobe7109/python | 7c001d86173b484a462a65d2c7bdc59d426ec1f3 | 5271caf24db4d2607612d75b82faf315e2f63c05 | refs/heads/main | 2023-05-14T22:08:51.900687 | 2021-06-04T04:56:23 | 2021-06-04T04:56:23 | 344,701,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 21 15:07:18 2021
@author: Mac_1
"""
import turtle as t
import random as r
screen = t.Screen()
image1 = "fornt.gif"
image2 = "back.gif"
screen.addshape(image1)
screen.addshape(image2)
while True:
t.delay(500)
coin = r.randint(0,1)
    if coin == 0:
t.shape(image1)
t.stamp()
else:
t.shape(image2)
t.stamp()
t.exitonclick()
| [
"[email protected]"
] | |
e383855a70f56ea9ef0acb9a3d8922f2b7be822d | 9ef2facfe5824e18b3a15a536627ec2eae171a5b | /GUI.spec | ce2ba5660b3776ca8a6661b2ce1bd81c8e8b0fbb | [] | no_license | landonleighredline/AutomationGUI | c3137e24316d8980787990a89900c2076dc749c0 | a811cdd4a806ad631b17c0636d2cfb305374b336 | refs/heads/main | 2023-05-04T06:36:57.426406 | 2021-05-18T12:37:53 | 2021-05-18T12:37:53 | 365,245,210 | 0 | 0 | null | 2021-05-10T16:56:53 | 2021-05-07T13:38:25 | TeX | UTF-8 | Python | false | false | 878 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['GUI.py'],
pathex=['C:\\Users\\LandonLeigh\\Documents\\GitHub\\AutomationGUI'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='GUI',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
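# Assumed build command for this PyInstaller spec (not documented in the repo):
#   pyinstaller GUI.spec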
| [
"[email protected]"
] | |
8cebd2a1b7a7bf695c15249257baa0945a5c4da8 | 7779d11721b0f6df8620f28c32e33823a97ecf12 | /snapchat.py | eac377fec15e509795fe7acdea63b202b605f80c | [] | no_license | crepppy/snapchat-bot | 5a88a846b9af18829a6a5c242fd17e1c0cc42c07 | da3ed9c85e7ec1b853c5ea2499b8baa19bd94a19 | refs/heads/main | 2023-05-30T08:00:19.018777 | 2021-06-03T11:46:10 | 2021-06-03T11:46:10 | 329,726,288 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | import subprocess
import os
from discord.ext import commands
import discord
import asyncio
import time
import sys
FIRST_ITEM = ("680", "480")
SEARCH_BUTTON = ("225", "130")
CANCEL_BUTTON = ("1020", "120")
# OWNER_ID = 523177775901966358
messages = {}
owner = None
try:
owner = int(sys.argv[1])
except Exception as e:
print("No arguments given - assuming no react")
def find_user(user):
# Restart the snapchat app
subprocess.run("adb shell am force-stop com.snapchat.android", shell=True)
subprocess.run("adb shell am start com.snapchat.android/.LandingPageActivity", shell=True)
# Search for user
time.sleep(1)
subprocess.run(["adb", "shell", "input", "tap", *SEARCH_BUTTON])
time.sleep(.5)
subprocess.run(["adb", "shell", "input", "text", '"' + user + '"'])
time.sleep(.6)
subprocess.run(["adb", "shell", "input", 'tap', *FIRST_ITEM])
bot = commands.Bot(command_prefix='s!', case_insensitive=True)
@bot.event
async def on_ready():
print("Bot ready!")
@bot.command()
async def msg(ctx, user, *message):
if owner is not None:
# Owner needs to confirm
        messages[ctx.message.content] = (user, ' '.join(message))
await ctx.message.add_reaction("👍")
else:
send_msg(user, ' '.join(message))
@bot.event
async def on_reaction_add(reaction, u):
if owner is None or u.id != owner:
return
if(reaction.message.content.startswith("s!msg ")):
user, message = messages.pop(reaction.message.content)
send_msg(user, message)
def send_msg(user, message):
message = message.replace(" ", "\\ ").replace("'", "\'")
find_user(user)
time.sleep(.8)
subprocess.run(f"adb shell input text '{message}'", shell=True)
subprocess.run("adb shell input keyevent KEYCODE_ENTER", shell=True)
subprocess.run(["adb", "shell", "input", "tap", *CANCEL_BUTTON])
if __name__ == "__main__":
bot.run(os.getenv("BOT_TOKEN"))
| [
"[email protected]"
] | |
0cd2de579d8b629295187fd580a1749de33473e4 | b0eb135342b21ba1eea718453df7d10a146632d4 | /functionModules/fkRigTest.py | 932c6dd55be2200296b477c6fcb742c89be6d2f9 | [] | no_license | siamkiw/kiwToolModules | da6028d3c4379e1621ffca0c5d333957a05bc621 | c1c39691664526e024811cdde807893d3aa053da | refs/heads/master | 2022-03-11T06:47:00.150504 | 2019-10-22T11:16:53 | 2019-10-22T11:16:53 | 216,794,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,661 | py | import maya.cmds as mc
def createFkZeroGrp(zGrpName='', ojName=''):
mc.group(em=True, n=str(zGrpName))
zGrpCon = mc.parentConstraint(ojName, zGrpName, mo=False)
mc.delete(zGrpCon)
mc.parent(ojName, zGrpName)
# mc.select( clear=True )
def stretchFk(nodeMultName, nodeAddName):
ctrl = mc.ls(sl=True)[0]
ojStretch = mc.ls(sl=True)[1]
mc.createNode('multDoubleLinear', n=nodeMultName)
mc.createNode('addDoubleLinear', n=nodeAddName)
mc.addAttr(ctrl, ln='Stretch', at="double", dv=0, k=True)
ojStretchTy = mc.getAttr(ojStretch + '.ty')
mc.setAttr(nodeMultName+ '.input2', 0.1 )
mc.setAttr(nodeAddName + '.input2', ojStretchTy )
mc.connectAttr(ctrl + '.Stretch', nodeMultName + '.input1')
mc.connectAttr(nodeMultName + '.output', nodeAddName + '.input1')
mc.connectAttr(nodeAddName + '.output', ojStretch + '.ty')
# mc.select( clear=True )
# gr.StretchFk('test1_stretchFk_Mult1', 'test1_stretchFk_add1')
def squashFk(nodeMultName, nodeAddName):
ctrl = mc.ls(sl=True)[0]
ojSquash = mc.ls(sl=True)[1]
mc.createNode('multDoubleLinear', n=nodeMultName)
mc.createNode('addDoubleLinear', n=nodeAddName)
mc.addAttr(ctrl, ln='Squash', at="double", dv=0, k=True)
mc.setAttr(nodeMultName+ '.input2', 0.1 )
mc.setAttr(nodeAddName + '.input2', 1 )
mc.connectAttr(ctrl + '.Squash', nodeMultName + '.input1')
mc.connectAttr(nodeMultName + '.output', nodeAddName + '.input1')
mc.connectAttr(nodeAddName + '.output', ojSquash + '.sx')
mc.connectAttr(nodeAddName + '.output', ojSquash + '.sz')
# mc.select( clear=True )
def createGmbl(gmblName, ojName):
sx = mc.getAttr(ojName + '.sx')
sz = mc.getAttr(ojName + '.sz')
mc.circle(nr=(0, 1, 0), c=(0, 0, 0), n=gmblName)
mc.setAttr(gmblName + '.sx', sx*0.70)
mc.setAttr(gmblName + '.sz', sz*0.70)
gmblCon = mc.parentConstraint(ojName, gmblName, mo=False)
mc.delete(gmblCon)
mc.parent(gmblName, ojName)
mc.makeIdentity(gmblName, apply=True )
mc.delete(gmblName, ch = 1)
def createCtrlSet():
ctrlList = mc.ls(sl=True)
for item in ctrlList:
ojName = item.split('_')[0]
zGrpName = ojName + '_L_zGrp'
gmblName = ojName + '_L_gmblCtrl'
createFkZeroGrp(zGrpName, item)
createGmbl(gmblName, item)
##############################################
def zeroGroup(obj1 = '', obj2 = '', name = ''):
ord = mc.xform(obj1, q = True, roo = True)
grp = mc.group(em = True , n = name)
mc.xform(grp, roo = ord)
con = mc.parentConstraint(obj2, grp, mo = False)
mc.delete(con)
mc.parent(obj1, grp)
mc.makeIdentity(obj1, a = True)
mc.setAttr(obj1 + '.rp', 0,0,0)
mc.setAttr(obj1 + '.sp', 0,0,0)
mc.select(obj1, obj2)
def createFkZeroGrp(zGrpName='', ojName=''):
mc.group(em=True, n=str(zGrpName))
zGrpCon = mc.parentConstraint(ojName, zGrpName, mo=False)
mc.delete(zGrpCon)
mc.parent(ojName, zGrpName)
# mc.select( clear=True )
def nameing(obj):
list = obj.split('_')
name = list[0]
type = list[-1].capitalize()
side = ''
desc = ''
if len(list) > 2 :
sideList = ('CNT', 'LFT', 'RGT', 'UPR', 'LWR', 'FNT', 'BCK')
for each in sideList :
if each in list[1] :
side = each
desc = list[1].split(side)[0]
break
else :
desc = list[1]
print (name, desc, side, type)
tmpDesc1 = desc + type + 'Zro' + side
tmpDesc2 = tmpDesc1[0].lower() + tmpDesc1[1:]
nameList = [name, tmpDesc2, 'grp']
grpName = ('_').join (nameList)
print grpName
| [
"[email protected]"
] | |
9fce81d8dab91750c3d26cc01884d02c3d8bca2e | 575e87d92d73dd08774a7fd3e5a3b08ebf50305f | /Python/Aulas/Nerps/Tecnicas/Busca_binaria/Ogros.py | fa2a141cbbf0c7df64f6cca5a9fe3f6a2e6105fa | [] | no_license | LuisAugusto0205/C-digos-python | d6f7e4744175ff6c1a679161cbde0b4a61809f01 | dd88c84e6b151e4cd0ccf38caa91b3c3a48fbc21 | refs/heads/master | 2023-01-14T17:03:59.212494 | 2020-11-19T21:09:07 | 2020-11-19T21:09:07 | 314,340,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | def main():
n, m = [int(x) for x in input().split()]
faixas = [int(x) for x in input().split()]
premio = [int(x) for x in input().split()]
st = ''
for x in input().split():
i = busca_bin(faixas, int(x))
st += str(premio[i]) + ' '
print(st[:-1])
def busca_bin(v, x):
s = len(v) - 1
i = 0
resp = len(v)
while s >= i:
m = (s+i)//2
if v[m] > x:
resp = m
s = m - 1
else:
i = m + 1
return resp
main()
| [
"[email protected]"
] | |
adaf37aa02e07fed2e43f734bceb4f34c4c72df5 | aff11077baaa416ad6bb84f7d61afd1b8bc41854 | /DragDrop.py | fe0551a41418cfc4c2e2de1f0d216c8eaf45d340 | [] | no_license | afinapd/selenium-python | 7f93c43f0aeb31558efeb51b8a577477a06c9674 | ffa7b24f7c3f899e395f087cb1381b72e1f3e346 | refs/heads/master | 2023-04-19T14:12:49.163808 | 2021-05-16T16:47:59 | 2021-05-16T16:47:59 | 367,692,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from selenium import webdriver
from selenium.webdriver import ActionChains
from Locators.Constants import Constants
driver = webdriver.Chrome(executable_path=Constants.driver)
driver.get("http://testautomationpractice.blogspot.com/")
driver.maximize_window()
drag=driver.find_element_by_xpath("//*[@id='draggable']")
drop=driver.find_element_by_xpath("//*[@id='droppable']")
actions=ActionChains(driver)
actions.drag_and_drop(drag,drop).perform() | [
"[email protected]"
] | |
7b0633d2e32725393f3108a89b86cb0c0c9b0f65 | d147e8cbe78b9e72cb64fbd1c0be9e8b728ae177 | /Exercise8/exercise08_challenge2.py | dc536a35417a1398f53e165cba6e27b8ec7d6fa5 | [] | no_license | luwangg/PythonScriptingForArcGIS | a77daeb50c78418849229464b87d9e8693031cf2 | 77a6676ba8572d2d2344b17e1e14f4e1532d574d | refs/heads/master | 2020-03-23T13:17:02.377691 | 2018-04-25T16:34:33 | 2018-04-25T16:34:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | import arcpy
from arcpy import env
env.workspace = "C:/EsriPress/Python/Data/Exercise08"
fc = "Hawaii.shp"
newfc = "Results/Hawaii_single.shp"
arcpy.MultipartToSinglepart_management(fc, newfc)
spatialref = arcpy.Describe(newfc).spatialReference
unit = spatialref.linearUnitName
cursor = arcpy.da.SearchCursor(newfc, ["SHAPE@"])
for row in cursor:
print ("Perimeter: {0} square {1}".format(row[0].length, unit))
print ("Area: {0} square {1}".format(row[0].area, unit))
| [
"[email protected]"
] | |
fe9feb76f91f26a7eadbd0136adb60c853b437a6 | b1e6e1e5789d302a9213bcbdf85887cfcf7f84a3 | /src/youtubeScraper.py | e43ca9f4d8793309fa18614b7327e46b29e4b1e6 | [] | no_license | rachidnajjar/youtube-scraper | cadbcc0d71a41ca75c4bb7f4bf112b78f559744b | 3e81764392d5bb4d33a89d43e86d2b257926e6dd | refs/heads/master | 2020-04-13T23:47:30.546641 | 2018-12-31T13:13:52 | 2018-12-31T13:13:52 | 163,515,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | '''
Created on 24/12/2018
@author: Rachid
'''
import requests
from bs4 import BeautifulSoup as bs
from pytube import YouTube
# query YouTube for a particular search string
base = "https://www.youtube.com/results?search_query="
qstring = "boddingtons+advert"
r = requests.get(base+qstring)
# extract the html of the search results page using BeautifulSoup
page = r.text
soup=bs(page,'html.parser')
# extract the links to the individual videos
vids = soup.findAll('a',attrs={'class':'yt-uix-tile-link'})
videolist=[]
for v in vids:
tmp = 'https://www.youtube.com' + v['href']
videolist.append(tmp)
print(tmp)
count=0
for item in videolist:
# increment counter:
count+=1
# initiate the class:
yt = YouTube(item)
yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()
# have a look at the different formats available:
#formats = yt.get_videos()
# grab the video:
#video = yt.get('mp4', '360p')
# set the output file name:
#yt.set_filename('Video_'+str(count))
# download the video:
#video.download('./') | [
"[email protected]"
] | |
26c35625b862f890f6996fd1bc85d70daa1ecac9 | 9410a94667c05f45761204a6210d334cd70426ec | /code/script/rq1-apfdc.py | 18a285b8b866ac73d86a886400771d421acecdda | [] | no_license | L2Pri/L2Pri | b0c7b1ecff5fdf8711f8081a9a080651f47390b9 | 458a467bea4489d4ca5b5adb944596ea4a35288d | refs/heads/main | 2023-04-03T21:19:54.119616 | 2021-04-20T15:12:44 | 2021-04-20T15:12:44 | 348,381,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | import os
import numpy as np
def readFile(filepath):
f = open(filepath)
content = f.read()
f.close()
return content.splitlines()
APFDType = [
'APFDcL2RStatement-use',
'APFDcGTStatement',
'APFDcGAStatement',
'APFDcGEStatement',
'APFDcARPStatement',
'APFDcTTStatement',
'APFDcTAStatement',
'APFDcTGEStatement',
'APFDcTARPStatement',
'APFDcTOStatement',
'APFDcOriFeatureBinaryStatement'
]
apps = [
'L2R',
'UGT',
'UGA',
'UGE',
'UARP',
'AGT',
'AGA',
'AGE',
'AARP',
'TO',
'L2Rori'
#'PerMutant',
#'Graph'
#'BestL2R'
]
def getAPFDc(filepath):
tmp = readFile(filepath)
for i in range(len(tmp)):
tmp[i] = eval(tmp[i])
#print(np.mean(t1))
return np.mean(tmp)
def test():
path = '/devdata2/zjy/fse-extension/subjects/experiment/java-uuid-generator/'
#print(t2)
for app in APFDType:
t2 = getAPFDc(path + 'training/%s_200'%app)
print('%s : %s'%(app,t2))
print('*************')
t1 = getAPFDc(path+'new-order/APFDcGraphStatement')
#t2 = getAPFDc(path+'training/APFDcARPStatement_200')
print('%s : %s'%('APFDcGraphStatement',t1))
print('*************')
t3 = getAPFDc(path+'new-order/APFDcL2RStatement')
print('%s : %s'%('APFDcL2RStatement',t3))
#test()
if __name__ == '__main__':
path = '/devdata2/zjy/fse-extension/subjects/experiment/'
subjects = readFile(path + 'uselist-sc')
#subjects.remove('jsoup')
f = open('../result/rq1-apfdc-new','w')
info_dict = {}
sorted_list = []
for i,subject in enumerate(subjects):
subject_path = path + subject + '/'
tmp_list = []
for j,app in enumerate(APFDType):
if j==0:
l2r = getAPFDc(subject_path + 'new-order/APFDcL2RStatement-use')
tmp_str = str(round(l2r,3))
tmp_list.append(r'\textbf{'+tmp_str+'0'*(5-len(tmp_str))+'}')
elif j == 10:
l2rori = getAPFDc(subject_path + 'new-order/APFDcOriFeatureBinaryStatement')
tmp_str = str(round(l2rori,3))
tmp_list.append(tmp_str+'0'*(5-len(tmp_str)))
else:
tmp_apfdc = getAPFDc(subject_path+'training/%s_200'%app)
tmp_str = str(round(tmp_apfdc,3))
tmp_list.append(tmp_str+'0'*(5-len(tmp_str)))
#print(tmp_list)
#print(type(tmp_list))
#print('& '.join(tmp_list))
info_dict[subject.lower()] = '& '.join(tmp_list)
sorted_list.append(subject.lower())
sorted_list.sort()
tt = ['rome','dictomaton']
for i,subject in enumerate(sorted_list):
f.write('%s& '%(i+1) + info_dict[subject] + r' \\' + '\n')
if subject in tt:
print('%s : %s'%(i+1, subject))
f.close()
| [
"[email protected]"
] | |
f712e5c47ead3e9f58959f91e833df50b42d0301 | 0d47033b99c82492b3f46658db7f991a19513fe0 | /src/nm/__init__.py | ead65609cb42e3d1e714bcd1019ac456b67326ab | [] | no_license | abbbe/mitmer | 0b9e4bb6670964e4db5dbd2d25beb2c864baee17 | ac5905e7d19b3f014700b43fcbb9293da13d19f5 | refs/heads/master | 2021-05-27T08:56:16.259058 | 2012-05-27T03:52:36 | 2012-05-27T03:52:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | '''
This module gives access to the configuration of Linux network stack.
There is Stack singleton object containing other configuration elements.
'''
import os
import subprocess
import threading
OVS_CONTROLLER_URL = 'tcp:127.0.0.1:6633'
class Iface(object):
def __init__(self, up, loopback, carrier):
self.up = up
self.loopback = loopback
self.carrier = carrier
def set_duplex(self, duplex): self.duplex = duplex
def set_speed(self, speed): self.speed = speed
class Stack(object):
def run(self):
out = run_ext_command(['ip', 'link', 'list'])
self.parse_ip_links_out(out)
for link in self.links:
if not link.loopback:
out = run_ext_command(['ethtool', link])
self.parse_ethtool_out(link, out)
def parse_ip_links_out(self, out):
self.links = dict()
pass
def parse_ethtool_out(self, link, out):
pass
def get_iface(self, iface):
return self.links[iface]
def set_iface_policy(self, iface, policy):
self.links[iface].policy = policy
def set_global_policy(self, policy):
self.policy = policy
class Controller(threading.Thread):
should_stop = False
bridge_name = 'mitm0'
def __init__(self):
threading.Thread.__init__(self, target = self.run)
self.initialized = False
def run(self):
pass
def stop(self):
self.should_stop = True
def sudo(self, cmd, bg=False):
sudo_cmd = ['sudo']
sudo_cmd.extend(cmd)
if subprocess.call(sudo_cmd) != 0:
raise RuntimeError('command "%s" has failed"' % sudo_cmd)
def spawn(self, cmd):
return subprocess.Popen(cmd)
def init_mitm_switch(self, failmode_standalone=False):
assert not self.initialized, 'double initialization'
self.sudo(['ovs-vsctl', 'add-br', self.bridge_name])
try:
# assume freshly created vswitch, no need to clean
self.sudo(['ovs-vsctl', 'set-fail-mode', self.bridge_name, 'secure'])
self.sudo(['ovs-vsctl', 'add-port', self.bridge_name, self.iface1])
self.sudo(['ovs-vsctl', 'add-port', self.bridge_name, self.iface2])
self.sudo(['ovs-vsctl', 'set-controller', self.bridge_name, OVS_CONTROLLER_URL])
self.pox = self.spawn(['env', 'PYTHONPATH=../pox', 'python',
'../pox/pox.py', '--no-cli', 'forwarding.l2_learning'])
self.initialized = True
except:
try:
self.sudo(['ovs-vsctl', 'del-br', self.bridge_name])
except:
pass
raise
def deinit_mitm_switch(self):
if self.initialized:
self.sudo(['ovs-vsctl', 'del-br', self.bridge_name])
self.pox.terminate()
self.initialized = False
def set_mitm_ifaces(self, iface1, iface2):
self.iface1 = iface1
self.iface2 = iface2
def enable_mitm_tap(self):
raise NotImplemented()
def disable_mitm_tap(self):
raise NotImplemented()
def add_metaflow(self, mf):
raise NotImplemented()
def remove_metaflow(self, mf):
raise NotImplemented()
stack = Stack()
controller = Controller()
| [
"[email protected]"
] | |
8c01959fbe811f9a1269ca6a399e3ddb844a2c1f | 7ad5cf675dea79064a1ce7e54b718dd539ad5862 | /cms/management/commands/populate.py | 9c92bce781627aad0eadaf029c72a55de13ebb90 | [] | no_license | DanishKhakwani/csss-site | ee44f79d157c2726cb44d64e7b01a1ab8e3ecb44 | bb7ad7f5b359cf22259c495b01343297fcc6ef02 | refs/heads/master | 2020-12-31T03:35:24.855686 | 2016-03-15T00:24:29 | 2016-03-15T00:24:29 | 54,367,731 | 0 | 1 | null | 2016-03-21T07:19:04 | 2016-03-21T07:19:03 | null | UTF-8 | Python | false | false | 4,573 | py | #!/usr/bin/env python
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
from cms.models import Category, Post, Announcement
from datetime import datetime
class Command(BaseCommand):
help = "Populates database"
def handle(self, *args, **options):
lorem = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud
exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."""
categories = [
{'name':'About', 'slug':'about'},
{'name':'Events', 'slug':'events'},
{'name':'Projects', 'slug':'projects'},
{'name':'Comp Sci Guide', 'slug':'comp-sci-guide'},
]
posts = {
'About':[
{'title':'General', 'slug':'general', 'content':lorem},
{'title':'Exec', 'slug':'exec', 'content':lorem},
{'title':'Photos', 'slug':'photos', 'content':lorem},
{'title':'Docs', 'slug':'docs', 'content':lorem},
{'title':'Contact', 'slug':'contact', 'content':lorem},
],
'Events':[
{'title':'Upcoming', 'slug':'upcoming', 'content':lorem},
{'title':'Frosh Week', 'slug':'frosh-week', 'content':lorem},
{'title':'Workshops', 'slug':'workshops', 'content':lorem},
],
'Projects':[
{'title':'Hack Time', 'slug':'hack-time', 'content':lorem},
{'title':'Dev Tools', 'slug':'dev-tools', 'content':lorem},
{'title':'Team Names', 'slug':'team-names', 'content':lorem},
{'title':'CSSS Github', 'slug':'csss-github', 'content':lorem},
],
'Comp Sci Guide':[
{'title':'Course Map', 'slug':'course-map', 'content':lorem},
{'title':'Software', 'slug':'software', 'content':lorem},
{'title':'Co-op', 'slug':'co-op', 'content':lorem},
],
}
announcements = [
{'title':'First Thing', 'author':'Corbettron9000','slug':'first-thing','content':lorem,'created':datetime.now()},
{'title':'Second Thing', 'author':'Colintron9000','slug':'second-thing','content':lorem,'created':datetime.now()},
{'title':'Third Thing', 'author':'Sidtron9000','slug':'third-thing','content':lorem,'created':datetime.now()},
{'title':'Fourth Thing', 'author':'Kennethtron9000','slug':'fourth-thing','content':lorem,'created':datetime.now()},
{'title':'Fifth Thing', 'author':'Jordantron9000','slug':'fifth-thing','content':lorem,'created':datetime.now()},
]
# Create the Categories
for category in categories:
try:
c = Category(**category)
c.save()
self.stdout.write(category['name'] + ' category created')
# Create the Posts
for post in posts[category['name']]:
try:
p = Post(**post)
p.category = c
p.save()
self.stdout.write(post['title'] + ' post created')
except IntegrityError:
self.stdout.write(post['title'] + ' post skipped')
except IntegrityError:
self.stdout.write(category['name'] + ' category skipped')
self.stdout.write('Skipping all Posts in ' + category['name'])
# Create the Announcements
for announcement in announcements:
try:
a = Announcement(**announcement)
a.save()
self.stdout.write(announcement['title'] + ' announcement created')
except IntegrityError:
self.stdout.write(announcement['title'] + ' announcement skipped') | [
"[email protected]"
] | |
d1b7a24275f548220eaa9b251bac4b7993ba5ee7 | f18dc02a1ddd1b8c4296260ace86513c7eb9e895 | /main.py | 24674b3f2e60ff6c7c5f2d30a92a075084b012be | [] | no_license | petrarka/crossfair_server | 592dd4863002ea7d63c167d3000cb2b1088b06de | 50ebb06b4d9f1e112bd957e8e63697defea44d70 | refs/heads/master | 2022-04-17T10:05:18.376934 | 2020-04-12T12:40:41 | 2020-04-12T12:40:41 | 255,079,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,550 | py | import asyncio
import json
import logging
import websockets
import random
from collections import OrderedDict
logging.basicConfig()
USERPOSE = []
USERS = OrderedDict()
WINERS = []
LOSERS = []
async def win():
winers = set()
agents = set()
killers = set()
users = list(USERS.values())
for user in users:
if user["role"] == "agent":
USERPOSE[user["shoot"]]["alive"] = False
if users[user["shoot"]]["role"] == "taunt" or users[user["shoot"]]["role"] == "red taunt":
winers.add(users[user["shoot"]]["index"])
for user in users:
if user["role"] == "killer" and USERPOSE[user["index"]]["alive"]:
USERPOSE[user["shoot"]]["alive"] = False
if users[user["shoot"]]["role"] == "taunt" or users[user["shoot"]]["role"] == "blue taunt":
winers.add(users[user["shoot"]]["index"])
boss = 0
for user in users:
if user["role"] == "boss":
boss = user["index"]
if user["role"] == "npc" and USERPOSE[user["index"]]["alive"]:
winers.add(user["index"])
if user["role"] == "boss" or user["role"] == "agent" or user["role"] == "blue taunt":
agents.add(user["index"])
if user["role"] == "killer" or user["role"] == "red taunt" :
killers.add(user["index"])
if USERPOSE[boss]["alive"]:
winers.update(agents)
print("blue")
else:
print("red")
winers.update(killers)
losers = set(range(len(users)))
losers.difference_update(winers)
for loser in losers:
LOSERS.append([loser, USERPOSE[loser]["name"]])
for winer in winers:
WINERS.append([winer, USERPOSE[winer]["name"]])
await notify_all()
async def on_admin(data, websocket):
if data["cm"] == "start":
print("start")
give_cards(data["num"])
await notify_all()
if data["cm"] == "shuffle":
print("shuffle " + str(data["num"]))
await shuffle(data["num"])
if data["cm"] == "shift":
print("shift")
await shift()
if data["cm"] == "win":
print("win")
await win()
if data["cm"] == "dsc":
print("dsc")
await unregister(websocket)
if data["cm"] == "reset":
print("restart")
await reset()
print(LOSERS)
print(WINERS)
print(USERPOSE)
print(USERS)
async def reset():
global WINERS
global LOSERS
for key in USERS:
USERS[key]["role"] = None
USERS[key]["shoot"] = 0
for user in USERPOSE:
user["alive"] = True
user["role"] = "🌰"
WINERS = []
LOSERS = []
await notify_all()
async def shift():
print("shift")
users = list(USERS.values())
tmp = users[0]["role"]
for x in range(1, len(USERS)):
tmp, users[x]["role"] = users[x]["role"], tmp
users[0]["role"] = tmp
z = 0
for x in USERS:
USERS[x]=users[z]
z+=1
await notify_all()
async def shuffle(id):
l = list(USERS.values())
k = list(USERS.keys())
roles = [l[id-1]["role"], l[id]["role"], l[id+1]["role"]]
random.shuffle(roles)
USERS[k[id-1]]["role"], USERS[k[id]]["role"], USERS[k[id+1]]["role"] = roles[0], roles[1], roles[2]
#for user in list(USERS.keys())[id-2:id+1]:
# await notify(user)
await notify_all()
def give_cards(n):
if n == 5:
roles = ["boss", "agent", "killer", "red taunt", "npc"]
elif n == 6:
roles = ["boss", "agent", "killer", "blue taunt", "killer", "npc"]
else:
print("jopa with players")
random.shuffle(roles)
usersObj = list(USERS.values())
for x in range(n):
usersObj[x]["role"] = roles[x]
async def on_message(data, websocket):
index = USERS[websocket]["index"]
if data["cm"] == "give":
print("start")
give_cards(5)
await notify_all()
elif data["cm"] == "chname":
USERPOSE[index]["name"] = data["name"]
await notify_all()
elif data["cm"] == "chrole":
USERPOSE[index]["role"] = data["role"]
await notify_all()
elif data["cm"] == "chshoot":
USERS[websocket]["shoot"] = data["index"]
async def notify_all():
if USERS: # asyncio.wait doesn't accept an empty list
for user in USERS:
await notify(user)
async def notify(user):
userState = {"cm": "state", "users": USERPOSE, "state": USERS[user], "winers": WINERS, "losers":LOSERS}
await user.send(json.dumps(userState))
async def register(websocket):
print("new user")
USERPOSE.append({"name": "vasya", "role": "🌰", "alive": True})
USERS[websocket] = { "role": None, "index": len(USERPOSE) - 1, "shoot": 0 }
await notify_all()
async def unregister(websocket):
USERPOSE.pop(USERS[websocket]["index"])
USERS.pop(websocket)
await notify_all()
async def counter(websocket, path):
# register(websocket) sends user_event() to websocket
try:
await register(websocket)
async for message in websocket:
data = json.loads(message)
print(data)
if "admin" not in message:
await on_message(data, websocket)
else:
await on_admin(data, websocket)
finally:
await unregister(websocket)
port = 6765
start_server = websockets.serve(counter, "localhost", port)
print("running on "+str(port))
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever() | [
"[email protected]"
] | |
c1cb2957d5955231b1c957e0abc6fe10f6d92b25 | 08ee04ae665dcb930ed4b98ca7b91b2dac2cc3b0 | /src/rayoptics/mpl/interactivediagram.py | 734b65766acd2f9e4a84f1007991405bdb118570 | [
"BSD-3-Clause"
] | permissive | mjhoptics/ray-optics | 6bad622f7bb9b3485823b9cc511a6d2b679f7048 | 41ea6d618a93fe14f8bee45fb3efff6a6762bcce | refs/heads/master | 2023-07-09T18:03:36.621685 | 2023-05-08T22:46:36 | 2023-05-08T22:46:36 | 109,168,474 | 195 | 49 | BSD-3-Clause | 2023-08-10T16:53:28 | 2017-11-01T18:34:12 | Python | UTF-8 | Python | false | false | 2,904 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 Michael J. Hayford
"""
.. Created on Thu Oct 10 22:02:44 2019
.. codeauthor: Michael J. Hayford
"""
from rayoptics.parax.diagram import Diagram
from rayoptics.mpl.interactivefigure import InteractiveFigure
class InteractiveDiagram(InteractiveFigure):
""" Editable version of |ybar| and |nubar| diagrams
Attributes:
opt_model: parent optical model
refresh_gui: function to be called on refresh_gui event
dgm_type: diagram type, 'ht' or 'slp'
do_barrel_constraint, bool: display the barrel diamond if True
barrel_constraint, float: the radius for the barrel constraint
enable_slide: Display the "bend" or "gap" constaint lines
bend_or_gap: "bend" | "gap"
parax_model: if None, 'ifcs' else parax_mode for layer parax_model_key
parax_model_key: "ifcs" | "eles" | "asm" | "sys"
"""
def __init__(self, opt_model, dgm_type, refresh_gui=None,
do_barrel_constraint=False, barrel_constraint=1.0,
enable_slide=False, bend_or_gap='bend',
parax_model=None, parax_model_key='ifcs', **kwargs):
self.refresh_gui = refresh_gui
if parax_model is None:
self.parax_model = opt_model.parax_model
self.parax_model_key = 'ifcs'
else:
self.parax_model = parax_model
self.parax_model_key = kwargs.get('parax_model_key', 'root')
is_dark = kwargs['is_dark'] if 'is_dark' in kwargs else False
self.diagram = Diagram(
opt_model, self.parax_model, self.parax_model_key, dgm_type,
do_barrel_constraint=do_barrel_constraint,
barrel_constraint=barrel_constraint,
bend_or_gap=bend_or_gap, is_dark=is_dark
)
self.setup_dgm_type(dgm_type)
self.enable_slide = enable_slide
self.build = 'rebuild'
super().__init__(**kwargs)
def setup_dgm_type(self, dgm_type):
if dgm_type == 'ht':
self.x_label = r'$\overline{y}$'
self.y_label = 'y'
self.header = r'$y-\overline{y}$ Diagram'
elif dgm_type == 'slp':
self.x_label = r'$\overline{\omega}$'
self.y_label = r'$\omega$'
self.header = r'$\omega-\overline{\omega}$ Diagram'
def sync_light_or_dark(self, is_dark, **kwargs):
self.diagram.sync_light_or_dark(is_dark)
super().sync_light_or_dark(is_dark, **kwargs)
def update_data(self, **kwargs):
self.artists = []
self.sys_bbox = self.diagram.update_data(self, **kwargs)
self.build = 'rebuild'
return self
def action_complete(self):
super().action_complete()
self.diagram.register_commands((), figure=self)
def fit_axis_limits(self):
return self.diagram.fit_axis_limits()
| [
"[email protected]"
] | |
0c5f9e75ebcf98b94a95c2bffcdb4fa05329f144 | 38fa720d0f146f2fdc4498d796aa79838c0c00c0 | /proj/settings.py | a29fb808a2967b60cff6c50d95801fa3a60ea564 | [] | no_license | sundaramseth/djangotask | bb6418dc3e5e52086f2991fde0a425cd0f53a4f2 | 18d9dc52b527903ebda1da493f28ebb4e5599681 | refs/heads/master | 2020-05-30T11:40:47.030364 | 2019-06-01T09:49:09 | 2019-06-01T09:49:09 | 189,711,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
MEDIA_DIR = os.path.join(BASE_DIR,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR,]
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
LOGIN_URL = '/app/user_login/'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
| [
"[email protected]"
] | |
6ba560b3c268c86924803f6409b63001606365dd | d4c720f93631097ee048940d669e0859e85eabcf | /testing/unexpected_passes_common/result_output_unittest.py | 5c4f738c001c555623403d219c23ea85c96d59c1 | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 3b920d87437d9293f654de1f22d3ea341e7a8b55 | refs/heads/webnn | 2023-03-21T03:20:15.377034 | 2023-01-25T21:19:44 | 2023-01-25T21:19:44 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 31,064 | py | #!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import collections
import itertools
import sys
import tempfile
from typing import Iterable, Set
import unittest
import six
from pyfakefs import fake_filesystem_unittest
from unexpected_passes_common import data_types
from unexpected_passes_common import result_output
from unexpected_passes_common import unittest_utils as uu
def CreateTextOutputPermutations(text: str, inputs: Iterable[str]) -> Set[str]:
"""Creates permutations of |text| filled with the contents of |inputs|.
Some output ordering is not guaranteed, so this acts as a way to generate
all possible outputs instead of manually listing them.
Args:
text: A string containing a single string field to format.
inputs: An iterable of strings to permute.
Returns:
A set of unique permutations of |text| filled with |inputs|. E.g. if |text|
is '1%s2' and |inputs| is ['a', 'b'], the return value will be
set(['1ab2', '1ba2']).
"""
permutations = set()
for p in itertools.permutations(inputs):
permutations.add(text % ''.join(p))
return permutations
class ConvertUnmatchedResultsToStringDictUnittest(unittest.TestCase):
def testEmptyResults(self) -> None:
"""Tests that providing empty results is a no-op."""
self.assertEqual(result_output._ConvertUnmatchedResultsToStringDict({}), {})
def testMinimalData(self) -> None:
"""Tests that everything functions when minimal data is provided."""
unmatched_results = {
'builder': [
data_types.Result('foo', [], 'Failure', 'step', 'build_id'),
],
}
expected_output = {
'foo': {
'builder': {
'step': [
'Got "Failure" on http://ci.chromium.org/b/build_id with '
'tags []',
],
},
},
}
output = result_output._ConvertUnmatchedResultsToStringDict(
unmatched_results)
self.assertEqual(output, expected_output)
def testRegularData(self) -> None:
"""Tests that everything functions when regular data is provided."""
unmatched_results = {
'builder': [
data_types.Result('foo', ['win', 'intel'], 'Failure', 'step_name',
'build_id')
],
}
# TODO(crbug.com/1198237): Hard-code the tag string once only Python 3 is
# supported.
expected_output = {
'foo': {
'builder': {
'step_name': [
'Got "Failure" on http://ci.chromium.org/b/build_id with '
'tags [%s]' % ' '.join(set(['win', 'intel'])),
]
}
}
}
output = result_output._ConvertUnmatchedResultsToStringDict(
unmatched_results)
self.assertEqual(output, expected_output)
class ConvertTestExpectationMapToStringDictUnittest(unittest.TestCase):
def testEmptyMap(self) -> None:
"""Tests that providing an empty map is a no-op."""
self.assertEqual(
result_output._ConvertTestExpectationMapToStringDict(
data_types.TestExpectationMap()), {})
def testSemiStaleMap(self) -> None:
"""Tests that everything functions when regular data is provided."""
expectation_map = data_types.TestExpectationMap({
'expectation_file':
data_types.ExpectationBuilderMap({
data_types.Expectation('foo/test', ['win', 'intel'], [
'RetryOnFailure'
]):
data_types.BuilderStepMap({
'builder':
data_types.StepBuildStatsMap({
'all_pass':
uu.CreateStatsWithPassFails(2, 0),
'all_fail':
uu.CreateStatsWithPassFails(0, 2),
'some_pass':
uu.CreateStatsWithPassFails(1, 1),
}),
}),
data_types.Expectation('foo/test', ['linux', 'intel'], [
'RetryOnFailure'
]):
data_types.BuilderStepMap({
'builder':
data_types.StepBuildStatsMap({
'all_pass':
uu.CreateStatsWithPassFails(2, 0),
}),
}),
data_types.Expectation('foo/test', ['mac', 'intel'], [
'RetryOnFailure'
]):
data_types.BuilderStepMap({
'builder':
data_types.StepBuildStatsMap({
'all_fail':
uu.CreateStatsWithPassFails(0, 2),
}),
}),
}),
})
# TODO(crbug.com/1198237): Remove the Python 2 version once we are fully
# switched to Python 3.
if six.PY2:
expected_output = {
'expectation_file': {
'foo/test': {
'"RetryOnFailure" expectation on "win intel"': {
'builder': {
'Fully passed in the following': [
'all_pass (2/2 passed)',
],
'Never passed in the following': [
'all_fail (0/2 passed)',
],
'Partially passed in the following': {
'some_pass (1/2 passed)': [
data_types.BuildLinkFromBuildId('build_id0'),
],
},
},
},
'"RetryOnFailure" expectation on "intel linux"': {
'builder': {
'Fully passed in the following': [
'all_pass (2/2 passed)',
],
},
},
'"RetryOnFailure" expectation on "mac intel"': {
'builder': {
'Never passed in the following': [
'all_fail (0/2 passed)',
],
},
},
},
},
}
else:
# Set ordering does not appear to be stable between test runs, as we can
# get either order of tags. So, generate them now instead of hard coding
# them.
linux_tags = ' '.join(set(['linux', 'intel']))
win_tags = ' '.join(set(['win', 'intel']))
mac_tags = ' '.join(set(['mac', 'intel']))
expected_output = {
'expectation_file': {
'foo/test': {
'"RetryOnFailure" expectation on "%s"' % linux_tags: {
'builder': {
'Fully passed in the following': [
'all_pass (2/2 passed)',
],
},
},
'"RetryOnFailure" expectation on "%s"' % win_tags: {
'builder': {
'Fully passed in the following': [
'all_pass (2/2 passed)',
],
'Partially passed in the following': {
'some_pass (1/2 passed)': [
data_types.BuildLinkFromBuildId('build_id0'),
],
},
'Never passed in the following': [
'all_fail (0/2 passed)',
],
},
},
'"RetryOnFailure" expectation on "%s"' % mac_tags: {
'builder': {
'Never passed in the following': [
'all_fail (0/2 passed)',
],
},
},
},
},
}
str_dict = result_output._ConvertTestExpectationMapToStringDict(
expectation_map)
self.assertEqual(str_dict, expected_output)
class ConvertUnusedExpectationsToStringDictUnittest(unittest.TestCase):
def testEmptyDict(self) -> None:
"""Tests that nothing blows up when given an empty dict."""
self.assertEqual(result_output._ConvertUnusedExpectationsToStringDict({}),
{})
def testBasic(self) -> None:
"""Basic functionality test."""
unused = {
'foo_file': [
data_types.Expectation('foo/test', ['win', 'nvidia'],
['Failure', 'Timeout']),
],
'bar_file': [
data_types.Expectation('bar/test', ['win'], ['Failure']),
data_types.Expectation('bar/test2', ['win'], ['RetryOnFailure'])
],
}
if six.PY2:
expected_output = {
'foo_file': [
'[ win nvidia ] foo/test [ Failure Timeout ]',
],
'bar_file': [
'[ win ] bar/test [ Failure ]',
'[ win ] bar/test2 [ RetryOnFailure ]',
],
}
else:
# Set ordering does not appear to be stable between test runs, as we can
# get either order of tags. So, generate them now instead of hard coding
# them.
tags = ' '.join(['nvidia', 'win'])
results = ' '.join(['Failure', 'Timeout'])
expected_output = {
'foo_file': [
'[ %s ] foo/test [ %s ]' % (tags, results),
],
'bar_file': [
'[ win ] bar/test [ Failure ]',
'[ win ] bar/test2 [ RetryOnFailure ]',
],
}
self.assertEqual(
result_output._ConvertUnusedExpectationsToStringDict(unused),
expected_output)
class HtmlToFileUnittest(fake_filesystem_unittest.TestCase):
def setUp(self) -> None:
self.setUpPyfakefs()
self._file_handle = tempfile.NamedTemporaryFile(delete=False, mode='w')
self._filepath = self._file_handle.name
def testLinkifyString(self) -> None:
"""Test for _LinkifyString()."""
self._file_handle.close()
s = 'a'
self.assertEqual(result_output._LinkifyString(s), 'a')
s = 'http://a'
self.assertEqual(result_output._LinkifyString(s),
'<a href="http://a">http://a</a>')
s = 'link to http://a, click it'
self.assertEqual(result_output._LinkifyString(s),
'link to <a href="http://a">http://a</a>, click it')
def testRecursiveHtmlToFileExpectationMap(self) -> None:
"""Tests _RecursiveHtmlToFile() with an expectation map as input."""
expectation_map = {
'foo': {
'"RetryOnFailure" expectation on "win intel"': {
'builder': {
'Fully passed in the following': [
'all_pass (2/2)',
],
'Never passed in the following': [
'all_fail (0/2)',
],
'Partially passed in the following': {
'some_pass (1/2)': [
data_types.BuildLinkFromBuildId('build_id0'),
],
},
},
},
},
}
result_output._RecursiveHtmlToFile(expectation_map, self._file_handle)
self._file_handle.close()
# pylint: disable=line-too-long
# TODO(crbug.com/1198237): Remove the Python 2 version once we've fully
# switched to Python 3.
if six.PY2:
expected_output = """\
<button type="button" class="collapsible_group">foo</button>
<div class="content">
<button type="button" class="collapsible_group">"RetryOnFailure" expectation on "win intel"</button>
<div class="content">
<button type="button" class="collapsible_group">builder</button>
<div class="content">
<button type="button" class="collapsible_group">Never passed in the following</button>
<div class="content">
<p>all_fail (0/2)</p>
</div>
<button type="button" class="highlighted_collapsible_group">Fully passed in the following</button>
<div class="content">
<p>all_pass (2/2)</p>
</div>
<button type="button" class="collapsible_group">Partially passed in the following</button>
<div class="content">
<button type="button" class="collapsible_group">some_pass (1/2)</button>
<div class="content">
<p><a href="http://ci.chromium.org/b/build_id0">http://ci.chromium.org/b/build_id0</a></p>
</div>
</div>
</div>
</div>
</div>
"""
else:
expected_output = """\
<button type="button" class="collapsible_group">foo</button>
<div class="content">
<button type="button" class="collapsible_group">"RetryOnFailure" expectation on "win intel"</button>
<div class="content">
<button type="button" class="collapsible_group">builder</button>
<div class="content">
<button type="button" class="highlighted_collapsible_group">Fully passed in the following</button>
<div class="content">
<p>all_pass (2/2)</p>
</div>
<button type="button" class="collapsible_group">Never passed in the following</button>
<div class="content">
<p>all_fail (0/2)</p>
</div>
<button type="button" class="collapsible_group">Partially passed in the following</button>
<div class="content">
<button type="button" class="collapsible_group">some_pass (1/2)</button>
<div class="content">
<p><a href="http://ci.chromium.org/b/build_id0">http://ci.chromium.org/b/build_id0</a></p>
</div>
</div>
</div>
</div>
</div>
"""
# pylint: enable=line-too-long
expected_output = _Dedent(expected_output)
with open(self._filepath) as f:
self.assertEqual(f.read(), expected_output)
def testRecursiveHtmlToFileUnmatchedResults(self) -> None:
"""Tests _RecursiveHtmlToFile() with unmatched results as input."""
unmatched_results = {
'foo': {
'builder': {
None: [
'Expected "" on http://ci.chromium.org/b/build_id, got '
'"Failure" with tags []',
],
'step_name': [
'Expected "Failure RetryOnFailure" on '
'http://ci.chromium.org/b/build_id, got '
'"Failure" with tags [win intel]',
]
},
},
}
result_output._RecursiveHtmlToFile(unmatched_results, self._file_handle)
self._file_handle.close()
# pylint: disable=line-too-long
# Order is not guaranteed, so create permutations.
expected_template = """\
<button type="button" class="collapsible_group">foo</button>
<div class="content">
<button type="button" class="collapsible_group">builder</button>
<div class="content">
%s
</div>
</div>
"""
values = [
"""\
<button type="button" class="collapsible_group">None</button>
<div class="content">
<p>Expected "" on <a href="http://ci.chromium.org/b/build_id">http://ci.chromium.org/b/build_id</a>, got "Failure" with tags []</p>
</div>
""",
"""\
<button type="button" class="collapsible_group">step_name</button>
<div class="content">
<p>Expected "Failure RetryOnFailure" on <a href="http://ci.chromium.org/b/build_id">http://ci.chromium.org/b/build_id</a>, got "Failure" with tags [win intel]</p>
</div>
""",
]
expected_output = CreateTextOutputPermutations(expected_template, values)
# pylint: enable=line-too-long
expected_output = [_Dedent(e) for e in expected_output]
with open(self._filepath) as f:
self.assertIn(f.read(), expected_output)
class PrintToFileUnittest(fake_filesystem_unittest.TestCase):
def setUp(self) -> None:
self.setUpPyfakefs()
self._file_handle = tempfile.NamedTemporaryFile(delete=False, mode='w')
self._filepath = self._file_handle.name
def testRecursivePrintToFileExpectationMap(self) -> None:
"""Tests RecursivePrintToFile() with an expectation map as input."""
expectation_map = {
'foo': {
'"RetryOnFailure" expectation on "win intel"': {
'builder': {
'Fully passed in the following': [
'all_pass (2/2)',
],
'Never passed in the following': [
'all_fail (0/2)',
],
'Partially passed in the following': {
'some_pass (1/2)': [
data_types.BuildLinkFromBuildId('build_id0'),
],
},
},
},
},
}
result_output.RecursivePrintToFile(expectation_map, 0, self._file_handle)
self._file_handle.close()
# TODO(crbug.com/1198237): Keep the Python 3 version once we are fully
# switched.
if six.PY2:
expected_output = """\
foo
"RetryOnFailure" expectation on "win intel"
builder
Never passed in the following
all_fail (0/2)
Fully passed in the following
all_pass (2/2)
Partially passed in the following
some_pass (1/2)
http://ci.chromium.org/b/build_id0
"""
else:
expected_output = """\
foo
"RetryOnFailure" expectation on "win intel"
builder
Fully passed in the following
all_pass (2/2)
Never passed in the following
all_fail (0/2)
Partially passed in the following
some_pass (1/2)
http://ci.chromium.org/b/build_id0
"""
with open(self._filepath) as f:
self.assertEqual(f.read(), expected_output)
def testRecursivePrintToFileUnmatchedResults(self) -> None:
"""Tests RecursivePrintToFile() with unmatched results as input."""
unmatched_results = {
'foo': {
'builder': {
None: [
'Expected "" on http://ci.chromium.org/b/build_id, got '
'"Failure" with tags []',
],
'step_name': [
'Expected "Failure RetryOnFailure" on '
'http://ci.chromium.org/b/build_id, got '
'"Failure" with tags [win intel]',
]
},
},
}
result_output.RecursivePrintToFile(unmatched_results, 0, self._file_handle)
self._file_handle.close()
# pylint: disable=line-too-long
# Order is not guaranteed, so create permutations.
expected_template = """\
foo
builder%s
"""
values = [
"""
None
Expected "" on http://ci.chromium.org/b/build_id, got "Failure" with tags []\
""",
"""
step_name
Expected "Failure RetryOnFailure" on http://ci.chromium.org/b/build_id, got "Failure" with tags [win intel]\
""",
]
expected_output = CreateTextOutputPermutations(expected_template, values)
# pylint: enable=line-too-long
with open(self._filepath) as f:
self.assertIn(f.read(), expected_output)
class OutputResultsUnittest(fake_filesystem_unittest.TestCase):
def setUp(self) -> None:
self.setUpPyfakefs()
self._file_handle = tempfile.NamedTemporaryFile(delete=False, mode='w')
self._filepath = self._file_handle.name
def testOutputResultsUnsupportedFormat(self) -> None:
"""Tests that passing in an unsupported format is an error."""
with self.assertRaises(RuntimeError):
result_output.OutputResults(data_types.TestExpectationMap(),
data_types.TestExpectationMap(),
data_types.TestExpectationMap(), {}, {},
'asdf')
def testOutputResultsSmoketest(self) -> None:
"""Test that nothing blows up when outputting."""
expectation_map = data_types.TestExpectationMap({
'foo':
data_types.ExpectationBuilderMap({
data_types.Expectation('foo', ['win', 'intel'], 'RetryOnFailure'):
data_types.BuilderStepMap({
'stale':
data_types.StepBuildStatsMap({
'all_pass':
uu.CreateStatsWithPassFails(2, 0),
}),
}),
data_types.Expectation('foo', ['linux'], 'Failure'):
data_types.BuilderStepMap({
'semi_stale':
data_types.StepBuildStatsMap({
'all_pass':
uu.CreateStatsWithPassFails(2, 0),
'some_pass':
uu.CreateStatsWithPassFails(1, 1),
'none_pass':
uu.CreateStatsWithPassFails(0, 2),
}),
}),
data_types.Expectation('foo', ['mac'], 'Failure'):
data_types.BuilderStepMap({
'active':
data_types.StepBuildStatsMap({
'none_pass':
uu.CreateStatsWithPassFails(0, 2),
}),
}),
}),
})
unmatched_results = {
'builder': [
data_types.Result('foo', ['win', 'intel'], 'Failure', 'step_name',
'build_id'),
],
}
unmatched_expectations = {
'foo_file': [
data_types.Expectation('foo', ['linux'], 'RetryOnFailure'),
],
}
stale, semi_stale, active = expectation_map.SplitByStaleness()
result_output.OutputResults(stale, semi_stale, active, {}, {}, 'print',
self._file_handle)
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
{}, 'print', self._file_handle)
result_output.OutputResults(stale, semi_stale, active, {},
unmatched_expectations, 'print',
self._file_handle)
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
unmatched_expectations, 'print',
self._file_handle)
result_output.OutputResults(stale, semi_stale, active, {}, {}, 'html',
self._file_handle)
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
{}, 'html', self._file_handle)
result_output.OutputResults(stale, semi_stale, active, {},
unmatched_expectations, 'html',
self._file_handle)
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
unmatched_expectations, 'html',
self._file_handle)
class OutputAffectedUrlsUnittest(fake_filesystem_unittest.TestCase):
def setUp(self) -> None:
self.setUpPyfakefs()
self._file_handle = tempfile.NamedTemporaryFile(delete=False, mode='w')
self._filepath = self._file_handle.name
def testOutput(self) -> None:
"""Tests that the output is correct."""
urls = [
'https://crbug.com/1234',
'https://crbug.com/angleproject/1234',
'http://crbug.com/2345',
'crbug.com/3456',
]
orphaned_urls = ['https://crbug.com/1234', 'crbug.com/3456']
result_output._OutputAffectedUrls(urls, orphaned_urls, self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs: '
'https://crbug.com/1234 '
'https://crbug.com/angleproject/1234 '
'http://crbug.com/2345 '
'https://crbug.com/3456\n'
'Closable bugs: '
'https://crbug.com/1234 '
'https://crbug.com/3456\n'))
class OutputUrlsForClDescriptionUnittest(fake_filesystem_unittest.TestCase):
def setUp(self) -> None:
self.setUpPyfakefs()
self._file_handle = tempfile.NamedTemporaryFile(delete=False, mode='w')
self._filepath = self._file_handle.name
def testSingleLine(self) -> None:
"""Tests when all bugs can fit on a single line."""
urls = [
'crbug.com/1234',
'https://crbug.com/angleproject/2345',
]
result_output._OutputUrlsForClDescription(urls, [], self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 1234, angleproject:2345\n'))
def testBugLimit(self) -> None:
"""Tests that only a certain number of bugs are allowed per line."""
urls = [
'crbug.com/1',
'crbug.com/2',
'crbug.com/3',
'crbug.com/4',
'crbug.com/5',
'crbug.com/6',
]
result_output._OutputUrlsForClDescription(urls, [], self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 1, 2, 3, 4, 5\n'
'Bug: 6\n'))
def testLengthLimit(self) -> None:
"""Tests that only a certain number of characters are allowed per line."""
urls = [
'crbug.com/averylongprojectthatwillgooverthelinelength/1',
'crbug.com/averylongprojectthatwillgooverthelinelength/2',
]
result_output._OutputUrlsForClDescription(urls, [], self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(),
('Affected bugs for CL description:\n'
'Bug: averylongprojectthatwillgooverthelinelength:1\n'
'Bug: averylongprojectthatwillgooverthelinelength:2\n'))
project_name = (result_output.MAX_CHARACTERS_PER_CL_LINE - len('Bug: ') -
len(':1, 2')) * 'a'
urls = [
'crbug.com/%s/1' % project_name,
'crbug.com/2',
]
with open(self._filepath, 'w') as f:
result_output._OutputUrlsForClDescription(urls, [], f)
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 2, %s:1\n' % project_name))
project_name += 'a'
urls = [
'crbug.com/%s/1' % project_name,
'crbug.com/2',
]
with open(self._filepath, 'w') as f:
result_output._OutputUrlsForClDescription(urls, [], f)
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 2\nBug: %s:1\n' % project_name))
def testSingleBugOverLineLimit(self) -> None:
"""Tests the behavior when a single bug by itself is over the line limit."""
project_name = result_output.MAX_CHARACTERS_PER_CL_LINE * 'a'
urls = [
'crbug.com/%s/1' % project_name,
'crbug.com/2',
]
result_output._OutputUrlsForClDescription(urls, [], self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 2\n'
'Bug: %s:1\n' % project_name))
def testOrphanedBugs(self) -> None:
"""Tests that orphaned bugs are output properly alongside affected ones."""
urls = [
'crbug.com/1',
'crbug.com/2',
'crbug.com/3',
]
orphaned_urls = ['crbug.com/2']
result_output._OutputUrlsForClDescription(urls, orphaned_urls,
self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Bug: 1, 3\n'
'Fixed: 2\n'))
def testOnlyOrphanedBugs(self) -> None:
"""Tests output when all affected bugs are orphaned bugs."""
urls = [
'crbug.com/1',
'crbug.com/2',
]
orphaned_urls = [
'crbug.com/1',
'crbug.com/2',
]
result_output._OutputUrlsForClDescription(urls, orphaned_urls,
self._file_handle)
self._file_handle.close()
with open(self._filepath) as f:
self.assertEqual(f.read(), ('Affected bugs for CL description:\n'
'Fixed: 1, 2\n'))
class ConvertBuilderMapToPassOrderedStringDictUnittest(unittest.TestCase):
def testEmptyInput(self) -> None:
"""Tests that an empty input doesn't cause breakage."""
output = result_output.ConvertBuilderMapToPassOrderedStringDict(
data_types.BuilderStepMap())
expected_output = collections.OrderedDict()
expected_output[result_output.FULL_PASS] = {}
expected_output[result_output.NEVER_PASS] = {}
expected_output[result_output.PARTIAL_PASS] = {}
self.assertEqual(output, expected_output)
def testBasic(self) -> None:
"""Tests that a map is properly converted."""
builder_map = data_types.BuilderStepMap({
'fully pass':
data_types.StepBuildStatsMap({
'step1': uu.CreateStatsWithPassFails(1, 0),
}),
'never pass':
data_types.StepBuildStatsMap({
'step3': uu.CreateStatsWithPassFails(0, 1),
}),
'partial pass':
data_types.StepBuildStatsMap({
'step5': uu.CreateStatsWithPassFails(1, 1),
}),
'mixed':
data_types.StepBuildStatsMap({
'step7': uu.CreateStatsWithPassFails(1, 0),
'step8': uu.CreateStatsWithPassFails(0, 1),
'step9': uu.CreateStatsWithPassFails(1, 1),
}),
})
output = result_output.ConvertBuilderMapToPassOrderedStringDict(builder_map)
expected_output = collections.OrderedDict()
expected_output[result_output.FULL_PASS] = {
'fully pass': [
'step1 (1/1 passed)',
],
'mixed': [
'step7 (1/1 passed)',
],
}
expected_output[result_output.NEVER_PASS] = {
'never pass': [
'step3 (0/1 passed)',
],
'mixed': [
'step8 (0/1 passed)',
],
}
expected_output[result_output.PARTIAL_PASS] = {
'partial pass': {
'step5 (1/2 passed)': [
'http://ci.chromium.org/b/build_id0',
],
},
'mixed': {
'step9 (1/2 passed)': [
'http://ci.chromium.org/b/build_id0',
],
},
}
self.assertEqual(output, expected_output)
def _Dedent(s: str) -> str:
output = ''
for line in s.splitlines(True):
output += line.lstrip()
return output
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
dbbd26e6813d379ee76fd9dead2b2a25694a3d5b | e0ace1f9534d6e9082d97d1ebddd0198669d0a89 | /thrift/compiler/test/fixtures/qualified/gen-py3/module1/types.pyi | 337e67bc6e14561e2a2c058754e9f9cf90ce61b0 | [
"Apache-2.0"
] | permissive | kulv2012/fbthrift | 20614a907c1f7ffb368a2ca3154569b781e64fec | 0ef9f7c09603a9c3f218ea79b1cd7509ec265085 | refs/heads/master | 2022-11-16T01:17:59.679426 | 2020-07-06T08:10:25 | 2020-07-06T08:11:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as __iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import __NotSet, NOTSET
import typing as _typing
from typing_extensions import Final
import sys
import itertools
__property__ = property
class Enum(thrift.py3.types.Enum):
ONE: Enum = ...
TWO: Enum = ...
THREE: Enum = ...
class Struct(thrift.py3.types.Struct, _typing.Hashable, _typing.Iterable[_typing.Tuple[str, _typing.Any]]):
first: Final[int] = ...
second: Final[str] = ...
def __init__(
self, *,
first: _typing.Optional[int]=None,
second: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
first: _typing.Union[int, __NotSet, None]=NOTSET,
second: _typing.Union[str, __NotSet, None]=NOTSET
) -> Struct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['Struct'], bytes]]: ...
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
def __bool__(self) -> bool: ...
def __hash__(self) -> int: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'Struct') -> bool: ...
def __gt__(self, other: 'Struct') -> bool: ...
def __le__(self, other: 'Struct') -> bool: ...
def __ge__(self, other: 'Struct') -> bool: ...
_List__EnumT = _typing.TypeVar('_List__EnumT', bound=_typing.Sequence[Enum])
class List__Enum(_typing.Sequence[Enum], _typing.Hashable):
def __init__(self, items: _typing.Sequence[Enum]=None) -> None: ...
def __repr__(self) -> str: ...
def __len__(self) -> int: ...
def __hash__(self) -> int: ...
def __contains__(self, x: object) -> bool: ...
def __copy__(self) -> _typing.Sequence[Enum]: ...
@_typing.overload
def __getitem__(self, i: int) -> Enum: ...
@_typing.overload
def __getitem__(self, s: slice) -> _typing.Sequence[Enum]: ...
def count(self, item: _typing.Any) -> int: ...
def index(self, item: _typing.Any, start: int = ..., stop: int = ...) -> int: ...
def __add__(self, other: _typing.Sequence[Enum]) -> 'List__Enum': ...
def __radd__(self, other: _List__EnumT) -> _List__EnumT: ...
def __reversed__(self) -> _typing.Iterator[Enum]: ...
def __iter__(self) -> _typing.Iterator[Enum]: ...
c1: Struct = ...
e1s: List__Enum = ...
| [
"[email protected]"
] | |
a65c5e6f3d1028b77c6d9135de5a4beb064b9211 | 92035a9a1d2d55830f82d5e6f92a8e41feaef863 | /get screenshot in a video in frame.py | 35aad0f3fad645e76e9f88bf60354e4d592fcf93 | [] | no_license | Valarzz/make-picture-data-set | 5c7a87e11495d5d89ce53405fbfd3e4917e11f93 | 847a7ee52893b152e161fdf7f7a1eb586551603d | refs/heads/master | 2020-09-28T04:47:28.824686 | 2019-12-08T15:58:56 | 2019-12-08T15:58:56 | 226,688,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | import cv2,os
# Use opencv to capture video frames at regular intervals and save them as images
vc = cv2.VideoCapture('C:\\Users\\10300\\robomaster.flv') # write your video address here
c = 0
print("------------")
if vc.isOpened(): # Determine if it is open properly
print("yes")
rval, frame = vc.read()
else:
rval = False
print("false")
timeF = 1000 # save one frame every timeF frames
os.chdir("E:/baiduyun/looklok/" ) #write where you want to save here
i = 0
while rval:
rval,frame = vc.read()
print(c,timeF,c%timeF)
    if rval and (c % timeF == 0):# store one frame every timeF frames
        print("write...")
        cv2.imwrite( str(i) + '.jpg', frame) # save as image
        print("success!")
        i = i + 1
    c = c + 1
cv2.waitKey(1)
vc.release()
print("==================================")
| [
"[email protected]"
] | |
3355a52f9ba92cb9f46a0ba9de124ed9d3a31aa3 | b497be9a4c593e8397de96b70f6412b98094fa13 | /lib/odenvp_conditional_augment.py | a3d9bb4a20202eade0b318f62d7739947f42aacd | [
"MIT"
] | permissive | minhtannguyen/ffjord | 3804fdf76d05d90b88b1b612486eed8d0d354658 | f3418249eaa4647f4339aea8d814cf2ce33be141 | refs/heads/master | 2020-04-21T11:21:50.958015 | 2019-08-23T05:44:36 | 2019-08-23T05:44:36 | 169,522,404 | 0 | 0 | MIT | 2019-02-07T05:10:16 | 2019-02-07T05:10:16 | null | UTF-8 | Python | false | false | 7,251 | py | import torch
import torch.nn as nn
import lib.layers as layers
from lib.layers.odefunc import ODEnet
import numpy as np
from . import modules
from . import thops
class ODENVP(nn.Module):
"""
Real NVP for image data. Will downsample the input until one of the
dimensions is less than or equal to 4.
Args:
input_size (tuple): 4D tuple of the input size.
n_scale (int): Number of scales for the representation z.
n_resblocks (int): Length of the resnet for each coupling layer.
"""
def __init__(
self,
input_size,
n_scale=float('inf'),
n_blocks=2,
intermediate_dims=(32,),
nonlinearity="softplus",
squash_input=True,
alpha=0.05,
cnf_kwargs=None,
y_class=10,
):
super(ODENVP, self).__init__()
self.n_scale = min(n_scale, self._calc_n_scale(input_size))
self.n_blocks = n_blocks
self.intermediate_dims = intermediate_dims
self.nonlinearity = nonlinearity
self.squash_input = squash_input
self.alpha = alpha
self.cnf_kwargs = cnf_kwargs if cnf_kwargs else {}
self.y_class = y_class
if not self.n_scale > 0:
            raise ValueError('Could not compute number of scales for input of size (%d,%d,%d,%d)' % input_size)
self.transforms = self._build_net(input_size)
self.dims = [o[1:] for o in self.calc_output_size(input_size)]
# for conditional
C = np.prod(input_size[1:])
self.project_ycond = modules.LinearZeros(self.y_class, 2 * C)
self.project_class = modules.LinearZeros(C, self.y_class)
self.register_parameter(
"prior_h",
nn.Parameter(torch.zeros([1, 2 * C])))
def _build_net(self, input_size):
_, c, h, w = input_size
transforms = []
for i in range(self.n_scale):
transforms.append(
StackedCNFLayers(
initial_size=(c, h, w),
idims=self.intermediate_dims,
squeeze=(i < self.n_scale - 1), # don't squeeze last layer
init_layer=(layers.LogitTransform(self.alpha) if self.alpha > 0 else layers.ZeroMeanTransform()) if self.squash_input and i == 0 else None,
n_blocks=self.n_blocks,
cnf_kwargs=self.cnf_kwargs,
nonlinearity=self.nonlinearity,
)
)
c, h, w = c * 2, h // 2, w // 2
return nn.ModuleList(transforms)
def get_regularization(self):
if len(self.regularization_fns) == 0:
return None
acc_reg_states = tuple([0.] * len(self.regularization_fns))
for module in self.modules():
if isinstance(module, layers.CNF_augment):
acc_reg_states = tuple(
acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())
)
return sum(state * coeff for state, coeff in zip(acc_reg_states, self.regularization_coeffs))
def _calc_n_scale(self, input_size):
_, _, h, w = input_size
n_scale = 0
while h >= 4 and w >= 4:
n_scale += 1
h = h // 2
w = w // 2
return n_scale
def calc_output_size(self, input_size):
n, c, h, w = input_size
output_sizes = []
for i in range(self.n_scale):
if i < self.n_scale - 1:
c *= 2
h //= 2
w //= 2
output_sizes.append((n, c, h, w))
else:
output_sizes.append((n, c, h, w))
return tuple(output_sizes)
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._generate(x, logpx)
else:
return self._logdensity(x, logpx)
def _prior(self, y_onehot=None):
# compute the mean and std of the gaussian used to compute logpz
h = self.prior_h.detach().clone()
assert torch.sum(h) == 0.0
assert y_onehot is not None
yp = self.project_ycond(y_onehot)
h = yp + h
return thops.split_feature(h, "split")
def _logdensity(self, x, logpx=None):
_logpx = torch.zeros(x.shape[0], 1).to(x) if logpx is None else logpx
out = []
for idx in range(len(self.transforms)):
x, _logpx = self.transforms[idx].forward(x, _logpx)
if idx < len(self.transforms) - 1:
d = x.size(1) // 2
x, factor_out = x[:, :d], x[:, d:]
else:
# last layer, no factor out
factor_out = x
out.append(factor_out)
out = [o.view(o.size()[0], -1) for o in out]
out = torch.cat(out, 1)
return out if logpx is None else (out, _logpx)
@staticmethod
def loss_class(y_logits, y):
if y_logits is None:
return 0
else:
CE = torch.nn.CrossEntropyLoss()
return CE(y_logits, y.long())
def _generate(self, z, logpz=None):
z = z.view(z.shape[0], -1)
zs = []
i = 0
for dims in self.dims:
s = np.prod(dims)
zs.append(z[:, i:i + s])
i += s
zs = [_z.view(_z.size()[0], *zsize) for _z, zsize in zip(zs, self.dims)]
_logpz = torch.zeros(zs[0].shape[0], 1).to(zs[0]) if logpz is None else logpz
z_prev, _logpz = self.transforms[-1](zs[-1], _logpz, reverse=True)
for idx in range(len(self.transforms) - 2, -1, -1):
z_prev = torch.cat((z_prev, zs[idx]), dim=1)
z_prev, _logpz = self.transforms[idx](z_prev, _logpz, reverse=True)
return z_prev if logpz is None else (z_prev, _logpz)
class StackedCNFLayers(layers.SequentialFlow):
def __init__(
self,
initial_size,
idims=(32,),
nonlinearity="softplus",
squeeze=True,
init_layer=None,
n_blocks=1,
cnf_kwargs={},
):
strides = tuple([1] + [1 for _ in idims])
chain = []
if init_layer is not None:
chain.append(init_layer)
def _make_odefunc(size):
net = ODEnet(idims, size, strides, True, layer_type="concat", nonlinearity=nonlinearity)
f = layers.ODEfunc(net)
return f
if squeeze:
c, h, w = initial_size
initial_size = c + cnf_kwargs['concat_size'], h, w
after_squeeze_size = c * 4 + cnf_kwargs['concat_size'], h // 2, w // 2
pre = [layers.CNF_augment(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
post = [layers.CNF_augment(_make_odefunc(after_squeeze_size), **cnf_kwargs) for _ in range(n_blocks)]
chain += pre + [layers.SqueezeLayer(2)] + post
else:
c, h, w = initial_size
initial_size = c + cnf_kwargs['concat_size'], h, w
chain += [layers.CNF_augment(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
super(StackedCNFLayers, self).__init__(chain)
| [
"[email protected]"
] | |
24608146b7c1a3e71d18422efbd1b700dcfe4aec | 95e6fa634946facb61e96124f994fdf8e24c48b9 | /Python_programming/Data_filtration_1_5.py | 09c40d7545fad54b7b3c8eef9daea548a7e1bb66 | [] | no_license | Accolyte42/Carpov_ML | f70da4fcd1c209c7e1e2e9e23e35687a0994885d | b04aa8a49613dcfd5b5ebf783a61e5004be6cc14 | refs/heads/master | 2023-08-11T12:25:21.952937 | 2021-10-03T21:25:21 | 2021-10-03T21:25:21 | 397,374,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | import pandas as pd
import numpy as np
# Load the data from Stepik (the same file is also available in the working directory)
students_performanse = pd.read_csv('https://stepik.org/media/attachments/course/4852/StudentsPerformance.csv')
students_performanse_with_names = students_performanse.iloc[[0, 3, 4, 7, 8]]
students_performanse_with_names.index = ['Cersei', 'Tywin', 'Gregor', 'Joffrey', 'Ilyn Payne']
students_performanse = students_performanse \
.rename(columns =
{'parental level of education': 'parental_level_of_education',
'test preparation course': 'test_preparation_course',
'math score': 'math_score',
'writing score': 'writing_score',
'reading score': 'reading_score'})
# print(students_performanse.query('writing_score > 78').head())
score_columns = [i for i in list(students_performanse) if 'score' in i]
# print(students_performanse[score_columns].head())
# print(students_performanse_with_names.filter(like='co')) # select all columns whose name contains "co"
print(students_performanse_with_names.filter(like='y', axis=0).head()) # select all rows whose index label contains "y"
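# For clarity, filter(like=...) does substring matching on axis labels: axis=1
# matches column names, axis=0 matches index labels. Two illustrative calls:
# students_performanse_with_names.filter(like='score', axis=1)    # the three score columns
# students_performanse_with_names.filter(like='Joffrey', axis=0)  # the row indexed 'Joffrey'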
| [
"[email protected]"
] | |
76177ac654854c18651a5090814bab3ad872a612 | 4daf54241d399522bf9f3a6cd235f87132ad3459 | /cifar/resnet.shared.no_share_bn.branchx0.01.deep/main.py | b729378353c0399de87978be959c19f83ad3d0b5 | [
"MIT"
] | permissive | zyclarkcheng/tensorpack-exp | f54b3d9ef2fee92e102f2abfcd72f0ccfeb121de | 863d3ce56a985c0ad291e55e673531c63e30b42d | refs/heads/master | 2020-04-27T00:39:55.822944 | 2017-07-17T15:09:38 | 2017-07-17T15:09:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,459 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: cifar10-resnet.py
# Author: Yuxin Wu <[email protected]>
import numpy as np
import argparse
import os
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
"""
CIFAR10 ResNet example. See:
Deep Residual Learning for Image Recognition, arxiv:1512.03385
This implementation uses the variants proposed in:
Identity Mappings in Deep Residual Networks, arxiv:1603.05027
I can reproduce the results on 2 TitanX for
n=5, about 7.1% val error after 67k steps (20.4 step/s)
n=18, about 5.95% val error after 80k steps (5.6 step/s, not converged)
n=30: a 182-layer network, about 5.6% val error after 51k steps (3.4 step/s)
This model uses the whole training set instead of a train-val split.
To train:
./cifar10-resnet.py --gpu 0,1
"""
BATCH_SIZE = 128
NUM_UNITS = None
class Model(ModelDesc):
def __init__(self, n):
super(Model, self).__init__()
self.n = n
def _get_inputs(self):
return [InputDesc(tf.float32, [None, 32, 32, 3], 'input'),
InputDesc(tf.int32, [None], 'label')]
def _build_graph(self, inputs):
image, label = inputs
image = image / 128.0
assert tf.test.is_gpu_available()
image = tf.transpose(image, [0, 3, 1, 2])
def residual(name, l, increase_dim=False, first=False, reuse=False, cnt=[0]):
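            # Weight-sharing variant (interpretation based on this directory's name,
            # "resnet.shared.no_share_bn.branchx0.01.deep"): the conv weights live in
            # variable_scope(name, reuse=reuse) so repeated blocks share them, while
            # every call opens a fresh scope (name + '_<cnt>') for BNReLU so batch-norm
            # statistics are not shared; the residual branch is scaled by 0.01 before
            # being added to the shortcut. cnt=[0] is a mutable default used as a call
            # counter to generate those unique per-call scope names.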
cnt[0] += 1
shape = l.get_shape().as_list()
in_channel = shape[1]
if increase_dim:
out_channel = in_channel * 2
stride1 = 2
else:
out_channel = in_channel
stride1 = 1
with tf.variable_scope(name + '_{}'.format(cnt[0]), reuse=None) as scope:
b1 = l if first else BNReLU(l)
with tf.variable_scope(name, reuse=reuse) as scope:
c1 = Conv2D('conv1', b1, out_channel, stride=stride1, nl=tf.identity)
cnt[0] += 1
with tf.variable_scope(name + '_{}'.format(cnt[0]), reuse=None) as scope:
c1 = BNReLU(c1)
with tf.variable_scope(name, reuse=reuse) as scope:
c2 = Conv2D('conv2', c1, out_channel)
if increase_dim:
l = AvgPooling('pool', l, 2)
l = tf.pad(l, [[0, 0], [in_channel // 2, in_channel // 2], [0, 0], [0, 0]])
l = 0.01 * c2 + l
return l
with argscope([Conv2D, AvgPooling, BatchNorm, GlobalAvgPooling], data_format='NCHW'), \
argscope(Conv2D, nl=tf.identity, use_bias=False, kernel_shape=3,
W_init=variance_scaling_initializer(mode='FAN_OUT')):
l = Conv2D('conv0', image, 16, nl=BNReLU)
l = residual('res1.0', l, first=True)
for k in range(1, self.n):
l = residual('res1.1', l, reuse=True if k > 1 else False)
# 32,c=16
l = residual('res2.0', l, increase_dim=True)
for k in range(1, self.n):
l = residual('res2.1', l, reuse=True if k > 1 else False)
# 16,c=32
l = residual('res3.0', l, increase_dim=True)
for k in range(1, self.n):
l = residual('res3.1', l, reuse=True if k > 1 else False)
l = BNReLU('bnlast', l)
# 8,c=64
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, out_dim=10, nl=tf.identity)
prob = tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = prediction_incorrect(logits, label)
# monitor training error
add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
# weight decay on all W of fc layers
wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(),
480000, 0.2, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
add_moving_summary(cost, wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n([cost, wd_cost], name='cost')
def _get_optimizer(self):
lr = get_scalar_var('learning_rate', 0.01, summary=True)
opt = tf.train.MomentumOptimizer(lr, 0.9)
return opt
def get_data(train_or_test):
isTrain = train_or_test == 'train'
ds = dataset.Cifar10(train_or_test)
pp_mean = ds.get_per_pixel_mean()
if isTrain:
augmentors = [
imgaug.CenterPaste((40, 40)),
imgaug.RandomCrop((32, 32)),
imgaug.Flip(horiz=True),
imgaug.MapImage(lambda x: x - pp_mean),
]
else:
augmentors = [
imgaug.MapImage(lambda x: x - pp_mean)
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
if isTrain:
ds = PrefetchData(ds, 3, 2)
return ds
def get_config():
logger.auto_set_dir()
dataset_train = get_data('train')
dataset_test = get_data('test')
return TrainConfig(
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(dataset_test,
[ScalarStats('cost'), ClassificationError()]),
ScheduledHyperParamSetter('learning_rate',
[(1, 0.1), (82, 0.01), (123, 0.001), (300, 0.0002)])
],
model=Model(n=NUM_UNITS),
max_epoch=400,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('-n', '--num_units',
help='number of units in each stage',
type=int, default=36)
parser.add_argument('--load', help='load model')
args = parser.parse_args()
NUM_UNITS = args.num_units
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
config.nr_tower = max(get_nr_gpu(), 1)
SyncMultiGPUTrainer(config).train()
| [
"[email protected]"
] | |
1c7580c59cef2404518a10ae5835349a1cb98e2e | fe6e0a2cfb00d34b58f64f164a747e3df08e8a9d | /client/application/controller/alterquyu.py | 61f40100ef80376d2038f953734fbe44e087f49d | [] | no_license | huboqiao/kmvip | c141814666631c35b8adeec3d3beb5aca0d2d1cd | 11ae7e1f78943c8425516c4f06acf043a99acdcc | refs/heads/master | 2020-02-26T14:58:31.573602 | 2016-08-03T06:29:41 | 2016-08-03T06:29:41 | 64,809,269 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | # -*- coding: utf-8 -*-
'''
Created on 2015-02-11
@author: LiuXinwu
'''
from PyQt4 import QtGui
from application.lib.Commethods import *
from application.view.alterquyu import Ui_Dialog
from application.model.storagemodel import StorageModel
class AlterQuYuController(ControllerAction,Ui_Dialog):
def __init__(self,parent=None):
ControllerAction.__init__(self,parent)
self.parent = parent
self.comboBox.setCurrentIndex(1)
self.model = StorageModel()
self.qid = self.parent.quyuid
self.initData()
        # bind button events to their handlers
self.connect(self.pushButton,SIGNAL('clicked()'),self.alterQuYu)
self.connect(self.pushButton_2,SIGNAL('clicked()'),self.cancel)
    # initialize data and populate the empty input fields
def initData(self):
        # look up all the information for this region
quyuname = self.parent.quyuname
self.lineEdit.setText(self.tr(str(quyuname)))
strisactive = str(self.parent.quyustatname)
if cmp(strisactive,'启用') == 0:
self.comboBox.setCurrentIndex(1)
else:
self.comboBox.setCurrentIndex(0)
    # update the region
def alterQuYu(self):
nameStr = str(self.lineEdit.text())
name = nameStr.strip()
if len(name)==0:
self.boxInfo(u'提示:',u'区域名称不能为空!')
self.lineEdit.setFocus(True)
return
else:
            # update the region
isactive = self.comboBox.currentIndex()
re = self.model.updateQuYu({'id':str(self.qid),'name':name,'isactive':str(isactive)})
self.boxInfo(u'提示:',re['msg'])
self.parent.init()
self.close()
    # close the window
def cancel(self):
self.close() | [
"[email protected]"
] | |
3badef67a00af67ad02f5b54ef749b7f0db9b0e4 | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv6/2018/SKIM5/aliases.py | f27631f33c008f438576a6b4452b5e73593fef9e | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 2,362 | py | import os
import copy
import inspect
##---WP2017---##
from WPandCut2018 import *
##-End WP--##
configurations = '%s/src/SNuAnalytics/Configurations/HWWSemiLepHighMass/' % os.getenv('CMSSW_BASE')
print configurations
mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]
###---Btag SF---###
btagSFSource = '%s/src/PhysicsTools/NanoAODTools/data/btagSF/DeepCSV_102XSF_V1.csv' % os.getenv('CMSSW_BASE')
aliases['Jet_btagSF_shapeFix'] = {
'linesToAdd': [
'gSystem->Load("libCondFormatsBTauObjects.so");',
'gSystem->Load("libCondToolsBTau.so");',
'gSystem->AddIncludePath("-I%s/src");' % os.getenv('CMSSW_RELEASE_BASE'),
'.L %s/patches/btagsfpatch.cc+' % configurations
],
'class': 'BtagSF',
'args': (btagSFSource,),
'samples': mc
}
aliases['btagSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>20 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<20 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
for shift in ['jes', 'lf', 'hf', 'lfstats1', 'lfstats2', 'hfstats1', 'hfstats2', 'cferr1', 'cferr2']:
#aliases['Jet_btagSF_shapeFix_up_%s' % shift] = {
aliases['Jet_btagSF%sup_shapeFix' % shift] = {
'class': 'BtagSF',
'args': (btagSFSource, 'up_' + shift),
'samples': mc
}
aliases['Jet_btagSF%sdown_shapeFix' % shift] = {
'class': 'BtagSF',
'args': (btagSFSource, 'down_' + shift),
'samples': mc
}
aliases['btagSF%sup' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'up'),
'samples': mc
}
aliases['btagSF%sdown' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'down'),
'samples': mc
}
aliases['Wlep_Mt']={
'linesToAdd':[ '.L %s/functions/GetMt.C+' % configurations], ##float GetMt(float pt1, float phi1, float m1, float pt2, float phi2, float m2 )
'expr':'GetMt(Lepton_pt[0],Lepton_phi[0],0,PuppiMET_pt,PuppiMET_phi,0)'
}
aliases['lnjj_Mt_alt']={
'expr':'GetMt(Lepton_pt[0],Lepton_phi[0],0,PuppiMET_pt,PuppiMET_phi,0,Whad_pt,Whad_phi,Whad_mass)'
}
aliases['tau21SF']={
'expr' : '0.98*(isBoosted) + 1*(!isBoosted)',
'samples' : mc
}
| [
"[email protected]"
] | |
cf42b75b2efa1dec4f3180e98cefa5b25abe9359 | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2O/2O-2L_MD_NVT_rerun/set_2.py | ecc0726685dd675f8bc7fe5a217d3d78a9435df4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2O/MD_NVT_rerun/ti_one-step/2O_2L/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
65fbd86580aa9e043d9f0caef295d3ed684f3f1a | 396787df1b472ddfab7d934c149b150352342f03 | /python_fundemental/245_Maximum_Product_Subarray.py | a2757d13801609719dd8cbee8a3de7390dfeba2f | [] | no_license | Deanwinger/python_project | a47b50a9dfc88853a5557da090b0a2ac3f3ce191 | 8c0c2a8bcd51825e6902e4d03dabbaf6f303ba83 | refs/heads/master | 2022-07-10T16:41:56.853165 | 2019-07-21T13:08:48 | 2019-07-21T13:08:48 | 107,653,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | # leetcode 152. Maximum Product Subarray
# 程序员代码面试指南 (Coding Interview Guide), p. 373 | [
"[email protected]"
] | |
538613b4f26e7b13f1d2a94dcb67bc4b9d93c1f4 | df9aa3daa84aba377ca4006d8ec959c28088817a | /mobile/__init__.py | c172fea8eadc5f93e1e5c0c7083f55027f6cf3c5 | [] | no_license | wangyu190810/ufind | c9569137271a151836a16e6dbb5a52ee7d252311 | 8ed8bca8f2cf373d7ee45f1aec31671afac2a9aa | refs/heads/master | 2021-01-15T17:41:07.083036 | 2015-06-08T15:41:58 | 2015-06-08T15:41:58 | 29,869,833 | 1 | 2 | null | 2015-04-26T17:37:13 | 2015-01-26T16:26:58 | Python | UTF-8 | Python | false | false | 50 | py | # -*-coding:utf-8-*-
__author__ = 'Administrator'
| [
"[email protected]"
] | |
32bda72aae9ed518066316d040349215cfefd155 | fcabf294eb251f5d1c657aac8ae5a2affbabf5d2 | /logger/logger_helper.py | 2fc9f3d4b1f429b31fd0827388ce18d3c734cfb6 | [] | no_license | fubUmich/Daily-log-tool | decc87ed6729ef8f1d0de5c0ad0f01d7c1d2228e | 2f58883fb39244c796a4edc168bff4170b80d463 | refs/heads/master | 2020-12-24T11:52:42.555624 | 2016-11-07T19:32:20 | 2016-11-07T19:32:20 | 73,108,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | from datetime import date, datetime, timedelta
import os.path
NOW = datetime.now()
TODAY = date.today()
YESTERDAY = TODAY - timedelta(1)
def show_log(date):
print_log_file(get_log_file_name(date))
def print_log_file(filename):
if os.path.isfile(filename):
with open(filename) as log_file:
print log_file.read()
else:
print "No log found for the date"
def get_date_str(date):
if date == 'today':
return TODAY
elif date == 'yesterday':
return YESTERDAY
else:
return date
def get_log_file_name(date):
date_str = get_date_str(date)
script_dir = os.path.dirname(__file__)
return '{script_dir}/logs/{date}.log'.format(script_dir=script_dir, date=str(date_str))
def write_log(log):
filename = get_log_file_name('today')
with open(filename, 'a+') as log_file:
log_file.write('%s %02d:%02d %s\n' % (NOW.date(), NOW.hour, NOW.minute, log))
def remove_whole_log():
filename = get_log_file_name('today')
with open(filename, 'w+') as log_file:
log_file.write("removed at %s:%s\n" % (NOW.hour, NOW.minute))
def remove_last_log(n):
filename = get_log_file_name('today')
if os.path.isfile(filename):
with open(filename, 'r+') as log_file:
lines = log_file.readlines()
lines = lines[:-int(n)]
with open(filename, 'w') as log_file:
log_file.writelines(lines)
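# Example usage (assuming this module is imported elsewhere as logger_helper):
#   write_log('finished the report')  # appends "YYYY-MM-DD HH:MM finished the report" to today's log
#   show_log('yesterday')             # prints yesterday's entries, or "No log found for the date"
#   remove_last_log(2)                # drops the last two entries from today's log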
| [
"[email protected]"
] | |
88d91c837a9beaf7ec4dc6398b5189d91918d8e2 | 39cccf732927ffed8faa3d844c5cfac6f74a92c0 | /Effective Python Development for Biologists/testing/find_common_kmers_fast.py | b92f00d73038fccc76ed6393a6d636b9c630164c | [] | no_license | pjx1990/python_for_biologists | aca80be2adabbef277a97213716835802105b4ea | c86c1bcffdeea3f82c5b988775d177997437473c | refs/heads/master | 2021-05-18T13:26:41.197766 | 2020-03-30T10:01:24 | 2020-03-30T10:01:24 | 251,262,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | def find_common_kmers(dna, k, threshold):
kmer2count = {}
for start in range(len(dna) + 1 - k):
kmer = dna[start:start+k]
old_count = kmer2count.get(kmer, 0)
kmer2count[kmer] = old_count + 1
result = []
for kmer, count in kmer2count.items():
if count >= threshold:
result.append(kmer)
return result | [
"[email protected]"
] | |
8ef3c2a5dab45535442ca11c46d8fac758790618 | ca55aadabae74fbc52fb07cb2156ba5e35fa7380 | /original_my_quiz.py | 52b20d28d12e1d56ac4d0a0eadbfcb1730682065 | [] | no_license | Metal-Whale/Udacity-IPND-Code-Your-Own-Quiz-Project | 3e6e63fb577da8922e09c5ea4436275a6153413e | 5635cec998d913f758f1f6ae9df450a5c1689dde | refs/heads/master | 2022-01-09T02:39:06.072167 | 2019-05-14T01:43:03 | 2019-05-14T01:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | import time
sample = '''A ___1___ is created with the def keyword. You specify the inputs a ___1___ takes by
adding ___2___ separated by commas between the parentheses. ___1___s by default return ___3___ if you
don't specify the value to return. ___2___ can be standard data types such as string, number, dictionary,
tuple, and ___4___ or can be more complicated such as objects and lambda functions.'''
quiz_template = {'easy': '''Water exists in __1__ states. Water in the ocean would be a __2__. If water is frozen then
it becomes a __3__. When water boils it turns into a __4__.\n''', 'medium': '''Space is known as the __1__ frontier.
Did you know that the __2__ was once part of Earth? Harry Kerry once said, If the moon was made of __3__ would you eat it?
The __4__ that astronauts have left on the moon will be there for 100,000,000 years.\n''','hard': '''The closest star to earth
besides the sun is __1__. That star is 4.2 __2__ away. It would take approximately 500,000 earth __3__ to travel to __1__.
Scientific research states that __1__ is located in the __4__ zone, which means it could support life\n'''}
#variable that contains a dictionary with nested lists
my_answers = {'easy': ['three', 'liquid', 'solid', 'gas'],
'medium': ['final', 'moon', 'rib', 'footprints'],
'hard': ["alpha centauri", "light years", "years", "habitable" ]}
blanks = ["__1__", "__2__", "__3__", "__4__", "__5__"]
user_answers = []
def check_answers(level_difficulty,blank_counter,lives): #Compares user's input with the value of each list in the my_answers dictionary to see if the answer is right or wrong.
print quiz_template[level_difficulty]
print "What is the answer to " + blanks[blank_counter] + "?\n"
user_answer = raw_input()
user_answers.append(user_answer)
    # compare against the expected answer for the current blank of the chosen level
    if user_answer.lower() == my_answers[level_difficulty][blank_counter]:
        return correct(level_difficulty,blank_counter,lives)
    else:
        return wrong(level_difficulty,blank_counter,lives)
def correct(level_difficulty,blank_counter,lives): #Ends the program if all answers are correct or outputs the users current level, increases the index by 1 and the users current lives
print "That's Correct!\n"
if blank_counter == 3:
print "Congratulations! You passed the quiz with " + str(lives) + " lives remaining!"
return
else:
print "Let's continue with the " + level_difficulty + " Quiz\n"
return check_answers(level_difficulty, blank_counter + 1, lives)
def wrong(level_difficulty,blank_counter,lives): #Deducts a life from the user and outputs all current values or ends the quiz if there are no more lives
print "Sorry, that is wrong answer."
lives -= 1
print "You have " + str(lives) + " lives left."
if lives == 0:
print"Game Over"
else:
return check_answers(level_difficulty, blank_counter, lives)
def life(level_difficulty): #User chooses lives, initialize counter index, outputs assigned values
blank_counter = 0
lives = int(raw_input("Choos your lives: "))
print "\nYou will have " + str(lives) + " lives\n"
print "Begin " + level_difficulty + " Quiz:\n"
check_answers(level_difficulty.lower(),blank_counter,lives)
def quiz_intro(): #Quiz Intro function starts the quiz and outputs the user's level based on their input.
print "Welcome to the Quiz, Please select easy | medium | hard to continue"
level_difficulty = raw_input("Choose Your Fate: ")
if level_difficulty.lower() == "easy":
print "Easy? I'm dissapointed in you.\n"
return life(level_difficulty)
elif level_difficulty.lower() == "medium":
print "Challenge Accepted\n"
return life(level_difficulty)
elif level_difficulty.lower() == "hard":
print "Are you sure you want to lose?"
time.sleep(2)
print "Too late now..."
time.sleep(1)
print "Let the Games Begin!\n"
return life(level_difficulty)
else:
print "Access Denied. This message will self destruct"
print quiz_intro()
# for help with debugging I used
# print "variable: ", variable
# to test complex functions
# debug using scientific method
#assert statements will crash the function if there is an error
#range'''
| [
"[email protected]"
] | |
6577b9582d2bf65a5354e5d3a33283e27f69895f | 64b301791f73c37950ade855f43e3c6f98aa7492 | /regularexp/exampleTwo.py | 4beb6f89d5f4cf3bef0d89e590adb5361b4e8ceb | [] | no_license | nandansn/pythonlab | cf71dc6f894076d84c0bff3bd25b395fef37dfa2 | b3d6b49614631b621f52789ef20ed9f6d978feb0 | refs/heads/master | 2022-12-27T03:49:08.811610 | 2022-12-15T06:25:53 | 2022-12-15T06:25:53 | 95,769,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import re
myString ="nandakumar"
matchedPattern = re.findall(r'a|n|d|k|u|m|r',myString)
print(matchedPattern) | [
"[email protected]"
] | |
3328c94f669787d0537128c0ce831c4590974225 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/Asian_Vorst.py | bfbf7cdde70cf2f3a964587cc08d48d3fda1e972 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,781 | py | """
Valuation of Asian Options using Vorst's method.
Only valid for fixed strike arithmetic average options.
Implementation closely based on Black-Scholes implementation
contained in FC/Eq/OptDiv. Mathematics provided by Dr Antonie Kotze.
Implementation relies on SEQ package being installed and updated daily i.r.o
average options.
History:
2003-11-12 - Russel Webber - Created
2003-11-24 - Russel Webber - Made changes to return Greeks to Arena, returning list as PV.
2003-11-25 - Russel Webber - Added discounting of pv if pay offset > 0
2003-11-28 - Russel Webber - Added function to return model inputs to ASQLs
"""
import ael, math, sys  # sys is needed for sys.exit() in sim2nac()
if ael.userid() == 'EXTRACT':
printout = 0
else:
printout = 1
cached_greeks = {}
def AsianVorst(s, k, Sav, rd, t, divy, sigma, phi, asian_times, mm):
"""AsianVorst(s, k, Sav, rd, t, divy, sigma, phi, asian_times, mm)
Implementation of Vorst's Asian option valuation method.
s (float): Spot price
k (float): Strike price
Sav (float): Average spot price so far
rd (float): Continuous compounded risk-free rate
t (float): Fractional years to expiry
divy (float): Continuous compounded dividend yield
sigma (float): Continuous compounded volatility
phi (int): Binary variable = 1 for call, -1 for put
asian_times (list): List of fractional years between valuation date and
averaging date.
mm (int): Number of averaging dates passed so far
returns (tuple): Tuple of present value, delta and gamma"""
n = len(asian_times)
mm = float(mm)
n = float(n)
Y = rd - divy - (sigma * sigma * 0.5)
Sum1 = 0
    for i in range(int(mm), int(n)):
Sum1 = Sum1 + (Y * asian_times[i])
BigM = math.log(s) + (Sum1 / (n - mm))
Sum2 = 0
    for i in range(int(mm), int(n)):
        for j in range(int(mm), int(n)):
Sum2 = Sum2 + min(asian_times[i], asian_times[j])
V = Sum2 * sigma * sigma /((n - mm) * (n - mm))
Sum3 = 0
U1 = math.exp(BigM + (V * 0.5))
    for i in range(int(mm), int(n)):
Sum3 = Sum3 + (math.exp((rd - divy) * asian_times[i]))
AA = Sum3 / (n - mm)
Kstar = n / (n - mm) * (k - mm / n * Sav)
KK = Kstar - (s * AA - U1)
if Kstar > 0 and V > 0:
xx = (BigM - math.log(KK) + V) / math.sqrt(V)
yy = xx - math.sqrt(V)
pv = (n - mm) / n * math.exp(-rd * t) * phi * (math.exp(BigM + V * 0.5) * \
ael.normal_dist(phi * xx) - KK * ael.normal_dist(phi * yy))
else:
pv = math.exp(-rd * t) * phi * (mm / n * Sav - k + ( n-mm)/n * s * AA)
b1 = math.exp(-rd * t) * phi
b2 = math.exp(BigM + V * 0.5)
if Kstar > 0 and V > 0:
delta = (n - mm) / n * b1 * (b2 / s * (ael.normal_dist(phi * xx) - \
ael.normal_dist(phi * yy)) + AA * ael.normal_dist(phi * yy))
gamma = -phi * (n - mm) / n * b1 * (math.exp(-xx * xx / 2) / math.sqrt(2 * math.pi)) / \
math.sqrt(V) * ((1 - b2) / s + AA / KK) * \
(1 / s + (AA - b2 / s) / KK)
else:
delta = math.exp(-rd * t) * phi * ((n-mm) / n * AA)
gamma = 0
return (pv, delta, gamma)
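# Illustrative call with made-up numbers (not taken from the original system):
# a one-year fixed-strike arithmetic-average call, spot 100, strike 100, 10% CCR
# rate, zero dividend yield, 25% vol, quarterly averaging, no dates passed yet:
#   pv, delta, gamma = AsianVorst(100.0, 100.0, 100.0, 0.10, 1.0, 0.0, 0.25, 1,
#                                 [0.25, 0.5, 0.75, 1.0], 0)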
def nac2ccr(m, r):
"""nac2ccr(m, r)
Converts notional amount compounded annually/semi-annually etc
to continuous compounded format.
m (int): Number of compounds per annum (1 for annually)
r (float): Interest rate per annum
returns (float): Continuous compounded yield"""
try:
m = float(m)
r = float(r)
except:
print 'Arguments must be numeric values'
return m * math.log(1 + r/m)
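# e.g. nac2ccr(2, 0.08) = 2*ln(1.04) ~ 0.0784, i.e. 8% NACS is roughly 7.84% CCR.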
def sim2nac(m, simpleYield, t):
"""sim2nac(m, simpleYield, t)
Converts simple yield to notional amount compounded
m (int): Number of compounds per annum (1 for annually)
simpleYield (float): Simple yield
t (float): Time in years
returns (float): Yield in NAC format"""
try:
m = float(m)
simpleYield = float(simpleYield)
t = float(t)
except:
print 'Arguments must be numeric values'
sys.exit()
return ((1 + simpleYield*t)**(1/(m*t)) - 1)*m
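# e.g. sim2nac(2, 0.06, 0.5) = ((1 + 0.03)**(1/1) - 1)*2 = 0.06, i.e. a 6% simple
# yield over half a year is equivalent to 6% compounded semi-annually.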
def DividendYieldFromCash(aelOption):
"""DividendYieldFromCash(aelOption)
Calculates the dividend yield from the underlying's cash dividends
aelOption An Instrument entity of type Option
returns The dividend yield in CCR format"""
aelUnderlying = aelOption.und_insaddr
selDivs = aelUnderlying.dividends(aelOption.exp_day)
SDate = ael.date_valueday()
EDate = aelOption.exp_day
# Calculate the risk free rate in CCR format
ycrf = aelOption.used_yield_curve(aelOption.curr)
rd = ycrf.yc_rate(SDate,
EDate,
'Continuous',
ael.YieldCurve[ycrf.ir_name].storage_daycount,
'Spot Rate',
0)
# Get the spot price for the underlying
Spot = aelOption.used_und_price()
DivsPv = 0.0
for div in selDivs:
LDRDate = div.day # Record day
payDate = div.pay_day
cashValue = div.dividend
if SDate <= LDRDate and EDate >= LDRDate:
t = SDate.days_between(payDate)/365.0
DivsPv = DivsPv + cashValue*math.exp(-rd*t)
    t = SDate.days_between(EDate)/365.0  # fractional years, as expected by sim2nac
    simpleYield = DivsPv / (Spot * t)
dyNacs = sim2nac(2, simpleYield, t)
dyCCR = nac2ccr(2, dyNacs)
return dyCCR
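# Worked illustration of the yield chain above (toy figures, not market data):
# with DivsPv = 2.0, Spot = 100.0 and t = 0.5 years the simple yield is
# 2.0 / (100.0 * 0.5) = 0.04, sim2nac(2, 0.04, 0.5) = 0.04 and
# nac2ccr(2, 0.04) = 2 * ln(1.02) ~= 0.0396, which would be returned as dyCCR.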
def AsianOption(aelOption, d):
"""AsianOption(aelOption, d)
Valuation function of asian options
aelOption An instrument entity of type Option
d Dividend yield in CCR format"""
# Retrieve s (current spot market price of the underlying) from ADS
s = aelOption.used_und_price()
# Retrieve k (strike price) from ADS
k = aelOption.strike_price
# Retrieve r (risk free rate) from ADS
ycrf = aelOption.used_yield_curve(aelOption.curr)
r = ycrf.yc_rate(ael.date_valueday(),
aelOption.exp_day,
'Continuous',
ael.YieldCurve[ycrf.ir_name].storage_daycount,
'Spot Rate',
0)
t = ael.date_valueday().days_between(aelOption.exp_day)/365.0
FairForward = s*math.exp((r-d)*t)
if printout: print 'FairForward = ', FairForward
# Calculate sigma (volatility)
vs = aelOption.used_volatility(aelOption.curr)
    # Changed by PG and SM on 12/03/03. Skew is defined as a percentage relative to spot, not as an
    # absolute strike. Changed accordingly.
if printout: print 'ATM Vol on Expiry = ', vs.volatility(0, aelOption.exp_day)*100
sigma = vs.volatility((k/FairForward-1)*100, aelOption.exp_day)
#Test Values
#r = nac2ccr(1, 0.08)
#sigma = 0.2325
#s= 8990.0
if printout: print 'Volatility = ', sigma*100
    if printout: print 'Moneyness = ', (k/FairForward-1)*100
if printout: print 'Expiry = ', aelOption.exp_day
# Determine phi (1 = call, -1 = put)
if aelOption.call_option:
phi = 1.0
else:
phi = -1.0
#Need to get ave dates and calc ave spot so far
ave_dates = []
ave_spot = 0
n = 0
if len(aelOption.time_series()) == 0:
ael.log('Error - Instrument ' + aelOption.insid + ' has no averaging dates!')
return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for ts in aelOption.time_series():
if ts.ts_specnbr.field_name == 'FAveragePrices':
ave_dates.append(ts.day)
if ts.value != -1 and ts.day <= ael.date_valueday():
ave_spot = ave_spot + ts.value
n = n + 1
ave_dates.sort()
if n > 0:
ave_spot = ave_spot / n
mTheta = 1
else:
ave_spot = s
mTheta = 0
if printout: print '-----------------'
if printout: print 'Ave s \t= ', ave_spot
if printout: print 's \t= ', s
if printout: print 'k \t= ', k
if printout: print 'r \t= ', r
if printout: print 'sigma\t= ', sigma
if printout: print 't \t= ', t
if printout: print 'd \t= ', d
if printout: print 'phi\t= ', phi
if t < 0:
return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
elif t == 0:
return max(0.0, phi*(ave_spot-k)), 0.0, 0.0, 0.0, 0.0, 0.0
else:
#Create list of fractional dates for averaging
#Calc m, number of averaging dates passed by value date
asian_times = []
m = 0
for ave_date in ave_dates:
asian_times.append(ael.date_valueday().days_between(ave_date)/365.0)
if ave_date <= ael.date_valueday():
m = m + 1
#Get pv
pv, delta, gamma = AsianVorst(s, k, ave_spot, r, t, d, sigma, phi, asian_times, m)
#Calc rho
r1 = math.exp(r) - 1
r1 = r1 + 0.01
r1 = math.log(1 + r1)
pv1 = AsianVorst(s, k, ave_spot, r1, t, d, sigma, phi, asian_times, m)[0]
rho = pv1 - pv
#Calc vega
sigma1 = sigma + 0.01
pv1 = AsianVorst(s, k, ave_spot, r, t, d, sigma1, phi, asian_times, m)[0]
vega = pv1 - pv
#Calc theta
t1 = t - 1.0 / 365.0
if t1 <= 0.00000000001:
pv1 = max(0, phi * (ave_spot - k))
else:
asian_times = []
for ave_date in ave_dates:
asian_times.append(ael.date_valueday().add_days(1).days_between(ave_date)/365.0)
if mTheta: mTheta = m + 1
pv1 = AsianVorst(s, k, ave_spot, r, t1, d, sigma, phi, asian_times, mTheta)[0]
theta = pv1 - pv
if aelOption.pay_day_offset:
yc = aelOption.used_yield_curve()
expiry_date = aelOption.exp_day
pay_date = expiry_date.add_banking_day(aelOption.und_insaddr.curr, aelOption.pay_day_offset)
df = yc.yc_rate(expiry_date, pay_date, 'None', 'None', 'Discount')
pv = pv * df
if printout: print 'Delta ', delta
if printout: print 'Gamma ', gamma
if printout: print 'Theta ', theta
if printout: print 'Rho', rho
if printout: print 'Vega', vega
if printout: print 'Present Value', pv
if printout: print '################################'
return [pv, delta, gamma, theta, rho, vega]
def pvExp(i, calc=1, ref=0):
"""pvExp(i, calc=1, ref=0)
Wrapper function for valuation of asian options on dividend equities.
Explicit dividend yield is retrieved from the underlying"""
if calc and i.record_type == 'Instrument' and i.instype == 'Option':
# retrieve d (dividend yield) from additional info (NACA format)
d = float(i.und_insaddr.add_info('DividendYield'))
# Convert d from NACA to CCR format
d = nac2ccr(1, d)
res = AsianOption(i, d)[0:4]
else:
res = [0.0, None, None, None]
return [ [ res, i.exp_day, i.curr] ]
def pvImp(i, calc=1, ref=0):
"""pvImp(i, calc=1, ref=0)
Wrapper function for valuation of asian options on dividend equities.
Dividend yield is calculated implicitly from underlying's cash dividends"""
if calc and i.record_type == 'Instrument' and i.instype == 'Option':
# Calculate implicit yield from cash dividends
d = DividendYieldFromCash(i)
res = AsianOption(i, d)[0:4]
else:
res = [0.0, None, None, None]
return [ [ res, i.exp_day, i.curr] ]
def SQL_Greeks(i, greek, *rest):
if i.record_type == 'Instrument' and i.instype == 'Option':
        if i not in cached_greeks:
# retrieve d (dividend yield) from additional info (NACA format)
d = float(i.und_insaddr.add_info('DividendYield'))
# Convert d from NACA to CCR format
d = nac2ccr(1, d)
res = AsianOption(i, d)
pv = res[0]
delta = res[1]
gamma = res[2]
theta = res[3]
rho = res[4]
vega = res[5]
cached_greeks[i] = [pv, delta, gamma, theta, rho, vega]
if greek == 'PV':
return cached_greeks[i][0]
if greek == 'Delta':
return cached_greeks[i][1]
elif greek == 'Gamma':
return cached_greeks[i][2]
elif greek == 'Theta':
return cached_greeks[i][3]
elif greek == 'Rho':
return cached_greeks[i][4]
elif greek == 'Vega':
return cached_greeks[i][5]
else:
return 0.0
def SQL_ModelInputs(i, input, *rest):
# retrieve d (dividend yield) from additional info (NACA format)
d = float(i.und_insaddr.add_info('DividendYield'))
# Convert d from NACA to CCR format
d = nac2ccr(1, d)
# Retrieve s (current spot market price of the underlying) from ADS
s = i.used_und_price()
# Retrieve k (strike price) from ADS
k = i.strike_price
# Retrieve r (risk free rate) from ADS
ycrf = i.used_yield_curve(i.curr)
r = ycrf.yc_rate(ael.date_valueday(),
i.exp_day,
'Continuous',
ael.YieldCurve[ycrf.ir_name].storage_daycount,
'Spot Rate',
0)
t = ael.date_valueday().days_between(i.exp_day)/365.0
FairForward = s*math.exp((r-d)*t)
# Calculate sigma (volatility)
vs = i.used_volatility(i.curr)
    # Changed by PG and SM on 12/03/03. Skew is defined as a percentage relative to spot, not as an
    # absolute strike. Changed accordingly.
sigma = vs.volatility((k/FairForward-1)*100, i.exp_day)
# Determine phi (1 = call, -1 = put)
if i.call_option:
phi = 1.0
else:
phi = -1.0
if input == 'd':
return d
elif input == 's':
return s
elif input == 'k':
return k
elif input == 'r':
return r
elif input == 't':
return t
elif input == 'FairForward':
return FairForward
elif input == 'sigma':
return sigma
elif input == 'phi':
return phi
else:
return 0.0
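# Usage sketch (assumption -- the exact wiring depends on the local Front Arena
# installation): SQL_Greeks and SQL_ModelInputs are intended to be called from
# ASQL reports as extension functions, passing an Instrument entity of type
# Option plus the name of the greek or model input wanted, e.g. something along
# the lines of SQL_Greeks(Instrument, 'Delta') or SQL_ModelInputs(Instrument, 'sigma').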
| [
"[email protected]"
] | |
4e8f515df8c7dcaaf54cfd5914649b2aea7c15d9 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/3135/codes/1679_1880.py | db0fd44f615b14c67d5ec39b46a5181316fd0db0 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | # Ao testar sua solução, não se limite ao caso de exemplo.
m= int(input("Insira o mes:"))
x1="janeiro"
x2="fevereiro"
x3="marco"
x4="abril"
x5="maio"
x6="junho"
x7="julho"
x8="agosto"
x9="setembro"
x10="outubro"
x11="novembro"
x12="dezembro"
if(1<=m and m<=12):
if(m==1):
print(x1.lower())
elif(m==2):
print(x2.lower())
elif(m==3):
print(x3.lower())
elif(m==4):
print(x4.lower())
elif(m==5):
print(x5.lower())
elif(m==6):
print(x6.lower())
elif(m==7):
print(x7.lower())
elif(m==8):
print(x8.lower())
elif(m==9):
print(x9.lower())
elif(m==10):
print(x10.lower())
elif(m==11):
print(x11.lower())
elif(m==12):
print(x12.lower())
else:
print("numero de mes invalido") | [
"[email protected]"
] | |
50cc11aedb932282abe8ea24e603f8d0c932338b | 12902e18cc2c836d7407a2a9b9344396aed1c7dd | /log_parse/parse-local.py | baf95e899225b0872e4f9fa6953cce0b5f6f9c0c | [] | no_license | 1010kazu/ccf-log-analyzer | 9bf1bce11682a880e59d01fa614dbca4ca2a438e | 90b7b075ded1741d6276c20ff35231f5417d9ff8 | refs/heads/main | 2023-08-25T02:00:22.183292 | 2021-09-27T21:59:53 | 2021-09-27T21:59:53 | 409,393,092 | 0 | 0 | null | 2021-09-23T00:12:20 | 2021-09-23T00:12:19 | null | UTF-8 | Python | false | false | 1,351 | py | from types import ClassMethodDescriptorType
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
content = []
with open('log-dev-ver.txt') as f:
for line in f:
        line = line.rstrip()  # strip the trailing newline
content.append(line)
content = [x for x in content if x != '' and 'ccb' in x]
content = [y.split(" ") for y in content]
results = []
results = [[c[1], c[-3], c[-1]] for c in content]  # character name, rolled value, success/failure
pattern = r'.*?(\d+).*'
## test dev-ver
for r in results:
    print(r[0], r[1], r[2])
    matched = re.match(pattern, r[2])
    if matched:
        print(matched.group(1))
for i in results:  # replace full-width spaces in names
i[0] = i[0].replace('\u3000', ' ')
print('total ccb:', len(results))
data = []
for i in results:
data.append(i[1])
intdata=[]
for i, d in enumerate(data):
print(i, d)
    try:
        intdata.append(int(d))  # convert to int for plotting
    except ValueError:
        # print(d)
        print("error in mapping str to int")  # skip entries that don't match the expected format
        continue
nplist = np.array(intdata)
hist_data, bins = np.histogram(nplist, bins=10)
return_data = list(hist_data)
print(return_data)
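# Quick sanity check of the np.histogram call above (assumed, self-contained
# example -- not taken from the actual log data): np.histogram returns
# (counts, bin_edges) with len(bin_edges) == len(counts) + 1, and ten evenly
# spread values fall one per bin:
#   counts, edges = np.histogram(np.arange(10), bins=10)
#   list(counts)   # -> [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#   len(edges)     # -> 11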
# plot the histogram
# df = pd.DataFrame(intdata)
# plt.figure()
# df.hist()
# plt.title('Dice Histogram')
# plt.savefig('./hist')
# plt.close('all')
| [
"[email protected]"
] | |
5cd77e31d401a90641b8ad3d0e8e6e078b280814 | fa25d937309fea55ff5a33d30262012adedfaf19 | /04_LinkedLists/remove_duplicates_from_sorted_list.py | d39589b5b97447b5f621d948a8900800f05ed8a4 | [
"MIT"
] | permissive | alqamahjsr/InterviewBit-1 | 20d033a2feecb85a37b28f2ff178b8d85424a6ea | fe2ce1bd64814c3a5687bf9b827b46bdbcf9144f | refs/heads/master | 2020-09-16T18:31:10.326427 | 2018-12-09T11:13:14 | 2018-12-09T11:13:14 | 223,853,679 | 1 | 3 | MIT | 2019-11-25T03:31:24 | 2019-11-25T03:31:23 | null | UTF-8 | Python | false | false | 1,064 | py | # Remove Duplicates from Sorted List
# https://www.interviewbit.com/problems/remove-duplicates-from-sorted-list/
#
# Given a sorted linked list, delete all duplicates such that each element appear only once.
#
# For example,
# Given 1->1->2, return 1->2.
# Given 1->1->2->3->3, return 1->2->3.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the head node in the linked list
def deleteDuplicates(self, A):
if A is None or A.next is None:
return A
prev, tmp = A, A.next
while tmp:
if prev.val == tmp.val:
prev.next = tmp.next
tmp = prev.next
else:
prev = tmp
tmp = tmp.next
return A
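# Quick self-check (illustrative; assumes the ListNode class sketched above):
#   def build(vals):
#       head = ListNode(vals[0])
#       node = head
#       for v in vals[1:]:
#           node.next = ListNode(v)
#           node = node.next
#       return head
#   head = Solution().deleteDuplicates(build([1, 1, 2, 3, 3]))
#   out = []
#   while head:
#       out.append(head.val)
#       head = head.next
#   print(out)  # expected: [1, 2, 3]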
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # | [
"[email protected]"
] |